| prompt | completion | api |
|---|---|---|
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |
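Each record pairs a code prefix (`prompt`) with the masked expression that completes it (`completion`) and the fully qualified API that the expression calls (`api`). As an illustration, the layout of the first sample below, shown as a Python dict (the "..." stands for the truncated prefix):

record = {
    "prompt": "# fill_missing_values.py\n...\nts_df = ",   # code prefix, cut at the masked call
    "completion": "pd.read_csv(args.data)",                # the masked expression
    "api": "pandas.read_csv",                              # fully qualified API name
}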
# fill_missing_values.py
# Input: a dataframe that may contain missing values, strategy for
# filling those values, metadata for that df's columns
# Output: a dataframe that fills in missing values
import sys
import pandas as pd
import argparse
import json
EXEMPT_COLS = []
ID_COLS = []
def main():
parser = argparse.ArgumentParser(description="Script for filling in "
"missing values in a "
"dataset according to a "
"specified strategy.")
parser.add_argument('--data', type=str, required=True,
help='Path to csv dataframe of readings')
parser.add_argument('--static', type=str, required=False,
help='Path to csv dataframe of static metadata')
parser.add_argument('--data_dict', type=str, required=True,
help='JSON dictionary describing data schema')
parser.add_argument('--output', type=str, required=False, default=None)
parser.add_argument('--strategy', type=str, required=True,
choices=['pop_mean', 'carry_forward',
'similar_subject_mean', 'GRU_simple',
'GRU_complex', 'nulls', 'None'])
parser.add_argument('--multiple_strategies', action='store_true', default=False,
help='If set, execute a backup second and third strategy')
parser.add_argument('--second_strategy', type=str, required=False,
default='carry_forward',
choices=['pop_mean', 'carry_forward',
'similar_subject_mean', 'GRU_simple',
'GRU_complex', 'nulls', 'None'])
parser.add_argument('--third_strategy', type=str, required=False,
default='pop_mean',
choices=['pop_mean', 'carry_forward',
'similar_subject_mean', 'GRU_simple',
'GRU_complex', 'nulls', 'None'])
args = parser.parse_args()
# TODO: reorganize this control flow to better accommodate the different
# combinations of arguments needed.
ts_df = | pd.read_csv(args.data) | pandas.read_csv |
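# For illustration only (not part of the script above): the simpler strategies it
# declares map onto one-line pandas operations. Assuming ts_df holds the readings,
#   ts_df.ffill()                  # carry_forward: propagate the last observed value
#   ts_df.fillna(ts_df.mean())     # pop_mean: fill with each column's population mean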
#!/usr/bin/python
from confluent_kafka import Consumer, KafkaError
import json
import re
import time
import datetime
import calendar
import shutil
import gzip
import os
import sys
import random
import struct
if sys.version_info > (3,):
import pandas as pd
from fastparquet import write as parqwrite
from fastparquet import ParquetFile
else:
from pychbase import Connection, Table, Batch
# Variables - should be settable by arguments at some point
envvars = {}
# envvars['var'] = [default, required (True/False), type ('str'/'int'/'flt'/'bool')]
# What type of destination will this instance be sending to
envvars['dest_type'] = ['', True, 'str'] # maprdb, parquet, json
#Kafka/Streams
envvars['zookeepers'] = ['', False, 'str']
envvars['kafka_id'] = ['', False, 'str']
envvars['bootstrap_brokers'] = ['', False, 'str']
envvars['offset_reset'] = ['earliest', False, 'str']
envvars['group_id'] = ['', True, 'str']
envvars['topic'] = ['', True, 'str']
envvars['loop_timeout'] = ["5.0", False, 'flt']
# Field Creation - used as a basic way to create a field based on another field.
# If src is set and dst is not, an error occurs and the field is not created (illustrated after main() below)
# Example: src = ts, dst = ts_part, start = 0, end = 10. This would change a value like 2017-08-08T21:26:10.843768Z to 2017-08-08
envvars['derived_src'] = ['', False, 'str'] # The field to src
envvars['derived_dst'] = ['', False, 'str'] # The field to put in the dest
envvars['derived_start'] = [0, False, 'int'] # The position to start
envvars['derived_end'] = [0, False, 'int'] # The position to end
envvars['derived_req'] = [0, False, 'int'] # Fail if the derivation/conversion fails
envvars['costom_transform'] = ['', False, 'str'] # custom transform applied to the JSON record
#Loop Control
envvars['rowmax'] = [50, False, 'int']
envvars['timemax'] = [60, False, 'int']
envvars['sizemax'] = [256000, False, 'int']
# Parquet Options
envvars['parq_offsets'] = [50000000, False, 'int']
envvars['parq_compress'] = ['SNAPPY', False, 'str']
envvars['parq_has_nulls'] = [False, False, 'bool']
envvars['parq_merge_file'] = [0, False, 'int']
# JSON Options
envvars['json_gz_compress'] = [0, False, 'bool'] # Not supported yet
# MapR-DB Options
envvars['maprdb_table_base'] = ['', True, 'str']
envvars['maprdb_row_key_fields'] = ['', True, 'str']
envvars['maprdb_row_key_delim'] = ['_', False, 'str']
envvars['maprdb_family_mapping'] = ['', True, 'str']
envvars['maprdb_create_table'] = [0, False, 'int']
envvars['maprdb_batch_enabled'] = [0, False, 'int']
envvars['maprdb_print_drill_view'] = [0, False, 'int']
#File Options
envvars['file_maxsize'] = [8000000, False, 'int']
envvars['file_uniq_env'] = ['HOSTNAME', False, 'str']
envvars['file_partition_field'] = ['day', False, 'str']
envvars['file_partmaxage'] = ['600', False, 'int']
envvars['file_unknownpart'] = ['unknown', False, 'str']
envvars['file_table_base'] = ['', True, 'str']
envvars['file_tmp_dir'] = ['/tmp', False, 'str']
envvars['file_write_live'] = [0, False, 'int']
# Bad Data Management
envvars['remove_fields_on_fail'] = [0, False, 'int'] # If Json fails to import, should we try to remove_fields based on 'REMOVE_FIELDS'
envvars['remove_fields'] = ['', False, 'str'] # Comma Sep list of fields to try to remove if failure on JSON import
# Debug
envvars['debug'] = [0, False, 'int']
#envvars['drop_req_body_on_error'] = [1, False, 'int']
loadedenv = {}
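# loadenv() is called in main() below but is defined outside this excerpt. The
# stand-in sketch here only illustrates the [default, required, type] convention
# documented above; the environment-variable naming, error handling and casting
# rules are assumptions, not the original implementation.
def loadenv(evars):
    out = {}
    for name, (default, required, vtype) in evars.items():
        val = os.environ.get(name.upper(), default)
        if required and val == '':
            print("Required setting %s was not provided - exiting" % name.upper())
            sys.exit(1)
        if vtype == 'int':
            val = int(val)
        elif vtype == 'flt':
            val = float(val)
        elif vtype == 'bool':
            val = bool(int(val))
        out[name] = val
    return out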
def main():
table_schema = {}
cf_schema = {}
cf_lookup = {}
table = None
global loadedenv
loadedenv = loadenv(envvars)
if loadedenv['dest_type'] != 'maprdb':
loadedenv['tmp_dir'] = loadedenv['file_tmp_dir']
loadedenv['uniq_val'] = os.environ[loadedenv['file_uniq_env']]
if loadedenv['debug'] == 1:
print(json.dumps(loadedenv, sort_keys=True, indent=4, separators=(',', ': ')))
if loadedenv['derived_src'] != '' and loadedenv['derived_dst'] == '':
print("If adding a field, you must have a field name")
print("derived_src %s - derived_dst: %s" % (loadedenv['derived_src'], loadedenv['derived_dst']))
sys.exit(1)
if loadedenv['dest_type'] == 'parquet':
if not sys.version_info > (3,):
print("Python 2 is not supported for Parquet Writer, please use Python 3")
sys.exit(1)
elif loadedenv['dest_type'] == 'maprdb':
if sys.version_info > (3,):
print("Python 3 is not supported for maprdb load please use Python 2")
sys.exit(1)
table_schema, cf_schema, cf_lookup = loadmaprdbschemas()
myview = drill_view(table_schema)
if loadedenv['debug'] >= 1 or loadedenv['maprdb_print_drill_view'] == 1:
print("Drill Shell View:")
print( myview)
if loadedenv['maprdb_print_drill_view'] == 1:
sys.exit(0)
if loadedenv['debug'] >= 1:
print("Schema provided:")
print(table_schema)
print("")
print("cf_lookip:")
print(cf_lookup)
connection = Connection()
try:
table = connection.table(loadedenv['maprdb_table_base'])
except:
if loadedenv['maprdb_create_table'] != 1:
print("Table not found and create table not set to 1 - Cannot proceed")
sys.exit(1)
else:
print("Table not found: Creating")
connection.create_table(loadedenv['maprdb_table_base'], cf_schema)
try:
table = connection.table(loadedenv['maprdb_table_base'])
except:
print("Couldn't find table, tried to create, still can't find, exiting")
sys.exit(1)
if not loadedenv['dest_type'] == 'maprdb':
if not os.path.isdir(loadedenv['tmp_dir']):
os.makedirs(loadedenv['tmp_dir'])
curfile = loadedenv['uniq_val'] + "_curfile." + loadedenv['dest_type']
# Get the Bootstrap brokers if it doesn't exist
if loadedenv['bootstrap_brokers'] == "":
if loadedenv['zookeepers'] == "":
print("Must specify either Bootstrap servers via BOOTSTRAP_BROKERS or Zookeepers via ZOOKEEPERS")
sys.exit(1)
mybs = bootstrap_from_zk(loadedenv['zookeepers'], loadedenv['kafka_id'])
else:
mybs = loadedenv['bootstrap_brokers']
if loadedenv['debug'] >= 1:
print (mybs)
# Create Consumer group to listen on the topic specified
c = Consumer({'bootstrap.servers': mybs, 'group.id': loadedenv['group_id'], 'default.topic.config': {'auto.offset.reset': loadedenv['offset_reset']}})
c.subscribe([loadedenv['topic']], on_assign=print_assignment)
# Initialize counters
rowcnt = 0
sizecnt = 0
lastwrite = int(time.time()) - 1
dataar = []
part_ledger = {}
# Listen for messages
running = True
while running:
curtime = int(time.time())
timedelta = curtime - lastwrite
try:
message = c.poll(timeout=loadedenv['loop_timeout'])
except KeyboardInterrupt:
print("\n\nExiting per User Request")
c.close()
sys.exit(0)
if message is None:
# No message was found but we still want to check our stuff
pass
elif not message.error():
rowcnt += 1
jmsg, errcode = returnJSONRecord(message)
if errcode == 0:
sizecnt += len(json.dumps(jmsg))
dataar.append(jmsg)
elif message.error().code() != KafkaError._PARTITION_EOF:
print("MyError: " + message.error().str())
running = False
break
# If the row count, accumulated size, or time delta exceeds its maximum, write out the group.
if (rowcnt >= loadedenv['rowmax'] or timedelta >= loadedenv['timemax'] or sizecnt >= loadedenv['sizemax']) and len(dataar) > 0:
if loadedenv['dest_type'] != 'maprdb':
part_ledger = writeFile(dataar, part_ledger, curfile, curtime, rowcnt, sizecnt, timedelta)
part_ledger = dumpPart(part_ledger, curtime)
else:
writeMapRDB(dataar, table, cf_lookup, rowcnt, sizecnt, timedelta)
rowcnt = 0
sizecnt = 0
lastwrite = curtime
dataar = []
c.close()
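# returnJSONRecord() is called in the poll loop above but defined outside this
# excerpt. A minimal, assumed sketch of its contract: parse the message value as
# JSON, apply the derived-field slice described in the config comments (e.g.
# ts -> ts_part, characters 0-10), and return (record, errcode) with errcode 0 on
# success. The name suffix and error handling are illustrative only.
def returnJSONRecord_sketch(message):
    try:
        rec = json.loads(message.value())
    except ValueError:
        return {}, 1
    src = loadedenv['derived_src']
    dst = loadedenv['derived_dst']
    if src != '' and src in rec:
        rec[dst] = str(rec[src])[loadedenv['derived_start']:loadedenv['derived_end']]
    return rec, 0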
def writeMapRDB(dataar, table, cf_lookup, rowcnt, sizecnt, timedelta):
if loadedenv['maprdb_batch_enabled'] == 1:
batch = table.batch()
for r in dataar:
batch.put(db_rowkey(r), db_row(r, cf_lookup))
batch_errors = batch.send()
if batch_errors == 0:
if loadedenv['debug'] >= 1:
print("%s Write batch to %s at %s records - Size: %s - Seconds since last write: %s - NO ERRORS" % (datetime.datetime.now(), loadedenv['maprdb_table_base'], rowcnt, sizecnt, timedelta))
else:
print("Multiple errors on write - Errors: %s" % batch_errors)
sys.exit(1)
else:
bcnt = 0
for r in dataar:
bcnt += 1
try:
table.put(db_rowkey(r), db_row(r, cf_lookup))
except:
print("Failed on record with key: %s" % db_rowkey(r))
print(db_row(r, cf_lookup))
sys.exit(1)
if loadedenv['debug'] >= 1:
print("Pushed: %s rows" % rowcnt)
def dumpPart(pledger, curtime):
removekeys = []
for x in pledger.keys():
l = pledger[x][0]
s = pledger[x][1]
f = pledger[x][2]
fw = pledger[x][3]
base_dir = loadedenv['file_table_base'] + '/' + x
if not os.path.isdir(base_dir):
try:
os.makedirs(base_dir)
except:
print("Partition Create failed, it may have been already created for %s" % (base_dir))
if s > loadedenv['file_maxsize'] or (curtime - fw) > loadedenv['file_partmaxage']:
new_file_name = loadedenv['uniq_val'] + "_" + str(curtime) + "." + loadedenv['dest_type']
new_file = base_dir + "/" + new_file_name
if loadedenv['debug'] >= 1:
outreason = ""
if s > loadedenv['file_maxsize']:
outreason = "Max Size"
else:
outreason = "Max Age"
print("%s %s reached - Size: %s - Age: %s - Writing to %s" % (datetime.datetime.now(), outreason, s, curtime - l, new_file))
if loadedenv['dest_type'] == 'json':
if loadedenv['json_gz_compress'] == 1:
if loadedenv['debug'] >= 1:
print("Compressing json files")
with open(f, 'rb') as f_in:
with gzip.open(f + ".gz", 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(f)
f = f + ".gz"
new_file = new_file + ".gz"
shutil.move(f, new_file)
removekeys.append(x)
# If merge_file is 1 then we read in the whole parquet file and output it in one go to eliminate all the row groups from appending
if loadedenv['dest_type'] == 'parquet':
if loadedenv['parq_merge_file'] == 1:
if loadedenv['debug'] >= 1:
print("%s Merging parqfile into to new parquet file" % (datetime.datetime.now()))
inparq = ParquetFile(new_file)
inparqdf = inparq.to_pandas()
tmp_file = loadedenv['tmp_dir'] + "/" + new_file_name
parqwrite(tmp_file, inparqdf, compression=loadedenv['parq_compress'], row_group_offsets=loadedenv['parq_offsets'], has_nulls=loadedenv['parq_has_nulls'])
shutil.move(tmp_file, new_file)
inparq = None
inparqdf = None
for y in removekeys:
del pledger[y]
return pledger
def writeFile(dataar, pledger, curfile, curtime, rowcnt, sizecnt, timedelta):
parts = []
if loadedenv['dest_type'] == 'parquet':
parqdf = pd.DataFrame.from_records([l for l in dataar])
parts = parqdf[loadedenv['file_partition_field']].unique()
if len(parts) == 0:
print("Error: Records without Partition field - Using default Partition of %s" % loadedenv['file_unknownpart']) # Need to do better job here
parts.append(loadedenv['file_unknownpart'])
else:
parts = []
for x in dataar:
try:
p = x[loadedenv['file_partition_field']]
except:
p = loadedenv['file_unknownpart']
if not p in parts:
parts.append(p)
if loadedenv['debug'] >= 1:
print("%s Write Data batch to %s at %s records - Size: %s - Seconds since last write: %s - Partitions in this batch: %s" % (datetime.datetime.now(), curfile, rowcnt, sizecnt, timedelta, parts))
for part in parts:
if loadedenv['dest_type'] == 'parquet':
partdf = parqdf[parqdf[loadedenv['file_partition_field']] == part]
else:
partar = []
for x in dataar:
try:
curpart = x[loadedenv['file_partition_field']]
except:
curpart = loadedenv['file_unknownpart']
if curpart == part:
partar.append(x)
if loadedenv['file_write_live'] == 1:
base_dir = loadedenv['file_table_base'] + "/" + part
else:
base_dir = loadedenv['file_tmp_dir'] + "/" + part
final_file = base_dir + "/" + curfile
if not os.path.isdir(base_dir):
try:
os.makedirs(base_dir)
except:
print("Partition Create failed, it may have been already created for %s" % (base_dir))
if loadedenv['debug'] >= 1:
print("----- Writing partition %s to %s" % (part, final_file))
if loadedenv['dest_type'] == 'parquet':
if not os.path.exists(final_file):
parqwrite(final_file, partdf, compression=loadedenv['parq_compress'], row_group_offsets=loadedenv['parq_offsets'], has_nulls=loadedenv['parq_has_nulls'])
else:
parqwrite(final_file, partdf, compression=loadedenv['parq_compress'], row_group_offsets=loadedenv['parq_offsets'], has_nulls=loadedenv['parq_has_nulls'], append=True)
partdf = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
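# Note: the `one` fixture used by several tests below is provided by pandas'
# shared conftest.py rather than this module; per GH#19012 it parametrizes the
# scalar 1 across several flavors (e.g. python int and numpy integer scalars).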
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
#!/usr/bin/env python
# Author: <NAME> (jsh) [<EMAIL>]
import joblib
import logging
import pathlib
import shutil
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as st
import model_lib as ml
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
_CODEDIR = pathlib.Path(__file__).parent
MODELDIR = _CODEDIR / 'model'
GFPDIR = _CODEDIR / 'gfpdata'
REPFILES = ['bsu_biorep1.csv',
'bsu_biorep2.csv',
'eco_biorep1.csv',
'eco_biorep2.csv']
JOINFILE = GFPDIR / 'joined_reps.tsv'
replicates = list()
for repfile in REPFILES:
repdata = pd.read_csv(GFPDIR / repfile, index_col=0)
repdata = repdata[['relative']].dropna()
replicates.append(repdata)
score = pd.concat(replicates, axis='columns', sort=True).mean(axis='columns')
# "relative" in these files is (C/P)-1 ; downstream assumes C/P
score = score + 1
score = pd.DataFrame(score).reset_index()
score.columns = ['variant', 'y']
origmap = pd.read_csv(GFPDIR / 'gfp.origmap.tsv', sep='\t')
nmm = origmap[['nmm']]
origmap = origmap[['variant', 'original']]
data = | pd.DataFrame(origmap) | pandas.DataFrame |
#coding:utf-8
import os
import sys
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from data import *
from models import *
import torchvision
from torchvision import transforms, utils
from tensorboardX import SummaryWriter
import pandas as pd
# set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
layer_n = int(sys.argv[1])
ckpt_name = "checkpoints/ResNet-%d_cifar10.pth" %(layer_n*6+2)
log_name = "./logs/ResNet-%d_cifar10_log/" %(layer_n*6+2)
#ckpt_name = "checkpoints/PlainNet-%d_cifar10.pth" %(layer_n*6+2)
#log_name = "./logs/PlainNet-%d_cifar10_log/" %(layer_n*6+2)
batch_size = 100
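# The data/models modules imported above are not part of this excerpt. For
# context, an equivalent CIFAR-10 loader pair built directly on torchvision
# (an assumption, not the original data module) could look like this:
def make_cifar10_loaders(batch_size):
    train_tf = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_tf = transforms.Compose([transforms.ToTensor()])
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=train_tf)
    test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=test_tf)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader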
def train(cnn_model, start_epoch, train_loader, test_loader, lr, auto_lr=True):
# train model from scratch
num_epochs = 500
learning_rate = lr
print("lr: %f" %(learning_rate))
optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0001)
criterion = torch.nn.CrossEntropyLoss()
train_writer = SummaryWriter(log_dir=log_name+'train')
test_writer = SummaryWriter(log_dir=log_name+'test')
train_offset = 0
train_iter = 0
for epc in range(num_epochs):
epoch = epc + start_epoch
train_total = 0
train_correct = 0
if (train_iter == 64000):
break
for batch_idx, (data_x, data_y) in enumerate(train_loader):
train_iter = train_offset + epoch * len(train_loader) + batch_idx
if (auto_lr):
if (32000 == train_iter):
learning_rate = learning_rate / 10.
optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0001)
if (48000 == train_iter):
learning_rate = learning_rate / 10.
optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0001)
if (64000 == train_iter):
learning_rate = learning_rate / 10.
optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0001)
if (train_iter == 64000):
break
data_x = data_x.to(device)
data_y = data_y.to(device)
optimizer.zero_grad()
output = cnn_model(data_x)
loss = criterion(output, data_y)
_, predicted = torch.max(output.data, 1)
train_total += batch_size
train_correct += (predicted == data_y).sum().item()
loss.backward()
optimizer.step()
if (train_iter % 10 == 0):
print("Epoch %d/%d, Step %d/%d, iter %d Loss: %f, lr: %f" \
%(epoch, start_epoch+num_epochs, batch_idx, len(train_loader), train_iter, loss.item(), learning_rate))
train_writer.add_scalar('data/loss', loss, train_iter)
if (train_iter % 100 == 0):
train_acc = float(train_correct) / train_total
print("iter %d, Train Accuracy: %f" %(train_iter, train_acc))
print("iter %d, Train correct/count: %d/%d" %(train_iter, train_correct, train_total))
train_writer.add_scalar('data/accuracy', train_acc, train_iter)
train_writer.add_scalar('data/error', 1.0-train_acc, train_iter)
train_total = 0
train_correct = 0
if (train_iter % 100 == 0):
with torch.no_grad():
correct = 0
total = 0
loss = 0
for test_batch_idx, (images, labels) in enumerate(test_loader):
images = images.to(device)
labels = labels.to(device)
outputs = cnn_model(images)
loss += criterion(outputs.squeeze(), labels.squeeze())
_, predicted = torch.max(outputs.data, 1)
total += batch_size
correct += (predicted == labels).sum().item()
loss = float(loss) / len(test_loader)
test_writer.add_scalar('data/loss', loss, train_iter)
acc = float(correct)/total
print("iter %d, Test Accuracy: %f" %(train_iter, acc))
print("iter %d, Test avg Loss: %f" %(train_iter, loss))
test_writer.add_scalar('data/accuracy', acc, train_iter)
test_writer.add_scalar('data/error', 1.0-acc, train_iter)
# save models
state_dict = {"state": cnn_model.state_dict(), "epoch": epoch, "acc": acc, "lr": learning_rate}
torch.save(state_dict, ckpt_name)
print("Model saved! %s" %(ckpt_name))
def test(cnn_model, real_test_loader):
labels = []
ids = []
for batch_idx, (images, image_name) in enumerate(real_test_loader):
images = images.to(device)
outputs = cnn_model(images)
prob = torch.nn.functional.softmax(outputs.data, dim=1)  # explicit dim avoids the implicit-dim deprecation
prob = prob.data.tolist()
_, predicted = torch.max(outputs.data, 1)
print("batch %d/%d" %(batch_idx, len(real_test_loader)))
for name in image_name:
ids.append(os.path.basename(name).split('.')[0])
predicted = predicted.data.tolist()
for item in predicted:
labels.append(item)
submission = | pd.DataFrame({'id': ids, 'label': labels}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 16:38:26 2019
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# mark the maximum in each row
# input corr_mx is a DataFrame; convert it to a numpy array first,
# otherwise the row-wise comparison below does not work
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
f_dB = lambda x : 20 * np.log10(np.abs(x))
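# Quick reference for the amplitude-to-dB helper above:
#   f_dB(1.0) -> 0.0 dB, f_dB(0.1) -> -20.0 dB, f_dB(10.0) -> 20.0 dB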
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_FFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_FFR.pkl')
# remove DC offset
df_FFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_FFR_detrend_data = pd.DataFrame(signal.detrend(df_FFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_FFR_label_temp = pd.DataFrame(df_FFR.iloc[i, 1024:1031].values.reshape(1,7))
df_FFR_detrend = df_FFR_detrend.append(pd.concat([df_FFR_detrend_data, df_FFR_label_temp], axis=1, ignore_index=True))
# set the title of columns
df_FFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_detrend = df_FFR_detrend.reset_index(drop=True)
df_FFR = df_FFR_detrend
# Time domain
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_FFR
df_FFR_avg = pd.DataFrame()
df_FFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_FFR_avg_t = pd.DataFrame(df_FFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# implement the window function
df_FFR_avg_t_win = pd.DataFrame((df_FFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_FFR_label = pd.DataFrame(df_FFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_FFR_avg = df_FFR_avg.append(pd.concat([df_FFR_avg_t, df_FFR_label], axis=1, ignore_index=True))
df_FFR_avg_win = df_FFR_avg_win.append(pd.concat([df_FFR_avg_t_win, df_FFR_label], axis=1, ignore_index=True))  # append to the windowed frame, not df_FFR_avg
# set the title of columns
df_FFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_avg = df_FFR_avg.sort_values(by=["Condition", "Subject"])
df_FFR_avg = df_FFR_avg.reset_index(drop=True)
df_FFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_avg_win = df_FFR_avg_win.sort_values(by=["Condition", "Subject"])
df_FFR_avg_win = df_FFR_avg_win.reset_index(drop=True)
# average across all subjects, test and retest, and keep one sound level
# filter by 'a' vowel and 85 dB
df_FFR_avg_sorted = df_FFR_avg.sort_values(by=["Sound Level", "Vowel", "Subject", "Condition"])
df_FFR_avg_sorted = df_FFR_avg_sorted.reset_index(drop=True)
df_FFR_avg_win_sorted = df_FFR_avg_win.sort_values(by=["Sound Level", "Vowel", "Subject", "Condition"])
df_FFR_avg_win_sorted = df_FFR_avg_win_sorted.reset_index(drop=True)
# filter out the 55/65/75 dB sound levels and keep 85 dB
# keep vowel condition and subject
df_FFR_avg_85 = pd.DataFrame(df_FFR_avg_sorted.iloc[528:, :])
df_FFR_avg_85 = df_FFR_avg_85.reset_index(drop=True)
df_FFR_avg_win_85 = | pd.DataFrame(df_FFR_avg_win_sorted.iloc[528:, :]) | pandas.DataFrame |
import logging
import os
import re
import warnings
# import numpy as np
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import sqlalchemy as sa
# from odo import odo
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import sessionmaker
import spherpro.bro as bro
import spherpro.bromodules.io_anndata as io_anndata
import spherpro.configuration as config
import spherpro.db as db
import spherpro.library as lib
DICT_DB_KEYS = {
"image_id": db.images.image_id.key,
"object_number": db.objects.object_number.key,
"measurement_type": db.measurement_types.measurement_type.key,
"measurement_name": db.measurement_names.measurement_name.key,
"stack_name": db.stacks.stack_name.key,
"plane_id": db.ref_planes.ref_plane_number.key,
"object_id": db.objects.object_id.key,
}
OBJECTS_STACKNAME = "ObjectStack"
OBJECTS_CHANNELNAME = "object"
OBJECTS_PLANEID = "1"
OBJECTS_CHANNELTYPE = "object"
READONLY = "_readonly"
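# Typical usage of the DataStore defined below (illustrative only; the config path
# and call order are assumptions based on the class docstring):
#   store = DataStore()
#   store.read_config("spherpro_config.yml")
#   store.import_data()   # first run: parse the configured CSV outputs and populate the database
#   store.resume_data()   # later runs: reconnect to the existing backend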
class DataStore(object):
"""DataStore
The DataStore class is intended to be used as a storage for spheroid IMC
data.
Methods:
Base:
read_config: read configfile
import_data: reads and writes data to the database
resume_data: reads non-database files and configures backend
"""
def __init__(self):
# init empty properties here
self.experiment_layout = None
self.barcode_key = None
self.well_measurements = None
self.cut_meta = None
self.roi_meta = None
self.channel_meta = None
self.sphere_meta = None
self.measurement_meta_cache = None
self._pannel = None
self._session = None
self._session_maker = None
self.connectors = {
config.CON_SQLITE: db.connect_sqlite,
config.CON_SQLITE + READONLY: db.connect_sqlite_ro,
config.CON_MYSQL: db.connect_mysql,
config.CON_POSTGRESQL: db.connect_postgresql,
}
#########################################################################
#########################################################################
# Import or Resume functions: #
#########################################################################
#########################################################################
def read_config(self, configpath):
"""
Reads the configuration from the given config file path
Args:
configpath: A string denoting the location of the config file
Raises:
YAMLError
"""
self.conf = config.read_configuration(configpath)
def import_data(self, minimal=None):
"""import_data
Reads the Data using the file locations given in the configfile.
Args:
minimal: Bool, if True, the import process only imports values from
the RefStacks and no location values
"""
if minimal is None:
minimal = False
# Read the data based on the config
self._read_experiment_layout()
self._read_barcode_key()
# self._read_measurement_data()
self._read_image_data()
self._read_relation_data()
self._read_stack_meta()
self._populate_db(minimal)
def resume_data(self, readonly=False):
"""resume_data
Reads non-database files and configures backend according to
the configfile.
"""
# Read the data based on the config
# self._read_experiment_layout()
# self._read_barcode_key()
# self._readWellMeasurements()
# self._read_cut_meta()
# self._read_roi_meta()
# self._read_measurement_data()
# self._read_stack_meta()
self._read_pannel()
backend = self.conf[config.BACKEND]
if readonly:
backend += READONLY
self.db_conn = self.connectors[backend](self.conf)
self.bro = bro.Bro(self)
def drop_all(self):
self.db_conn = self.connectors[self.conf[config.BACKEND]](self.conf)
db.drop_all(self.db_conn)
##########################################
# Helper functions used by readData: #
##########################################
def _read_experiment_layout(self):
"""
reads the experiment layout as stated in the config
and saves it in the datastore
"""
if self.conf[config.LAYOUT_CSV][config.PATH] is not None:
sep = self.conf[config.LAYOUT_CSV][config.SEP]
experiment_layout = pd.read_csv(
self.conf[config.LAYOUT_CSV][config.PATH], sep=sep
)
# rename the columns
rename_dict = {
self.conf[config.LAYOUT_CSV][c]: target
for c, target in [
(config.LAYOUT_CSV_COND_ID, db.conditions.condition_id.key),
(config.LAYOUT_CSV_COND_NAME, db.conditions.condition_name.key),
(config.LAYOUT_CSV_TIMEPOINT_NAME, db.conditions.time_point.key),
(config.LAYOUT_CSV_BARCODE, db.conditions.barcode.key),
(
config.LAYOUT_CSV_CONCENTRATION_NAME,
db.conditions.concentration.key,
),
(config.LAYOUT_CSV_BC_PLATE_NAME, db.conditions.bc_plate.key),
(config.LAYOUT_CSV_PLATE_NAME, db.conditions.plate_id.key),
(config.LAYOUT_CSV_WELL_NAME, db.conditions.well_name.key),
]
}
experiment_layout = experiment_layout.rename(columns=rename_dict)
self.experiment_layout = experiment_layout.fillna(0)
else:
self.experiment_layout = None
def _read_barcode_key(self):
"""
reads the barcode key as stated in the config
"""
conf_bc = self.conf[config.BARCODE_CSV]
conf_layout = self.conf[config.LAYOUT_CSV]
path = conf_bc[config.PATH]
if path is not None:
# Load the barcode key
sep = conf_bc[config.SEP]
barcodes = pd.read_csv(path, sep=sep)
# Adapt the names
rename_dict = {
conf_bc[config.BC_CSV_PLATE_NAME]: conf_layout[
config.LAYOUT_CSV_BC_PLATE_NAME
],
conf_bc[config.BC_CSV_WELL_NAME]: db.conditions.well_name.key,
}
barcodes = barcodes.rename(columns=rename_dict)
# Convert the barcode key to a dictionary string
barcodes = barcodes.set_index(list(rename_dict.values()))
barcodes = (
barcodes.transpose()
# converts the barcodes to a string dictionary
.apply(lambda x: str(x.to_dict()))
)
barcodes = barcodes.rename(db.conditions.barcode.key)
barcodes = barcodes.reset_index(drop=False)
self.barcode_key = barcodes
else:
self.barcode_key = None
def _read_objtype_measurements(self, object_type, chunksize):
conf_meas = self.conf[config.CPOUTPUT][config.MEASUREMENT_CSV]
sep = conf_meas[config.SEP]
cpdir = self.conf[config.CP_DIR]
filetype = conf_meas[config.FILETYPE]
reader = pd.read_csv(
os.path.join(cpdir, object_type + filetype), sep=sep, chunksize=chunksize
)
if chunksize is None:
reader = [reader]
for dat_objmeas in reader:
rename_dict = {
self.conf[config.OBJECTNUMBER]: db.objects.object_number.key,
self.conf[config.IMAGENUMBER]: db.images.image_number.key,
}
dat_objmeas.rename(columns=rename_dict, inplace=True)
dat_objmeas[db.objects.object_type.key] = object_type
yield dat_objmeas
def _read_image_data(self):
cpdir = self.conf[config.CP_DIR]
rename_dict = {self.conf[config.IMAGENUMBER]: db.images.image_number.key}
images_csv = lib.read_csv_from_config(
self.conf[config.CPOUTPUT][config.IMAGES_CSV], base_dir=cpdir
)
images_csv = images_csv.rename(columns=rename_dict)
self._images_csv = images_csv
def _read_relation_data(self):
conf_rel = self.conf[config.CPOUTPUT][config.RELATION_CSV]
cpdir = self.conf[config.CP_DIR]
relation_csv = lib.read_csv_from_config(
self.conf[config.CPOUTPUT][config.RELATION_CSV], base_dir=cpdir
)
col_map = {
conf_rel[c]: target
for c, target in [
(config.OBJECTTYPE_FROM, config.OBJECTTYPE_FROM),
(config.OBJECTTYPE_TO, config.OBJECTTYPE_TO),
(config.OBJECTNUMBER_FROM, config.OBJECTNUMBER_FROM),
(config.OBJECTNUMBER_TO, config.OBJECTNUMBER_TO),
(config.IMAGENUMBER_FROM, config.IMAGENUMBER_FROM),
(config.IMAGENUMBER_TO, config.IMAGENUMBER_TO),
(
config.RELATIONSHIP,
db.object_relation_types.object_relationtype_name.key,
),
]
}
self._relation_csv = relation_csv.rename(columns=col_map)
def _read_stack_meta(self):
"""
reads the stack meta as stated in the config
and saves it in the datastore
"""
stack_dir = self.conf[config.STACK_DIR][config.PATH]
sep = self.conf[config.STACK_DIR][config.SEP]
match = re.compile("(.*)\.csv")
stack_files = [f for f in listdir(stack_dir) if isfile(join(stack_dir, f))]
stack_data = [pd.read_csv(join(stack_dir, n), sep) for n in stack_files]
stack_files = [match.match(name).groups()[0] for name in stack_files]
self.stack_csvs = {stack: data for stack, data in zip(stack_files, stack_data)}
self._stack_relation_csv = lib.read_csv_from_config(
self.conf[config.STACK_RELATIONS]
)
def _read_pannel(self):
"""
Reads the pannel as stated in the config.
"""
self._pannel = lib.read_csv_from_config(self.conf[config.PANNEL_CSV])
def _populate_db(self, minimal):
"""
Writes the tables to the database
"""
self.db_conn = self.connectors[self.conf[config.BACKEND]](self.conf)
self.drop_all()
db.initialize_database(self.db_conn)
self.bro = bro.Bro(self)
self._write_imagemeta_tables()
self._write_masks_table()
self._write_stack_tables()
self._write_refplanes_table()
self._write_planes_table()
self._write_pannel_table()
self._write_condition_table()
self._write_measurement_table(minimal)
self._write_image_stacks_table()
self.reset_valid_objects()
self.reset_valid_images()
# self._write_object_relations_table()
#### Helpers ####
def replace_condition_table(self):
"""
This is used in case an the experiment layout or
barcoding is updated.
Note that this will delete any debarcoding.
"""
# read the tables
self._read_experiment_layout()
self._read_barcode_key()
# delete the link between images and conditions
session = self.main_session
q = session.query(db.images).update({db.images.condition_id.key: None})
# delete the existing table
(session.query(db.conditions).delete())
session.commit()
# write the table
self._write_condition_table()
session.commit()
##########################################
# Database Table Generation: #
##########################################
def _write_stack_tables(self):
"""
Creates the StackModifications, StackRelations, Modifications,
RefStack and DerivedStack tables and writes them to the database
"""
# Modifications
modifications = self._generate_modifications()
self._bulkinsert(modifications, db.modifications)
# RefStacks
refstack = self._generate_refstack()
self._bulkinsert(refstack, db.ref_stacks)
# Stacks
stack = self._generate_stack()
self._bulkinsert(stack, db.stacks)
# StackModifications
stackmodification = self._generate_stackmodification()
self._bulkinsert(stackmodification, db.stack_modifications)
def _generate_modifications(self):
"""
Generates the modification table
"""
parent_col = self.conf[config.STACK_RELATIONS][config.PARENT]
modname_col = self.conf[config.STACK_RELATIONS][config.MODNAME]
modpre_col = self.conf[config.STACK_RELATIONS][config.MODPRE]
stackrel = self._stack_relation_csv.loc[
self._stack_relation_csv[parent_col] != "0"
]
Modifications = | pd.DataFrame(stackrel[modname_col]) | pandas.DataFrame |
# coding=utf-8
#
# Copyright (c) 2010-2015 Illumina, Inc.
# All rights reserved.
#
# This file is distributed under the simplified BSD license.
# The full text can be found here (and in LICENSE.txt in the root folder of
# this distribution):
#
# https://github.com/Illumina/licenses/blob/master/Simplified-BSD-License.txt
"""
Date: 2/10/2015
Author: <NAME> <<EMAIL>>
"""
import pandas
import logging
import re
from Tools.vcfextract import vcfExtract, extractHeadersJSON
def extractMutectSNVFeatures(vcfname, tag, avg_depth=None):
""" Return a data frame with features collected from the given VCF, tagged by given type """
records = []
if not avg_depth:
logging.warn("No average depths available, normalized depth features cannot be calculated")
hdrs = extractHeadersJSON(vcfname)
tsn = ""
nsn = ""
t_sample = "S.1."
n_sample = "S.2."
try:
samples = hdrs["samples"]
for f in hdrs["fields"]:
if f["key"] == "GATKCommandLine" and f["values"]["ID"].lower() == "mutect":
clopts = f["values"]["CommandLineOptions"]
# ... tumor_sample_name=HCC2218_tumour ... normal_sample_name=HCC2218_normal
m = re.search("tumor_sample_name=([^\s]+)", clopts)
if m:
tsn = m.group(1)
for i, x in enumerate(samples):
if x == tsn:
t_sample = "S.%i." % (i+1)
break
m = re.search("normal_sample_name=([^\s]+)", clopts)
if m:
nsn = m.group(1)
for i, x in enumerate(samples):
if x == nsn:
n_sample = "S.%i." % (i+1)
break
except:
logging.warn("Unable to detect tumour / normal sample order from VCF header")
logging.info("Normal sample name : %s (prefix %s) / tumour sample name : %s (prefix %s)" % (nsn, n_sample,
tsn, t_sample))
features = ["CHROM", "POS", "REF", "ALT", "FILTER",
"I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
"I.HCNT", "I.MAX_ED", "I.MIN_ED",
n_sample + "GT", t_sample + "GT",
n_sample + "DP", t_sample + "DP",
n_sample + "QSS", t_sample + "QSS",
n_sample + "AD", t_sample + "AD"]
has_warned = {}
for vr in vcfExtract(vcfname, features):
rec = {}
for i, ff in enumerate(features):
rec[ff] = vr[i]
for q in [n_sample + "GT", t_sample + "GT"]:
if not q in rec or rec[q] is None:
rec[q] = "."
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
# fix missing features
for q in ["I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
"I.HCNT", "I.MAX_ED", "I.MIN_ED",
n_sample + "GT", t_sample + "GT",
n_sample + "DP", t_sample + "DP",
n_sample + "QSS", t_sample + "QSS",
n_sample + "AD", t_sample + "AD"]:
if not q in rec or rec[q] is None:
rec[q] = 0
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
else:
# list features
if q.endswith("AD") or q.endswith("QSS"):
if type(rec[q]) is not list:
if not q + "_PARSE_FAIL" in has_warned:
logging.warn("Cannot parse %s: %s" % (q, str(rec[q])))
has_warned[q + "_PARSE_FAIL"] = True
rec[q] = [0] * (1 + len(rec["ALT"]))
for xx in range(0, 1 + len(rec["ALT"])):
if len(rec[q]) <= xx:
rec[q].append(0)
else:
try:
rec[q][xx] = float(rec[q][xx])
except ValueError:
rec[q][xx] = 0
else:
try:
rec[q] = int(rec[q])
except ValueError:
rec[q] = -1
rec["tag"] = tag
TLOD = float(rec["I.TLOD"])
NLOD = float(rec["I.NLOD"])
n_DP = float(rec[n_sample + "DP"])
t_DP = float(rec[t_sample + "DP"])
n_DP_ratio = 0
t_DP_ratio = 0
if avg_depth:
if rec["CHROM"] in avg_depth:
n_DP_ratio = n_DP/float(avg_depth[rec["CHROM"]])
t_DP_ratio = t_DP/float(avg_depth[rec["CHROM"]])
elif not rec["CHROM"] in has_warned:
logging.warn("Cannot normalize depths on %s" % rec["CHROM"])
has_warned[rec["CHROM"]] = True
elif not "DPnorm" in has_warned:
logging.warn("Cannot normalize depths.")
has_warned["DPnorm"] = True
n_allele_ref_count = rec[n_sample + "AD"][0]
alleles_alt = rec["ALT"]
if alleles_alt == ['.']:
n_allele_alt_count = 0
else:
n_allele_alt_count = 0
for a in xrange(0, len(alleles_alt)):
n_allele_alt_count += float(rec[n_sample + "AD"][a + 1])
if n_allele_alt_count + n_allele_ref_count == 0:
n_allele_rate = 0
else:
n_allele_rate = n_allele_alt_count / float(n_allele_alt_count + n_allele_ref_count)
t_allele_ref_count = rec[t_sample + "AD"][0]
alleles_alt = rec["ALT"]
if alleles_alt == ['.']:
t_allele_alt_count = 0
else:
t_allele_alt_count = 0
for a in xrange(0, len(alleles_alt)):
t_allele_alt_count += float(rec[t_sample + "AD"][a + 1])
if t_allele_alt_count + t_allele_ref_count == 0:
t_allele_rate = 0
else:
t_allele_rate = t_allele_alt_count / float(t_allele_alt_count + t_allele_ref_count)
# Gather the computed data into a dict
qrec = {
"CHROM": rec["CHROM"],
"POS": int(rec["POS"]),
"REF": rec["REF"],
"ALT": ",".join(rec["ALT"]),
"FILTER": ",".join(rec["FILTER"]),
"DBSNP": rec["I.DB"],
"TLOD": TLOD,
"NLOD": NLOD,
"N_DP": n_DP,
"T_DP": t_DP,
"N_DP_RATE" : n_DP_ratio,
"T_DP_RATE" : t_DP_ratio,
"N_GT": rec[n_sample + "GT"],
"T_GT": rec[t_sample + "GT"],
"N_AD": rec[n_sample + "AD"],
"T_AD": rec[t_sample + "AD"],
"N_QSS": rec[n_sample + "QSS"],
"T_QSS": rec[t_sample + "QSS"],
"N_AF": n_allele_rate,
"T_AF": t_allele_rate,
"ECNT": rec["I.ECNT"],
"HCNT": rec["I.HCNT"],
"MAX_ED": rec["I.MAX_ED"],
"MIN_ED": rec["I.MIN_ED"],
"tag" : tag
}
records.append(qrec)
cols = ["CHROM", "POS", "REF", "ALT",
"FILTER", "TLOD", "NLOD", "DBSNP",
"N_DP", "T_DP", "N_DP_RATE", "T_DP_RATE", "N_GT", "T_GT",
"N_AD", "T_AD", "N_QSS", "T_QSS",
"N_AF", "T_AF",
"tag"]
if records:
df = pandas.DataFrame(records, columns=cols)
else:
df = pandas.DataFrame(columns=cols)
return df
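# Illustrative call of the SNV extractor above (file name, tag label and depth map
# are placeholders, not values from the original pipeline):
#   snv_features = extractMutectSNVFeatures("mutect_calls.vcf.gz", "TP",
#                                           avg_depth={"chr1": 34.8, "chr2": 35.1})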
def extractMutectIndelFeatures(vcfname, tag, avg_depth=None):
""" Return a data frame with features collected from the given VCF, tagged by given type """
records = []
if not avg_depth:
logging.warn("No average depths available, normalized depth features cannot be calculated")
hdrs = extractHeadersJSON(vcfname)
tsn = ""
nsn = ""
t_sample = "S.1."
n_sample = "S.2."
try:
samples = hdrs["samples"]
for f in hdrs["fields"]:
if f["key"] == "GATKCommandLine" and f["values"]["ID"].lower() == "mutect":
clopts = f["values"]["CommandLineOptions"]
# ... tumor_sample_name=HCC2218_tumour ... normal_sample_name=HCC2218_normal
m = re.search("tumor_sample_name=([^\s]+)", clopts)
if m:
tsn = m.group(1)
for i, x in enumerate(samples):
if x == tsn:
t_sample = "S.%i." % (i+1)
break
m = re.search("normal_sample_name=([^\s]+)", clopts)
if m:
nsn = m.group(1)
for i, x in enumerate(samples):
if x == nsn:
n_sample = "S.%i." % (i+1)
break
except:
logging.warn("Unable to detect tumour / normal sample order from VCF header")
logging.info("Normal sample name : %s (prefix %s) / tumour sample name : %s (prefix %s)" % (nsn, n_sample,
tsn, t_sample))
features = ["CHROM", "POS", "REF", "ALT", "FILTER",
"I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
"I.HCNT", "I.MAX_ED", "I.MIN_ED",
"I.RPA", "I.RU", # indel only
n_sample + "GT", t_sample + "GT",
n_sample + "DP", t_sample + "DP",
n_sample + "QSS", t_sample + "QSS",
n_sample + "AD", t_sample + "AD"]
has_warned = {}
for vr in vcfExtract(vcfname, features):
rec = {}
for i, ff in enumerate(features):
rec[ff] = vr[i]
for q in [n_sample + "GT", t_sample + "GT"]:
if not q in rec or rec[q] is None:
rec[q] = "."
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
# fix missing features
for q in ["I.DB", "I.TLOD", "I.NLOD", "I.ECNT",
"I.HCNT", "I.MAX_ED", "I.MIN_ED",
"I.RPA", "I.RU",
n_sample + "GT", t_sample + "GT",
n_sample + "DP", t_sample + "DP",
n_sample + "QSS", t_sample + "QSS",
n_sample + "AD", t_sample + "AD"]:
if not q in rec or rec[q] is None:
rec[q] = 0
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
else:
# list features
if q.endswith("AD") or q.endswith("QSS") or q.endswith("RPA"):
if type(rec[q]) is not list:
if not q + "_PARSE_FAIL" in has_warned:
logging.warn("Cannot parse %s: %s" % (q, str(rec[q])))
has_warned[q + "_PARSE_FAIL"] = True
rec[q] = [0] * (1 + len(rec["ALT"]))
for xx in range(0, 1 + len(rec["ALT"])):
if len(rec[q]) <= xx:
rec[q].append(0)
else:
try:
rec[q][xx] = float(rec[q][xx])
except ValueError:
rec[q][xx] = 0
else:
try:
rec[q] = int(rec[q])
except ValueError:
rec[q] = -1
rec["tag"] = tag
TLOD = float(rec["I.TLOD"])
NLOD = float(rec["I.NLOD"])
n_DP = float(rec[n_sample + "DP"])
t_DP = float(rec[t_sample + "DP"])
n_DP_ratio = 0
t_DP_ratio = 0
if avg_depth:
if rec["CHROM"] in avg_depth:
n_DP_ratio = n_DP/float(avg_depth[rec["CHROM"]])
t_DP_ratio = t_DP/float(avg_depth[rec["CHROM"]])
elif not rec["CHROM"] in has_warned:
logging.warn("Cannot normalize depths on %s" % rec["CHROM"])
has_warned[rec["CHROM"]] = True
elif not "DPnorm" in has_warned:
logging.warn("Cannot normalize depths.")
has_warned["DPnorm"] = True
n_allele_ref_count = rec[n_sample + "AD"][0]
alleles_alt = rec["ALT"]
if alleles_alt == ['.']:
n_allele_alt_count = 0
else:
n_allele_alt_count = 0
for a in xrange(0, len(alleles_alt)):
n_allele_alt_count += float(rec[n_sample + "AD"][a + 1])
if n_allele_alt_count + n_allele_ref_count == 0:
n_allele_rate = 0
else:
n_allele_rate = n_allele_alt_count / float(n_allele_alt_count + n_allele_ref_count)
t_allele_ref_count = rec[t_sample + "AD"][0]
alleles_alt = rec["ALT"]
if alleles_alt == ['.']:
t_allele_alt_count = 0
else:
t_allele_alt_count = 0
for a in xrange(0, len(alleles_alt)):
t_allele_alt_count += float(rec[t_sample + "AD"][a + 1])
if t_allele_alt_count + t_allele_ref_count == 0:
t_allele_rate = 0
else:
t_allele_rate = t_allele_alt_count / float(t_allele_alt_count + t_allele_ref_count)
# Gather the computed data into a dict
qrec = {
"CHROM": rec["CHROM"],
"POS": int(rec["POS"]),
"REF": rec["REF"],
"ALT": ",".join(rec["ALT"]),
"FILTER": ",".join(rec["FILTER"]),
"DBSNP": rec["I.DB"],
"TLOD": TLOD,
"NLOD": NLOD,
"N_DP": n_DP,
"T_DP": t_DP,
"N_DP_RATE" : n_DP_ratio,
"T_DP_RATE" : t_DP_ratio,
"N_GT": rec[n_sample + "GT"],
"T_GT": rec[t_sample + "GT"],
"N_AD": rec[n_sample + "AD"],
"T_AD": rec[t_sample + "AD"],
"N_QSS": rec[n_sample + "QSS"],
"T_QSS": rec[t_sample + "QSS"],
"N_AF": n_allele_rate,
"T_AF": t_allele_rate,
"ECNT": rec["I.ECNT"],
"HCNT": rec["I.HCNT"],
"MAX_ED": rec["I.MAX_ED"],
"MIN_ED": rec["I.MIN_ED"],
"I.RPA": rec["I.RPA"],
"I.RU": rec["I.RU"],
"tag" : tag
}
records.append(qrec)
cols = ["CHROM", "POS", "REF", "ALT",
"FILTER", "TLOD", "NLOD", "DBSNP",
"N_DP", "T_DP", "N_DP_RATE", "T_DP_RATE", "N_GT", "T_GT",
"N_AD", "T_AD", "N_QSS", "T_QSS",
"N_AF", "T_AF",
"tag"]
if records:
df = | pandas.DataFrame(records, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
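# Worked example of the sliding window above: with look_back=2 and
# dataset = [[10], [20], [30], [40], [50]], create_dataset returns
#   dataX = [[10, 20], [20, 30]]   and   dataY = [30, 40]
# (note the loop bound len(dataset)-look_back-1 stops one window early).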
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
res[j] = predicted[j] / numpy.mean(actual)  # this module imports numpy, not np
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import tensorflow as tf
numpy.random.seed(1234)
tf.random.set_seed(1234)
from sklearn.linear_model import LinearRegression
grid = LinearRegression()
grid.fit(X,y)
y_pred_train_lr= grid.predict(X)
y_pred_test_lr= grid.predict(X1)
y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
return mape,rmse,mae
# In[26]:
def svr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.svm import SVR
grid = SVR()
grid.fit(X,y)
y_pred_train_svr= grid.predict(X)
y_pred_test_svr= grid.predict(X1)
y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.neural_network import MLPRegressor
model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
numpy.random.seed(1234)
# make predictions
y_pred_train = model.predict(X)
y_pred_test = model.predict(X1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_test= sc_y.inverse_transform (y1)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train_rf= grid.predict(X)
y_pred_test_rf= grid.predict(X1)
y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_rf= sc_y.inverse_transform (y_pred_test_rf)
y_pred_train1_rf=sc_y.inverse_transform (y_pred_train_rf)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_rf)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_rf)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_rf)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_rf))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_rf)
return mape,rmse,mae
# In[29]:
def lstm_model(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX1 = numpy.reshape(X, (X.shape[0],1,X.shape[1]))
testX1 = numpy.reshape(X1, (X1.shape[0],1,X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
neuron=neuron
model = Sequential()
model.add(LSTM(units = neuron,input_shape=(trainX1.shape[1], trainX1.shape[2])))
model.add(Dense(1))
optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(loss='mse',optimizer=optimizer)
# model.summary()
    # Fitting the RNN to the Training set
model.fit(trainX1, y, epochs = epoch, batch_size = batch_size,verbose=0)
# make predictions
y_pred_train = model.predict(trainX1)
y_pred_test = model.predict(testX1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y1=pd.DataFrame(y1)
y_test= sc_y.inverse_transform (y1)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[30]:
###################################################hybrid based ceemdan####################################################
def hybrid_ceemdan_rf(datass,look_back,data_partition,max_features):
import numpy as np
import pandas as pd
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
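    # Each column of data_imf is one intrinsic mode function (IMF) of the
    # original series; a separate model is fitted to every IMF below.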
import pandas as pd
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
for col in data_imf:
datasetss2=pd.DataFrame(data_imf[col])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train= grid.predict(X)
y_pred_test= grid.predict(X1)
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_train=pd.DataFrame(y_pred_train)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_pred_train1= sc_y.inverse_transform (y_pred_train)
pred_test.append(y_pred_test1)
test_ori.append(y_test)
pred_train.append(y_pred_train1)
train_ori.append(y_train)
result_pred_test= pd.DataFrame.from_records(pred_test)
result_pred_train= pd.DataFrame.from_records(pred_train)
a=result_pred_test.sum(axis = 0, skipna = True)
b=result_pred_train.sum(axis = 0, skipna = True)
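    # CEEMDAN is an additive decomposition, so summing the per-IMF forecasts
    # (rows of the frames above) reconstructs the forecast of the original series.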
dataframe= | pd.DataFrame(dfs) | pandas.DataFrame |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
| Index(['a', np.nan]) | pandas.core.index.Index |
# Standard Library
import asyncio
import logging
import os
from datetime import datetime
# Third Party
import pandas as pd
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk
from masker import LogMasker
from nats_wrapper import NatsWrapper
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(message)s")
ES_ENDPOINT = os.environ["ES_ENDPOINT"]
es = AsyncElasticsearch(
[ES_ENDPOINT],
port=9200,
http_auth=("admin", "admin"),
http_compress=True,
verify_certs=False,
use_ssl=True,
timeout=10,
max_retries=5,
retry_on_timeout=True,
)
async def doc_generator(df):
df["_index"] = "logs"
df["anomaly_predicted_count"] = 0
df["nulog_anomaly"] = False
df["drain_anomaly"] = False
df["nulog_confidence"] = -1.0
df["drain_matched_template_id"] = -1.0
df["drain_matched_template_support"] = -1.0
df["anomaly_level"] = "Normal"
for index, document in df.iterrows():
doc_dict = document.to_dict()
yield doc_dict
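# Minimal sketch (not part of the original consumer) of how the documents
# yielded above would typically be drained into Elasticsearch; the target
# index comes from the "_index" field set on each row, and the function name
# update_es_logs is an assumption for illustration only.
async def update_es_logs(df):
    async for ok, result in async_streaming_bulk(es, doc_generator(df)):
        if not ok:
            logging.error("failed to index document: %s", result)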
async def consume_logs(nw, mask_logs_queue):
async def subscribe_handler(msg):
payload_data = msg.data.decode()
await mask_logs_queue.put( | pd.read_json(payload_data, dtype={"_id": object}) | pandas.read_json |
from flask import Flask, render_template, Response, request
import jinja2
from flask_httpauth import HTTPBasicAuth
import hashlib, os, binascii
import json
import numpy as np
import pandas as pd
from os import path
import datetime
app = Flask(__name__)
auth = HTTPBasicAuth()
users = {
"yousef": "3d4dcf6<PASSWORD>63857d19a166007903ae36629ed17c7f9d66f8da981d5fc0ffb371b3ea7ae17e0c9e25987238eaa55cd7a4a64c1a680dcc5718a3be45b65f0d893defe664c894318d1002441740d5aedb327d2f293019a75d96f30346b27e631a"}
@auth.verify_password
def verify_password(username, provided_password):
if username in users:
stored_password=users.get(username)
salt = stored_password[:64]
stored_password = stored_password[64:]
pwdhash = hashlib.pbkdf2_hmac('sha512',
provided_password.encode('utf-8'),
salt.encode('ascii'),
100000)
pwdhash = binascii.hexlify(pwdhash).decode('ascii')
return pwdhash == stored_password
return False
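# Hypothetical counterpart to verify_password (not part of the original app):
# one way to generate a stored entry with the same layout, a 64-hex-char salt
# followed by a PBKDF2-HMAC-SHA512 digest, when registering a user.
def hash_password(provided_password):
    salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    pwdhash = hashlib.pbkdf2_hmac('sha512',
                                  provided_password.encode('utf-8'),
                                  salt, 100000)
    return (salt + binascii.hexlify(pwdhash)).decode('ascii')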
@app.route('/')
@auth.login_required
def index_page():
return render_template('index.html')
@app.route('/fick_room_air', methods=["POST","GET"])
@auth.login_required
def fick_room_air(fra_out={}, err_msg=None, show_exp=False):
if request.method == 'POST':
if (request.form['pat_id'] and request.form['vo2'] and request.form['hg'] and request.form['pv'] and request.form['pa']
and request.form['ao'] and request.form['mv'] and request.form['tp'] and request.form['ts']):
pat_id=request.form['pat_id']
vo2=float(request.form['vo2'])
hg=float(request.form['hg'])
pv=float(request.form['pv'])*0.01
pa=float(request.form['pa'])*0.01
ao=float(request.form['ao'])*0.01
mv=float(request.form['mv'])*0.01
tp=float(request.form['tp'])
ts=float(request.form['ts'])
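            # Fick principle: flow = VO2 / arteriovenous O2-content difference,
            # with O2 content per litre of blood approximated as
            # 1.36 mL O2/g Hb * Hgb (g/dL) * saturation * 10 = 13.6 * Hgb * sat.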
if (pv-pa)==0:
qp="-"
else:
qp=round(vo2/((13.6*hg)*(pv-pa)),2)
if (ao-mv)==0:
qs="-"
else:
qs=round(vo2/((13.6*hg)*(ao-mv)),2)
qpqs=round(qp/qs,2)
pvr=round(tp/qp,2)
svr=round(ts/qs,2)
rprs=round(pvr/svr,2)
data_out=[[pat_id, str(vo2),str(hg),str(pv),str(pa),str(ao),str(mv),str(tp),str(ts),str(qp),str(qs),str(qpqs),str(pvr),str(svr),str(rprs)]]
tables = pd.DataFrame(data = data_out, columns = ['patient_id','VO2','Hemoglobin','PV(sat)','PA(sat)','Ao(sat)','MV(sat)','TransPulmonary','TransSystemic','Qp (L/min/m^2)','QS (L/min/m^2)','Qp/Qs','PVR (U*m^2)','SVR (U*m^2)','Rp/Rs'])
tables.to_csv('data/fick_room_air.csv', index=False)
fra_out={}
fra_out['out_qp']=qp
fra_out['out_qs']=qs
fra_out['out_qpqs']=qpqs
fra_out['out_pvr']=pvr
fra_out['out_svr']=svr
fra_out['out_rprs']=rprs
return render_template('fick_room_air.html', **fra_out , show_exp=True)
return render_template('fick_room_air.html', err_msg='fill all inputs')
return render_template('fick_room_air.html')
@app.route('/export_fick_room_air')
@auth.login_required
def export_fick_room():
df= pd.read_csv('data/fick_room_air.csv')
csv=df.to_csv(index=False)
return Response(
csv,
mimetype="text/csv",
headers={"Content-disposition":
"attachment; filename=fick_room_air.csv"})
@app.route('/fick_nitric_oxide', methods=["POST","GET"])
@auth.login_required
def fick_nitric_oxide(tables=None, err_msg=None, show_exp=False):
if request.method == 'POST':
if (request.form['pat_id'] and request.form['vo2'] and request.form['hg'] and request.form['pv'] and request.form['pv_paO2']
and request.form['pa'] and request.form['pa_paO2'] and request.form['ao'] and request.form['ao_paO2']
and request.form['mv'] and request.form['mv_paO2'] and request.form['tp'] and request.form['ts']):
pat_id=request.form['pat_id']
vo2=float(request.form['vo2'])
hg=float(request.form['hg'])
pv=float(request.form['pv'])*0.01
pv_paO2=float(request.form['pv_paO2'])
pa=float(request.form['pa'])*0.01
pa_paO2=float(request.form['pa_paO2'])
ao=float(request.form['ao'])*0.01
ao_paO2=float(request.form['ao_paO2'])
mv=float(request.form['mv'])*0.01
mv_paO2=float(request.form['mv_paO2'])
tp=float(request.form['tp'])
ts=float(request.form['ts'])
if (pv-pa)==0:
qp="-"
else:
qp=round(vo2/((13.6*hg*pv+(0.03*pv_paO2)-(13.6*hg*pa+(0.03*pa_paO2)))),2)
if (ao-mv)==0:
qs="-"
else:
qs=round(vo2/((13.6*hg*ao+(0.03*ao_paO2)-(13.6*hg*mv+(0.03*mv_paO2)))),2)
qpqs=round(qp/qs,2)
pvr=round(tp/qp,2)
svr=round(ts/qs,2)
rprs=round(pvr/svr,2)
data_out=[[pat_id,str(vo2),str(hg),str(pv),str(pv_paO2),str(pa),str(pa_paO2),str(ao),str(ao_paO2),str(mv),str(mv_paO2),str(tp),str(ts),str(qp),str(qs),str(qpqs),str(pvr),str(svr),str(rprs)]]
tables = pd.DataFrame(data = data_out, columns = ['patient_id','VO2','Hemoglobin','PV(sat)','PV_PaO2','PA(sat)','PA_PaO2','Ao(sat)','AO_PaO2','MV(sat)','MV_PaO2','TransPulmonary','TransSystemic','Qp (L/min/m^2)','QS (L/min/m^2)','Qp/Qs','PVR (U*m^2)','SVR (U*m^2)','Rp/Rs'])
tables.to_csv('data/fick_nitric_oxide.csv', index=False)
fra_out={}
fra_out['out_qp']=qp
fra_out['out_qs']=qs
fra_out['out_qpqs']=qpqs
fra_out['out_pvr']=pvr
fra_out['out_svr']=svr
fra_out['out_rprs']=rprs
return render_template('fick_nitric_oxide.html', **fra_out, show_exp=True)
return render_template('fick_nitric_oxide.html', err_msg='fill all inputs')
return render_template('fick_nitric_oxide.html')
@app.route('/export_fick_nitric')
@auth.login_required
def export_fick_nitric():
df= pd.read_csv('data/fick_nitric_oxide.csv')
csv=df.to_csv(index=False)
return Response(
csv,
mimetype="text/csv",
headers={"Content-disposition":
"attachment; filename=fick_nitric_oxide.csv"})
@app.route('/MR_Flow', methods=["POST","GET"])
@auth.login_required
def MR_Flow(tables=None, err_msg=None):
if request.method == 'POST':
if (request.form['BSA'] and request.form['HR'] and request.form['AoV'] and request.form['DAo'] and request.form['SVC'] and request.form['IVC']
and request.form['RPA'] and request.form['RPV'] and request.form['LPA'] and request.form['LPV']
and request.form['RUPV'] and request.form['LUPV']):
# Define the input variables input from the form
pat_id = str(request.form['pat_id'])
BSA = float(request.form['BSA'])
HR = float(request.form['HR'])
Aov=float(request.form['AoV'])
DAo=float(request.form['DAo'])
SVC=float(request.form['SVC'])
IVC=float(request.form['IVC'])
RPA=float(request.form['RPA'])
RPV=float(request.form['RPV'])
LPA=float(request.form['LPA'])
LPV=float(request.form['LPV'])
RUPV=float(request.form['RUPV'])
LUPV=float(request.form['LUPV'])
# Perform collateral related calculations
collateralAmts, collateralPcnts1, collateralPcnts2 = calcCollateralAmt(HR, Aov, DAo, SVC, IVC, RPA, RPV, LPA, LPV, RUPV, LUPV)
# Perform the flow related calculations
calcFlowVars1, calcFlowVars2, calcFlowVars3, calcFlowVars4 = calcFlowVariables(HR, BSA, Aov, DAo, SVC, IVC, RPA, RPV, LPA, LPV, RUPV, LUPV, collateralAmts)
# Collateral amounts output into data table, always 4 items
collatAmounts =[str(collateralAmts[0]),str(collateralAmts[1]),str(collateralAmts[2]),str(collateralAmts[3])]
# Build collateral output
collat_out={}
collat_out['collat_m1']= collatAmounts[0]
collat_out['collat_m2']= collatAmounts[1]
collat_out['collat_m3']= collatAmounts[2]
collat_out['collat_m4']= collatAmounts[3]
collat_out['collat_m1p1'] = collateralPcnts1[0]
collat_out['collat_m2p1'] = collateralPcnts1[1]
collat_out['collat_m3p1'] = collateralPcnts1[2]
collat_out['collat_m4p1'] = collateralPcnts1[3]
collat_out['collat_m1p2'] = collateralPcnts2[0]
collat_out['collat_m2p2'] = collateralPcnts2[1]
collat_out['collat_m3p2'] = collateralPcnts2[2]
collat_out['collat_m4p2'] = collateralPcnts2[3]
collat_out['qs_m1'] = calcFlowVars1[0]
collat_out['qes_m1'] = calcFlowVars1[1]
collat_out['qep_m1'] = calcFlowVars1[2]
collat_out['qp_m1'] = calcFlowVars1[3]
collat_out['qpqs_m1'] = calcFlowVars1[4]
collat_out['qepqes_m1'] = calcFlowVars1[5]
collat_out['qepqs_m1'] = calcFlowVars1[6]
collat_out['qs_m2'] = calcFlowVars2[0]
collat_out['qes_m2'] = calcFlowVars2[1]
collat_out['qep_m2'] = calcFlowVars2[2]
collat_out['qp_m2'] = calcFlowVars2[3]
collat_out['qpqs_m2'] = calcFlowVars2[4]
collat_out['qepqes_m2'] = calcFlowVars2[5]
collat_out['qepqs_m2'] = calcFlowVars2[6]
collat_out['qs_m3'] = calcFlowVars3[0]
collat_out['qes_m3'] = calcFlowVars3[1]
collat_out['qep_m3'] = calcFlowVars3[2]
collat_out['qp_m3'] = calcFlowVars3[3]
collat_out['qpqs_m3'] = calcFlowVars3[4]
collat_out['qepqes_m3'] = calcFlowVars3[5]
collat_out['qepqs_m3'] = calcFlowVars3[6]
collat_out['qs_m4'] = calcFlowVars4[0]
collat_out['qes_m4'] = calcFlowVars4[1]
collat_out['qep_m4'] = calcFlowVars4[2]
collat_out['qp_m4'] = calcFlowVars4[3]
collat_out['qpqs_m4'] = calcFlowVars4[4]
collat_out['qepqes_m4'] = calcFlowVars4[5]
collat_out['qepqs_m4'] = calcFlowVars4[6]
qp = calcFlowVars4[3]
qep = calcFlowVars4[2]
qs = calcFlowVars4[0]
qes = calcFlowVars4[1]
qpqs = calcFlowVars4[4]
qepqes = calcFlowVars4[5]
qepqs = calcFlowVars4[6]
# Export data to local csv file
data_out=[[str(pat_id), str(BSA), str(HR), str(Aov), str(DAo), str(SVC), str(IVC), str(RPA), str(RPV), str(LPA), str(LPV), str(RUPV), str(LUPV), str(qp), str(qep), str(qes),str(qs), str(qpqs),str(qepqs), str(qepqes)]]
tables = | pd.DataFrame(data = data_out, columns = ['patient_id', 'BSA', 'HR', 'Aov', 'DAo', 'SVC', 'IVC', 'RPA', 'RPV', 'LPA', 'LPV', 'RUPV', 'LUPV', 'Qp', 'Qep','Qes','Qs','Qp/Qs', 'Qep/Qs','Qep/Qes']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Parser for PreCo dataset
#
# Author: <NAME> <<EMAIL>>
#
# For license information, see LICENSE
import gc
from collections import defaultdict, namedtuple
from enum import Enum
from itertools import chain
from multiprocessing import Pool
from typing import DefaultDict, List, Tuple
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.utils import to_categorical
from nltk import pos_tag
from nltk.data import load
from progress.bar import IncrementalBar
import spacy
from neuralcorefres.feature_extraction.stanford_parse_api import StanfordParseAPI
EMBEDDING_DIM = 300
Cluster = List[str]
Tensor = List[float]
ClusterIndicies = namedtuple('ClusterIndicies', 'sent_idx begin_idx end_idx')
ClusteredSentence = namedtuple('ClusteredSentence', 'sentence clusters')
ClusteredDictKey = namedtuple('ClusteredDictKey', 'id sentence_index sentence')
SPACY_DEP_TAGS = ['acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod', 'appos', 'attr', 'aux', 'auxpass', 'case', 'cc', 'ccomp', 'compound', 'conj', 'cop', 'csubj', 'csubjpass', 'dative', 'dep', 'det', 'dobj', 'expl',
'intj', 'mark', 'meta', 'neg', 'nn', 'nounmod', 'npmod', 'nsubj', 'nsubjpass', 'nummod', 'oprd', 'obj', 'obl', 'parataxis', 'pcomp', 'pobj', 'poss', 'preconj', 'prep', 'prt', 'punct', 'quantmod', 'relcl', 'root', 'xcomp']
nlp = spacy.load('en_core_web_sm')
POS_ONE_HOT_LEN = 45
class PreCoDataType(Enum):
TRAIN = 0
TEST = 1
class EntityCluster:
def __init__(self, entity: Cluster, indices: ClusterIndicies):
self.entity = entity
self.indices = ClusterIndicies(*indices)
def __str__(self):
return f'{self.entity} | {self.indices}'
class PreCoCoreferenceDatapoint:
def __init__(self, id, sents: List[Cluster], sorted_entity_clusters: EntityCluster):
self.id = id
self.sents = sents
self.sorted_entity_clusters = self._get_sorted_clusters(sorted_entity_clusters)
def _get_sorted_clusters(self, clusters) -> List[EntityCluster]:
return sorted(clusters, key=lambda cluster: cluster.indices.sent_idx)
@staticmethod
def parse_sorted_entity_clusters(sentences: List[List[str]], sorted_entity_clusters: List[List[List[int]]]):
"""
Per the PreCo website, mention clusters are in the following form:
[ [ [ sentence_idx, begin_idx, end_idx ] ] ]
Where the end index is one past the last word in the cluster, and all
indicies are zero-based.
Example:
Sentences:
[
[ 'Charlie', 'had', 'fun', 'at', 'the', 'park', '.' ],
[ 'He', 'slid', 'down', 'the', 'slide', '.' ]
]
Mention Clusters:
[
[ [0, 0, 1], [1, 0, 1] ], // Charlie, he
[ [0, 5, 6] ], // park
[ [1, 4, 5] ] // slide
]
"""
clusters = [[EntityCluster(sentences[sent_idx][begin_idx:end_idx], (sent_idx, begin_idx, end_idx))
for sent_idx, begin_idx, end_idx in cluster][0] for cluster in sorted_entity_clusters]
return clusters
def __str__(self):
sub_strs = '\t' + '\n\t'.join([str(cluster) for cluster in self.sorted_entity_clusters])
return f'{self.id}\n{sub_strs}'
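# Hypothetical round-trip of the docstring example above (not part of the
# original parser). Note that only the first mention of each cluster is kept
# because of the [0] inside parse_sorted_entity_clusters.
def _demo_parse_clusters():
    sents = [['Charlie', 'had', 'fun', 'at', 'the', 'park', '.'],
             ['He', 'slid', 'down', 'the', 'slide', '.']]
    mention_clusters = [[[0, 0, 1], [1, 0, 1]], [[0, 5, 6]], [[1, 4, 5]]]
    clusters = PreCoCoreferenceDatapoint.parse_sorted_entity_clusters(sents, mention_clusters)
    return [str(c) for c in clusters]
    # -> ["['Charlie'] | ClusterIndicies(sent_idx=0, begin_idx=0, end_idx=1)", ...]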
_BASE_FILEPATH = '../data/PreCo_1.0/'
_FILE_TYPES = {
PreCoDataType.TRAIN: 'train.json',
PreCoDataType.TEST: 'dev.json'
}
class PreCoParser:
@staticmethod
def get_pos_onehot_map():
return pd.get_dummies(list(load('help/tagsets/upenn_tagset.pickle').keys()))
@staticmethod
def get_spacy_deps_onehot():
return pd.get_dummies(SPACY_DEP_TAGS)
@staticmethod
def get_preco_data(data_type: PreCoDataType, basepath: str = _BASE_FILEPATH, class_type: PreCoCoreferenceDatapoint = PreCoCoreferenceDatapoint):
ret_lst = []
full_filepath = basepath + _FILE_TYPES[data_type]
df = | pd.read_json(full_filepath, lines=True, encoding='ascii') | pandas.read_json |
import pandas as pd
import numpy as np
import time
import collections
def df_set(df_trr, df_weapondischarge, df_trrstatus, sub_weapon_refresh):
"""Set necessary df."""
# Set trr_trr_refresh
df_trr["trr_datetime"] = pd.to_datetime(df_trr["trr_datetime"])
df_trr["beat"] = df_trr["beat"].astype(int)
df_trr["officer_appointed_date"] = pd.to_datetime(
reformat_date(df_trr["officer_appointed_date"])
).strftime("%Y-%m-%d")
df_trr["officer_birth_year"] = df_trr["officer_birth_year"].fillna(0.0).astype(int)
df_trr["officer_birth_year"].replace(0, np.nan, inplace=True)
df_trr["officer_age"] = df_trr["officer_age"].astype(int)
df_trr["officer_on_duty"] = df_trr["officer_on_duty"].astype(bool)
df_trr["officer_injured"] = df_trr["officer_injured"].astype(bool)
df_trr["officer_in_uniform"] = df_trr["officer_in_uniform"].astype(bool)
df_trr["subject_birth_year"] = df_trr["subject_birth_year"].astype(int)
df_trr["subject_age"] = df_trr["subject_age"].astype(int)
df_trr["subject_armed"] = df_trr["subject_armed"].astype(bool)
df_trr["subject_injured"] = df_trr["subject_injured"].astype(bool)
df_trr["subject_alleged_injury"] = df_trr["subject_alleged_injury"].astype(bool)
df_trr["notify_oemc"] = df_trr["notify_oemc"].astype(bool)
df_trr["notify_district_sergeant"] = df_trr["notify_district_sergeant"].astype(bool)
df_trr["notify_op_command"] = df_trr["notify_op_command"].astype(bool)
df_trr["notify_det_division"] = df_trr["notify_det_division"].astype(bool)
df_trr["trr_created"] = pd.to_datetime(df_trr["trr_created"])
# Set trr_weapondischarge_refresh
df_weapondischarge["firearm_reloaded"] = df_weapondischarge[
"firearm_reloaded"
].astype(bool)
df_weapondischarge["sight_used"] = df_weapondischarge["sight_used"].astype(bool)
# Set trr_trrstatus_refresh
df_trrstatus["officer_appointed_date"] = pd.to_datetime(
reformat_date(df_trrstatus["officer_appointed_date"])
).strftime("%Y-%m-%d")
df_trrstatus["officer_birth_year"] = (
df_trrstatus["officer_birth_year"].fillna(0).astype(int)
)
df_trrstatus["officer_birth_year"].replace(0, np.nan, inplace=True)
df_trrstatus["status_datetime"] = pd.to_datetime(df_trrstatus["status_datetime"])
# Set trr_subjectweapon_refresh
# Weapon, change specific items to the correct form.
sub_weapon_refresh.loc[
(sub_weapon_refresh.weapon_type == "CHEMICAL WEAPON")
| (sub_weapon_refresh.weapon_type == "TASER / STUN GUN"),
"weapon_type",
] = "OTHER (SPECIFY)"
sub_weapon_refresh.loc[
(sub_weapon_refresh.weapon_type == "VEHICLE"), "weapon_type"
] = "VEHICLE - ATTEMPTED TO STRIKE OFFICER WITH VEHICLE"
# Change location format
# Indoor/outdoor change specific column.
df_trr.loc[(df_trr.indoor_or_outdoor == "OUTDOOR"), "indoor_or_outdoor"] = "Outdoor"
df_trr.loc[(df_trr.indoor_or_outdoor == "INDOOR"), "indoor_or_outdoor"] = "Indoor"
# Street. 1. change to camel case
df_trr["street"] = df_trr["street"].str.title()
# location. 1. change to camel case 2. change specific items to the correct form.
df_trr["location"] = df_trr["location"].str.title()
df_trr.loc[
(df_trr.location == "Cha Hallway / Stairwell / Elevator"), "location"
] = "Cha Hallway/Stairwell/Elevator"
df_trr.loc[
(df_trr.location == "Cha Parking Lot / Grounds"), "location"
] = "Cha Parking Lot/Grounds"
df_trr.loc[
(df_trr.location == "Church / Synagogue / Place Of Worship"), "location"
] = "Church/Synagogue/Place Of Worship"
df_trr.loc[
(df_trr.location == "College / University - Grounds"), "location"
] = "College/University Grounds"
df_trr.loc[
(df_trr.location == "Factory / Manufacturing Building"), "location"
] = "Factory/Manufacturing Building"
df_trr.loc[
(df_trr.location == "Government Building / Property"), "location"
] = "Government Building/Property"
df_trr.loc[
(df_trr.location == "Highway / Expressway"), "location"
] = "Highway/Expressway"
df_trr.loc[
(df_trr.location == "Hospital Building / Grounds"), "location"
] = "Hospital Building/Grounds"
df_trr.loc[(df_trr.location == "Hotel / Motel"), "location"] = "Hotel/Motel"
df_trr.loc[
(df_trr.location == "Movie House / Theater"), "location"
] = "Movie House/Theater"
df_trr.loc[
(df_trr.location == "Nursing / Retirement Home"), "location"
] = "Nursing Home/Retirement Home"
df_trr.loc[(df_trr.location == "Other (Specify)"), "location"] = "Other"
df_trr.loc[
(df_trr.location == "Other Railroad Property / Train Depot"), "location"
] = "Other Railroad Prop / Train Depot"
df_trr.loc[
(df_trr.location == "Parking Lot / Garage (Non Residential)"), "location"
] = "Parking Lot/Garage(Non.Resid.)"
df_trr.loc[
(df_trr.location == "Other Railroad Property / Train Depot"), "location"
] = "Other Railroad Prop / Train Depot"
df_trr.loc[
(df_trr.location == "Police Facility / Vehicle Parking Lot"), "location"
] = "Police Facility/Veh Parking Lot"
df_trr.loc[
(df_trr.location == "Residence - Porch / Hallway"), "location"
] = "Residence Porch/Hallway"
df_trr.loc[
(df_trr.location == "Residence - Garage"), "location"
] = "Residence-Garage"
df_trr.loc[
(df_trr.location == "Residence - Yard (Front / Back)"), "location"
] = "Residential Yard (Front/Back)"
df_trr.loc[
(df_trr.location == "School - Private Building"), "location"
] = "School, Private, Building"
df_trr.loc[
(df_trr.location == "School - Private Grounds"), "location"
] = "School, Private, Grounds"
df_trr.loc[
(df_trr.location == "School - Public Building"), "location"
] = "School, Public, Building"
df_trr.loc[
(df_trr.location == "School - Public Grounds"), "location"
] = "School, Public, Grounds"
df_trr.loc[
(df_trr.location == "Sports Arena / Stadium"), "location"
] = "Sports Arena/Stadium"
df_trr.loc[
(df_trr.location == "Tavern / Liquor Store"), "location"
] = "Tavern/Liquor Store"
df_trr.loc[(df_trr.location == "Vacant Lot / Land"), "location"] = "Vacant Lot/Land"
df_trr.loc[
(df_trr.location == "Vehicle - Other Ride Share Service (Lyft, Uber, Etc.)"),
"location",
] = "Vehicle - Other Ride Service"
df_trr.loc[
(df_trr.location == "Vehicle - Commercial"), "location"
] = "Vehicle-Commercial"
df_trr.loc[
(df_trr.location == "Cta Parking Lot / Garage / Other Property"), "location"
] = "Cta Garage / Other Property"
df_trr.loc[
(df_trr.location == "Lakefront / Waterfront / Riverbank"), "location"
] = "Lakefront/Waterfront/Riverbank"
df_trr.loc[
(df_trr.location == "Medical / Dental Office"), "location"
] = "Medical/Dental Office"
df_trr.loc[
(df_trr.location == "Airport Parking Lot"), "location"
] = "Airport/Aircraft"
df_trr.loc[
(df_trr.location == "Airport Terminal Mezzanine - Non-Secure Area"), "location"
] = "Airport Terminal Lower Level - Non-Secure Area"
def reformat_date(df):
"""REDACTED => NAN, DD-MM-YY =>YYYY=MM-DD, DD-MON-YY => YYYY-MM-DD"""
appoint_date = []
for i in range(len(df)):
day = df[i]
if pd.isna(day):
appoint_date.append(np.nan)
elif day == "REDACTED":
appoint_date.append(np.nan)
elif len(day) == 8:
if int(day[6:8]) + 2000 > 2021:
appoint_date.append("19" + day[6:8] + "-" + day[0:5])
else:
appoint_date.append("20" + day[6:8] + "-" + day[0:5])
else:
mon = day[5:8]
if mon == "JAN":
appoint_date.append(day[0:5] + "01" + day[8:])
elif mon == "FEB":
appoint_date.append(day[0:5] + "02" + day[8:])
elif mon == "MAR":
appoint_date.append(day[0:5] + "03" + day[8:])
elif mon == "APR":
appoint_date.append(day[0:5] + "04" + day[8:])
elif mon == "MAY":
appoint_date.append(day[0:5] + "05" + day[8:])
elif mon == "JUN":
appoint_date.append(day[0:5] + "06" + day[8:])
elif mon == "JUL":
appoint_date.append(day[0:5] + "07" + day[8:])
elif mon == "AUG":
appoint_date.append(day[0:5] + "08" + day[8:])
elif mon == "SEP":
appoint_date.append(day[0:5] + "09" + day[8:])
elif mon == "OCT":
appoint_date.append(day[0:5] + "10" + day[8:])
elif mon == "NOV":
appoint_date.append(day[0:5] + "11" + day[8:])
elif mon == "DEC":
appoint_date.append(day[0:5] + "12" + day[8:])
return appoint_date
def reconciliation_race(df):
"""Replace the format as race_map shows."""
race_map = dict(
{
"AMER IND/ALASKAN NATIVE": "NATIVE AMERICAN/ALASKAN NATIVE",
"AMER INDIAN / ALASKAN NATIVE": "NATIVE AMERICAN/ALASKAN NATIVE",
"ASIAN / PACIFIC ISLANDER": "ASIAN/PACIFIC ISLANDER",
"ASIAN/PACIFIC ISLANDER": "ASIAN/PACIFIC ISLANDER",
"BLACK": "BLACK",
"UNKNOWN": "NULL",
"UNKNOWN / REFUSED": "NULL",
"HISPANIC": "HISPANIC",
"WHITE": "WHITE",
}
)
for k in race_map:
if k[0:2] == "UN":
df.replace(k, np.nan, inplace=True)
else:
df.replace(k, race_map[k], inplace=True)
def reconciliation_gender(df):
race_map = dict({"FEMALE": "F", "MALE": "M"})
for k in race_map:
df.replace(k, race_map[k], inplace=True)
def reconciliation_birth_year(df):
birth_years = df.to_numpy()
birth_years[birth_years < 100] += 1900
birth_years[birth_years < 200] *= 10
for i in range(1000, 2000, 100):
birth_years[birth_years < i] += 2000 - i
    # .to_numpy() may return a copy, so rebinding ``df`` here never reaches the
    # caller; return the normalized years so the caller can assign them back.
    return pd.DataFrame(birth_years)
def age_sanity(row):
if row < 100:
return 1900 + row
if 100 <= row < 1900:
return 1900 + row % 100
return row
def update_last_suffix(df):
df["suffix_name"] = "NULL"
suffix_list = ["I", "II", "III", "IV", "V", "JR", "SR"]
officer_last_name = []
suffix_name = []
for i in df["officer_last_name"]:
target_str = i.upper().split(" ")
if len(target_str) > 1:
k = 0
for j in suffix_list:
if j == "".join(e for e in target_str[-1] if e.isalnum()):
# print(''.join(e for e in target_str[-1] if e.isalnum()))
suffix_name.append(j)
k += 1
if k == 0:
suffix_name.append(None)
officer_last_name.append(" ".join(target_str).title())
else:
officer_last_name.append(i.title())
suffix_name.append(None)
df["officer_last_name"] = officer_last_name
df["suffix_name"] = suffix_name
def update_first_name(df):
officer_first_name = []
for i in df["officer_first_name"]:
officer_first_name.append(i.title())
df["officer_first_name"] = officer_first_name
def update_race(df):
race_map = dict(
{
"AMER IND/ALASKAN NATIVE": "Native American/Alaskan Native",
"ASIAN/PACIFIC ISLANDER": "Asian/Pacific",
"BLACK": "Black",
"UNKNOWN": "Unknown",
"HISPANIC": "Hispanic",
"WHITE": "White",
"WHITE HISPANIC": "Hispanic",
"BLACK HISPANIC": "Hispanic",
}
)
for k in race_map:
df.replace(k, race_map[k], inplace=True)
def add_officer_id(df, df_officer):
df["officer_id"] = "NULL"
fields = [
"first_name",
"middle_initial",
"last_name",
"suffix_name",
"birth_year",
"appointed_date",
"gender",
"race",
]
officer_fields = [
"officer_first_name",
"officer_middle_initial",
"officer_last_name",
"suffix_name",
"officer_birth_year",
"officer_appointed_date",
"officer_gender",
"officer_race",
]
officer_id = []
index = []
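    # Fuzzy match: for each TRR row, score every officer with +1 per exactly
    # matching field and +0.9 per field where either side is missing, then take
    # the highest-scoring officer as the best match.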
for i, officer in df.iterrows():
count = np.zeros(len(df_officer))
for j in range(8):
count += (
pd.isna(df_officer[fields[j]]) | pd.isna(officer[officer_fields[j]])
).to_numpy() * 0.9
count += (df_officer[fields[j]] == officer[officer_fields[j]]).to_numpy()
index.append(np.argmax(count))
        # Use the positional best match just computed (robust to non-default indexes).
        officer_id.append(df_officer.iloc[index[-1]]["id"])
df["officer_id"] = officer_id
def re_order(
df_trr,
df_unit,
df_actionresponse,
df_weapondischarge,
df_trrstatus,
sub_weapon_refresh,
):
df_trr["unit_name_padded"] = df_trr.officer_unit_name.apply(lambda x: x.zfill(3))
df_trr_new = pd.merge(
df_trr, df_unit, how="left", left_on="unit_name_padded", right_on="unit_name"
)
df_trr_new.rename(
{"id_y": "officer_unit_id", "description": "officer_unit_detail_id"},
axis=1,
inplace=True,
)
df_trr_new.drop(
[
"unit_name_padded",
"tags",
"active",
"created_at",
"updated_at",
"unit_name",
"officer_unit_name",
],
axis=1,
inplace=True,
)
# Instructed order.
trr_output_order = "id, crid, event_id, beat, block, direction, street, location, trr_datetime, indoor_or_outdoor, lighting_condition, weather_condition, notify_OEMC, notify_district_sergeant, notify_OP_command, notify_DET_division, party_fired_first, officer_assigned_beat, officer_on_duty, officer_in_uniform, officer_injured, officer_rank, subject_armed, subject_injured, subject_alleged_injury, subject_age, subject_birth_year, subject_gender, subject_race, officer_id, officer_unit_id, officer_unit_detail_id, point"
trr_actionresponse_output_order = (
"person, resistance_type, action, other_description, trr_id"
)
trr_weapondischarge_output_order = "weapon_type,weapon_type_description,firearm_make,firearm_model,firearm_barrel_length,firearm_caliber,total_number_of_shots,firearm_reloaded,number_of_cartridge_reloaded,handgun_worn_type,handgun_drawn_type,method_used_to_reload,sight_used,protective_cover_used,discharge_distance,object_struck_of_discharge,discharge_position,trr_id"
trr_trrstatus_output_order = (
"rank, star, status, status_datetime, officer_id, trr_id"
)
trr_subjectweapon_output_order = (
"weapon_type, firearm_caliber, weapon_description, trr_id"
)
# Rename columns.
df_trr_new.rename(
{
"id_x": "id",
"event_number": "event_id",
"cr_number": "crid",
"notify_oemc": "notify_OEMC",
"notify_op_command": "notify_OP_command",
"notify_det_division": "notify_DET_division",
},
axis=1,
inplace=True,
)
df_actionresponse.rename({"trr_report_id": "trr_id"}, axis=1, inplace=True)
df_weapondischarge.rename({"trr_report_id": "trr_id"}, axis=1, inplace=True)
df_trrstatus.rename(
{"officer_rank": "rank", "officer_star": "star", "trr_report_id": "trr_id"},
axis=1,
inplace=True,
)
sub_weapon_refresh.rename({"trr_report_id": "trr_id"}, axis=1, inplace=True)
# Reorder columns
df_trr = df_trr_new[trr_output_order.replace(" ", "").split(",")]
df_trrstatus = df_trrstatus[trr_trrstatus_output_order.replace(" ", "").split(",")]
    return df_trr, df_trrstatus
def check_key(
    df_trr,
    df_weapondischarge,
    df_trrstatus,
    df_actionresponse,
    df_charge,
    sub_weapon_refresh,
):
    """Check whether the supporting tables contain rows that do not match any TRR id."""
    # Get unique id of each table key
    # Primary key
    anchor = set(df_trr.id.unique())
    # Foreign keys
    weapondischarge_set = set(df_weapondischarge.trr_report_id.unique())
    trrstatus_set = set(df_trrstatus.trr_report_id.unique())
    actionresponse_set = set(df_actionresponse.trr_report_id.unique())
    charge_set = set(df_charge.trr_report_id.unique())
    sub_weapon_refresh_set = set(sub_weapon_refresh.trr_report_id.unique())
    # Discard unmatched keys; discard_item returns the filtered frame, so the
    # results are captured here and returned for the caller to use.
    df_weapondischarge = discard_item(anchor, weapondischarge_set, df_weapondischarge)
    df_trrstatus = discard_item(anchor, trrstatus_set, df_trrstatus)
    df_actionresponse = discard_item(anchor, actionresponse_set, df_actionresponse)
    df_charge = discard_item(anchor, charge_set, df_charge)
    sub_weapon_refresh = discard_item(anchor, sub_weapon_refresh_set, sub_weapon_refresh)
    return (
        df_weapondischarge,
        df_trrstatus,
        df_actionresponse,
        df_charge,
        sub_weapon_refresh,
    )
def discard_item(anchor, helper, df_name):
    """Drop rows whose trr_report_id does not appear in the set of TRR ids."""
    unused = helper - anchor
    if len(unused):
        print("Has unused.")
        print(unused)
    else:
        print("All good.")
    for key in unused:
        df_name = df_name[df_name.trr_report_id != key]
    # Rebinding inside this function does not affect the caller's frame,
    # so the filtered result must be returned.
    return df_name
def main():
print("Loading Data...")
# Import trr_trr_refresh
df_trr = pd.read_csv("data/trr_trr_refresh.csv")
# Import trr_weapondischarge_refresh
df_weapondischarge = pd.read_csv("data/trr_weapondischarge_refresh.csv")
# Import trr_trrstatus_refresh
df_trrstatus = pd.read_csv("data/trr_trrstatus_refresh.csv")
# Import trr_subjectweapon_refresh
sub_weapon_refresh = pd.read_csv("data/trr_subjectweapon_refresh.csv")
# Import trr_charge_refresh
df_charge = pd.read_csv("data/trr_charge_refresh.csv")
# Import trr_actionresponse_refresh
df_actionresponse = pd.read_csv("data/trr_actionresponse_refresh.csv")
# Import data_officer
df_officer = | pd.read_csv("data/data_officer.csv") | pandas.read_csv |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
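        # Assigning the ``[::2]`` slice aligns on the index, so every other row of
        # "col5" is NaN; the expected column interleaves the slice values with NaN.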
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
| tm.assert_series_equal(df["dates"], column) | pandas._testing.assert_series_equal |
"""
This script creates a boolean mask based on two rules:
1. is the pixel in the boreal forest zone
2. in 2000, was there sufficient forest cover
"""
#==============================================================================
__title__ = "Boreal Forest Mask"
__author__ = "<NAME>"
__version__ = "v1.0(19.08.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
import rasterio
import xarray as xr
from dask.diagnostics import ProgressBar
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import regionmask as rm
import itertools
# Import debugging packages
import ipdb
# from rasterio.warp import transform
from shapely.geometry import Polygon
import geopandas as gpd
from rasterio import features
from affine import Affine
# +++++ Import my packages +++++
import myfunctions.corefunctions as cf
# import MyModules.PlotFunctions as pf
# import MyModules.NetCDFFunctions as ncf
def main():
# ========== Create the dates ==========
force = False
dates = datefixer(2018, 12, 31)
data = datasets()
dsn = "HansenGFC"
tcf = 0.0
region = "SIBERIA"
# ========== select and analysis scale ==========
mwbox = [2, 5, 10] #in decimal degrees
BPT = 0.4
# ========== Set up the filename and global attributes =========
ppath = "/media/ubuntu/Seagate Backup Plus Drive/Data51/BurntArea/HANSEN"
pptex = ({"treecover2000":"FC2000", "lossyear":"lossyear", "datamask":"mask"})
fpath = "%s/FRI/" % ppath
cf.pymkdir(fpath)
# ========== Setup the paths ==========
def _Hansenfile(ppath, pptex, ft):
dpath = "%s/%s/" % (ppath, pptex[ft])
datafn = "%sHansen_GFC-2018-v1.6_%s_%s.nc" % (dpath, ft, region)
# fnout = "%sHansen_GFC-2018-v1.6_forestmask_%s.nc" % (dpath, region)
return xr.open_dataset(datafn, chunks={'latitude': 100})
    # ========== get the datasets ==========
    ds_tc = _Hansenfile(ppath, pptex, "treecover2000")
ds_ly = _Hansenfile(ppath, pptex, "lossyear")
ds_dm = _Hansenfile(ppath, pptex, "datamask")
# Add in the loss year
# ========== Load in a test dataset and fix the time ==========
# ds_test = xr.open_dataset("./data/veg/GIMMS31g/GIMMS31v1/timecorrected/ndvi3g_geo_v1_1_1982to2017_annualmax.nc").isel(time=-1)
# ds_test["time"] = ds_tc["time"]
# ========== subset the dataset in to match the forest cover ==========
# ds_testSUB = ds_test.sel(dict(
# latitude =slice(ds_tc.latitude.max().values, ds_tc.latitude.min().values),
# longitude=slice(ds_tc.longitude.min().values, ds_tc.longitude.max().values)))
# ========== Loop over the datasets ==========
for mwb in mwbox:
for dsn in data:
# ========== Set up the filename and global attributes =========
# fpath = "./data/other/ForestExtent/%s/" % dsn
# cf.pymkdir(fpath)
# ========== Load the grids =========
DAin, global_attrs = dsloader(data, dsn, dates)
# ========== subset the dataset in to match the forest cover ==========
DAin_sub = DAin.sel(dict(
latitude=slice(ds_ly.latitude.max().values, ds_ly.latitude.min().values),
longitude=slice(ds_ly.longitude.min().values, ds_ly.longitude.max().values)))
fto = dsFRIcal(dsn, data, ds_tc, ds_ly, ds_dm, fpath, mwb, region, dates, tcf, force, DAin_sub)
fta = Annual_dsFRIcal(dsn, data, ds_tc, ds_ly, ds_dm, fpath, mwb, region, dates, tcf, force, DAin_sub)
# ipdb.set_trace()
# sys.exit()
continue
ValueTester(fto, mwb, dates, data, dsn, ds_SUB=DAin_sub)
# ipdb.set_trace()
# sys.exit()
ipdb.set_trace()
sys.exit()
#==============================================================================
def Annual_dsFRIcal(dsn, data, ds_tc, ds_ly, ds_dm, fpath, mwb, region, dates, tcf, force, DAin_sub):
"""
This function will try a different approach to reindexing
"""
# ========== Calculate scale factors ==========
rat = np.round(mwb / np.array(ds_tc["treecover2000"].attrs["res"]) )
if np.unique(rat).shape[0] == 1:
# the scale factor between datasets
SF = int(rat[0])
# RollF = int(SF/4 - 0.5) # the minus 0.5 is correct for rolling windows
RollF = 100
else:
warn.warn("Lat and lon have different scale factors")
ipdb.set_trace()
sys.exit()
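    # SF is the number of native Hansen cells spanned by the mwb-degree window;
    # RollF is passed as min_periods to the rolling means below (hard-coded here
    # rather than derived from SF).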
warn.warn("\n\n I still need to implement some form of boolean MODIS Active fire mask in order to get Fire only \n\n")
# ========== Setup the masks ==========
maskpath = "./data/other/ForestExtent/%s/" % dsn
maskfn = maskpath + "Hansen_GFC-2018-v1.6_regrid_%s_%s_BorealMask.nc" % (dsn, region)
if os.path.isfile(maskfn):
mask = xr.open_dataset(maskfn)
else:
warn.warn("Mask file is missing")
ipdb.set_trace()
sys.exit()
# ========== Loop over the years ==========
year_fn = []
for yr in range(1, 19):
# ========== Create the outfile name ==========
fnout = "%sHansen_GFC-2018-v1.6_regrided_%s_FRI_20%02d_annual_%ddegMW_%s.nc" % (fpath, dsn, yr, mwb, region)
if os.path.isfile(fnout) and not force:
try:
print("dataset for %d 20%02d deg already exist. going to next year" % (mwb, yr))
ds = xr.open_dataset(fnout)
year_fn.append(fnout)
continue
except Exception as e:
warn.warn(str(e))
warn.warn("Retrying file")
# ========== fetch the dates ==========
dts = datefixer(2000+yr, 12, 31)
# ========== Calculate the amount of forest that was lost ==========
ba_ly = ds_ly == yr
# ========== implement the masks ==========
ba_ly = ba_ly.where((ds_tc > tcf).rename({"treecover2000":"lossyear"})) # mask out the non forest
ba_ly = ba_ly.where((ds_dm == 1.0).rename({"datamask":"lossyear"})) # mask out the non data pixels
# +++++ Moving window Smoothing +++++
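    # The two 1-D rolling means (longitude first, then latitude) approximate a
    # mwb-degree boxcar average of the burnt fraction before it is reindexed onto
    # the coarser target grid.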
MW_lons = ba_ly.rolling({"longitude":SF}, center = True, min_periods=RollF).mean()
MW_lonsRI = MW_lons.reindex({"longitude":DAin_sub.longitude}, method="nearest")
MW_lonsRI = MW_lonsRI.chunk({"latitude":-1, "longitude":1000})
# +++++ Apply the second smooth +++++
MW_FC = MW_lonsRI.rolling({"latitude":SF}, center = True, min_periods=RollF).mean()
ds_con = MW_FC.reindex({"latitude":DAin_sub.latitude}, method="nearest")
# +++++ Fix the dates +++++
ds_con["time"] = dts["CFTime"]
ds_con["lossyear"] = ds_con["lossyear"].where(ds_con["lossyear"]> 0)
# ipdb.set_trace()
# ========== Mask out bad pixels ==========
ds_con = ds_con.where(mask.mask.values == 1)
# ========== Combine the results ==========
# year_lf.append(ds_con)
# ds_con = xr.concat(year_lf, dim="time")
# +++++ Fix the metadata +++++
ds_con.attrs = ds_ly.attrs
ds_con.attrs["history"] = (
"%s: Fraction of burnt forest after a %d degree spatial smoothing, then resampled to match %s grid resolution using %s" %
((str(pd.Timestamp.now())), mwb, dsn, __file__)
+ ds_con.attrs["history"])
ds_con.attrs["FileName"] = fnout
ds_con = ds_con.rename({"lossyear":"lossfrac"})
ds_con.lossfrac.attrs = ds_ly.lossyear.attrs
ds_con.latitude.attrs = ds_ly.latitude.attrs
ds_con.longitude.attrs = ds_ly.longitude.attrs
ds_con.time.attrs["calendar"] = dts["calendar"]
ds_con.time.attrs["units"] = dts["units"]
# ========== Create the new layers ==========
ds_con["FRI"] = (1/ds_con["lossfrac"])
# ========== Build the encoding ==========
if dsn in ["COPERN_BA", "esacci", "MODIS"]:
enc = ({'shuffle':True,
'chunksizes':[1, ds_con.latitude.shape[0], 1000],
'zlib':True,
'complevel':5})
else:
enc = ({'shuffle':True,
'chunksizes':[1, ds_con.latitude.shape[0], ds_con.longitude.shape[0]],
'zlib':True,
'complevel':5})
encoding = OrderedDict()
for ky in ["lossfrac", "FRI"]:
encoding[ky] = enc
delayed_obj = ds_con.to_netcdf(fnout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"],
compute=False)
print("Starting write of 20%02d %s gridded data at:" % (yr, dsn), pd.Timestamp.now())
with ProgressBar():
            results = delayed_obj.compute()
        # Record the file just written so it is included in the combined dataset.
        year_fn.append(fnout)
    return xr.open_mfdataset(year_fn)
#==============================================================================
def dsFRIcal(dsn, data, ds_tc, ds_ly, ds_dm, fpath, mwb, region, dates, tcf, force, DAin_sub):
"""
This function will try a different approach to reindexing
"""
# ========== Create the outfile name ==========
fnout = "%sHansen_GFC-2018-v1.6_regrided_%s_FRI_%ddegMW_%s.nc" % (fpath, dsn, mwb, region)
if os.path.isfile(fnout) and not force:
print("dataset for %d deg already exist. going to next window" % (mwb))
return xr.open_dataset(fnout)
# ========== Calculate scale factors ==========
rat = np.round(mwb / np.array(ds_tc["treecover2000"].attrs["res"]) )
if np.unique(rat).shape[0] == 1:
# the scale factor between datasets
SF = int(rat[0])
# RollF = int(SF/4 - 0.5) # the minus 0.5 is correct for rolling windows
RollF = 100
else:
warn.warn("Lat and lon have different scale factors")
ipdb.set_trace()
sys.exit()
# ========== Calculate the amount of forest that was lost ==========
ba_ly = ds_ly > 0
# ========== implement the masks ==========
ba_ly = ba_ly.where((ds_tc > tcf).rename({"treecover2000":"lossyear"})) # mask out the non forest
ba_ly = ba_ly.where((ds_dm == 1.0).rename({"datamask":"lossyear"})) # mask out the non data pixels
warn.warn("\n\n I still need to implement some form of boolean MODIS Active fire mask in order to get Fire only \n\n")
# +++++ Moving window Smoothing +++++
MW_lons = ba_ly.rolling({"longitude":SF}, center = True, min_periods=RollF).mean()
MW_lonsRI = MW_lons.reindex({"longitude":DAin_sub.longitude}, method="nearest")
MW_lonsRI = MW_lonsRI.chunk({"latitude":-1, "longitude":1000})
# +++++ Apply the second smooth +++++
MW_FC = MW_lonsRI.rolling({"latitude":SF}, center = True, min_periods=RollF).mean()
MW_FC_RI = MW_FC.reindex({"latitude":DAin_sub.latitude}, method="nearest")
# +++++ Fix the metadata +++++
MW_FC_RI.attrs = ds_ly.attrs
MW_FC_RI.attrs["history"] = "%s: Fraction of burnt forest after a %d degree spatial smoothing, then resampled to match %s grid resolution using %s" % ((str(pd.Timestamp.now())), mwb, dsn, __file__) +MW_FC_RI.attrs["history"]
MW_FC_RI.attrs["FileName"] = fnout
MW_FC_RI = MW_FC_RI.rename({"lossyear":"lossfrac"})
MW_FC_RI.lossfrac.attrs = ds_ly.lossyear.attrs
MW_FC_RI.latitude.attrs = ds_ly.latitude.attrs
MW_FC_RI.longitude.attrs = ds_ly.longitude.attrs
# ========== Create the new layers ==========
MW_FC_RI["lossfrac"] = MW_FC_RI["lossfrac"].where(MW_FC_RI["lossfrac"]> 0)
MW_FC_RI["FRI"] = (1/MW_FC_RI["lossfrac"]) * 18
maskpath = "./data/other/ForestExtent/%s/" % dsn
maskfn = maskpath + "Hansen_GFC-2018-v1.6_regrid_%s_%s_BorealMask.nc" % (dsn, region)
if os.path.isfile(maskfn):
mask = xr.open_dataset(maskfn)
else:
warn.warn("Mask file is missing")
ipdb.set_trace()
sys.exit()
MW_FC_RI = MW_FC_RI.where(mask.mask == 1)
encoding = OrderedDict()
for ky in ["lossfrac", "FRI"]:
encoding[ky] = ({'shuffle':True,
# 'chunksizes':[1, ensinfo.lats.shape[0], 100],
'zlib':True,
'complevel':5})
delayed_obj = MW_FC_RI.to_netcdf(fnout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"],
compute=False)
print("Starting write of %s gridded data at:" % dsn, | pd.Timestamp.now() | pandas.Timestamp.now |
import math
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.offline import plot
import networkx as nx
from parsl.monitoring.visualization.utils import timestamp_to_int, num_to_timestamp, DB_DATE_FORMAT
def task_gantt_plot(df_task, df_status, time_completed=None):
# if the workflow is not recorded as completed, then assume
# that tasks should continue in their last state until now,
# rather than the workflow end time.
if not time_completed:
time_completed = df_status['timestamp'].max()
df_task = df_task.sort_values(by=['task_id'], ascending=False)
parsl_tasks = []
for i, task in df_task.iterrows():
task_id = task['task_id']
description = "Task ID: {}, app: {}".format(task['task_id'], task['task_func_name'])
statuses = df_status.loc[df_status['task_id'] == task_id].sort_values(by=['timestamp'])
last_status = None
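        # Walk the status history in time order, emitting one gantt bar per pair of
        # consecutive status records.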
for j, status in statuses.iterrows():
if last_status is not None:
last_status_bar = {'Task': description,
'Start': last_status['timestamp'],
'Finish': status['timestamp'],
'Resource': last_status['task_status_name']
}
parsl_tasks.extend([last_status_bar])
last_status = status
# TODO: factor with above?
if last_status is not None:
last_status_bar = {'Task': description,
'Start': last_status['timestamp'],
'Finish': time_completed,
'Resource': last_status['task_status_name']
}
parsl_tasks.extend([last_status_bar])
# colours must assign a colour value for every state name defined
# in parsl/dataflow/states.py
colors = {'unsched': 'rgb(240, 240, 240)',
'pending': 'rgb(168, 168, 168)',
'launched': 'rgb(100, 255, 255)',
'running': 'rgb(0, 0, 255)',
'dep_fail': 'rgb(255, 128, 255)',
'failed': 'rgb(200, 0, 0)',
'exec_done': 'rgb(0, 200, 0)',
'memo_done': 'rgb(64, 200, 64)',
'fail_retryable': 'rgb(200, 128,128)'
}
fig = ff.create_gantt(parsl_tasks,
title="",
colors=colors,
group_tasks=True,
show_colorbar=True,
index_col='Resource',
)
fig['layout']['yaxis']['title'] = 'Task'
fig['layout']['yaxis']['showticklabels'] = False
fig['layout']['xaxis']['title'] = 'Time'
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
def task_per_app_plot(task, status):
try:
task['epoch_time_running'] = (pd.to_datetime(
task['task_try_time_running']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
task['epoch_time_returned'] = (pd.to_datetime(
task['task_time_returned']) - | pd.Timestamp("1970-01-01") | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 16:08:12 2021
Compute validation/test metrics for each model
@author: jpeeples
"""
## Python standard libraries
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import os
from operator import itemgetter
from sklearn.metrics import jaccard_score as jsc
import pandas as pd
import pickle
import pdb
## PyTorch dependencies
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
## Local external libraries
from Demo_Parameters import Parameters
def Generate_Dir_Name(split,Network_parameters):
if Network_parameters['hist_model'] is not None:
dir_name = (Network_parameters['folder'] + '/' + Network_parameters['mode']
+ '/' + Network_parameters['Dataset'] + '/'
+ Network_parameters['hist_model'] + '/Run_'
+ str(split + 1) + '/')
#Baseline model
else:
dir_name = (Network_parameters['folder'] + '/'+ Network_parameters['mode']
+ '/' + Network_parameters['Dataset'] + '/' +
Network_parameters['Model_name']
+ '/Run_' + str(split + 1) + '/')
#Location to save figures
fig_dir_name = (Network_parameters['folder'] + '/'+ Network_parameters['mode']
+ '/' + Network_parameters['Dataset'] + '/')
return dir_name, fig_dir_name
def load_metrics(sub_dir, metrics, phase = 'val'):
#Load metrics
temp_file = open(sub_dir + '{}_metrics.pkl'.format(phase), 'rb')
temp_metrics = pickle.load(temp_file)
temp_file.close()
#Return max value for each metric (unless loss or inference time)
metric_vals = np.zeros(len(metrics))
count = 0
for metric in metrics.keys():
metric_vals[count] = temp_metrics[metric]
count += 1
return metric_vals
def add_to_excel(table,writer,model_names,metrics_names,fold=1,overall=False):
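    # With overall=True, average the (model x metric x fold) table over folds and
    # write mean/std sheets; otherwise write a single sheet for the given fold.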
if overall:
table_avg = np.nanmean(table,axis=-1)
table_std = np.nanstd(table,axis=-1)
DF_avg = pd.DataFrame(table_avg,index=model_names,columns=metrics_names)
DF_std = pd.DataFrame(table_std,index=model_names,columns=metrics_names)
DF_avg.to_excel(writer,sheet_name='Overall Avg')
DF_std.to_excel(writer,sheet_name='Overall Std')
else:
        DF = pd.DataFrame(table, index=model_names, columns=metrics_names)
        DF.to_excel(writer, sheet_name='Fold_{}'.format(fold + 1))
#Compute desired metrics and save to excel spreadsheet
def Get_Metrics(metrics,seg_models,args,folds=5):
#Set paramters for experiments
Params = Parameters(args)
model_names = []
metric_names = []
for key in seg_models:
model_names.append(seg_models[key])
for key in metrics:
metric_names.append(metrics[key])
    #Initialize validation and test arrays
val_table = np.zeros((len(seg_models),len(metrics),folds))
test_table = np.zeros((len(seg_models),len(metrics),folds))
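    # Both tables are indexed as (model, metric, fold).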
_, fig_dir = Generate_Dir_Name(0, Params)
fig_dir = fig_dir + 'Metrics/Overall/'
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
val_writer = | pd.ExcelWriter(fig_dir+'Val_Metrics.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
# predict_promoter_signal.py
# given a fasta file of upstream regions for a set of
# genes, run these seqs through bioprospector to search
# for promoter motifs. By default, we search for motifs
# of the structure: hexamer , 15-18bp spacer , hexamer
# and bioprospector is run 20 times then summarized to
# pick the "most popular" signal location for each
# upstream sequence in the fasta.
import argparse
import os
import pandas as pd
from subprocess import Popen, PIPE
import time
import bioprospector_utils as bu
def parse_bioprospector_config(filename):
'''
Parse the arguments out of the bioprospector config file
'''
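    # The config file is expected to hold one KEY=VALUE pair per line, e.g.
    # (illustrative values; W/w are the block widths, g/G the spacer range):
    #   biop_exe=/path/to/BioProspector
    #   W=6
    #   w=6
    #   g=15
    #   G=18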
with open(filename,'r') as f:
biop_args = dict([x.strip().split('=') for x in f.readlines()])
return biop_args
def build_bioprospector_cmds(args):
'''
Build a list of commands to pass to subprocess to run bioprospector
'''
print("Building BioProspector commands...")
cmds_list = [] # store all n cmds
biop_args = parse_bioprospector_config(args.biop_config)
# make output directory for this bioprospector run
base = os.path.basename(args.seq_file).split('.')[0]
biop_str = f"W{biop_args['W']}_w{biop_args['w']}_G{biop_args['G']}_g{biop_args['g']}_d{biop_args['d']}_a{biop_args['a']}_n{biop_args['n']}"
ts = time.strftime("%s",time.gmtime())
biop_base_name = f"{base}_{biop_str}_{ts}"
raw_dir = biop_base_name +"_BIOP_RAW"
raw_dir_path = os.path.join(args.outdir,raw_dir)
mkdir_cmd = ['mkdir',raw_dir_path]
# execute mkdir command and catch error
p = Popen(mkdir_cmd, stdout=PIPE, stderr=PIPE)
output, error = p.communicate()
if p.returncode != 0:
raise ValueError(f'mkdir command failed:{error.decode("utf-8")}')
# result = subprocess.run(cmds, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
# make a command string for each i in num_runs
for i in range(args.num_runs):
# args from config file
cmds = [biop_args['biop_exe']] # executable
cmds += ['-W',biop_args['W']] # width of first block
cmds += ['-w',biop_args['w']] # width of second block
cmds += ['-G',biop_args['G']] # max spacer distance
cmds += ['-g',biop_args['g']] # min spacer distance
cmds += ['-d',biop_args['d']] # strand directions to check
cmds += ['-a',biop_args['a']] # do all inputs have a site
cmds += ['-n',biop_args['n']] # number of BioProspector internal iterations
# input
cmds += ['-i',args.seq_file] # input sequences
# output file
outf = os.path.join(raw_dir_path,f'biop_run{i}.txt')
cmds += ['-o',outf]
cmds_list.append(cmds)
return cmds_list, biop_base_name, raw_dir_path
def run_bioprospector(cmds_list):
'''
Given a list of command line args for bioprospector,
execute the commands
'''
print("Executing BioProspector...")
# get list of processes to execute each BioP command
proc_list = [Popen(cmd, stdout=PIPE, stderr=PIPE) for cmd in cmds_list]
    # The Popen calls above have already started every run, so they execute in
    # parallel; this loop just waits for each process to finish and raises on errors.
for i,proc in enumerate(proc_list):
print(f"Run {i+1} of {len(proc_list)}")
# execute bioprospector command and catch error
#p = Popen(cmds, stdout=PIPE, stderr=PIPE)
output, error = proc.communicate()
if proc.returncode != 0:
raise ValueError(f'BioProspector execution failed:{error.decode("utf-8")}')
def tag_max(df):
'''
Given a df of a BioProspector summary file, for each gene, determine the
max block count. Return the df with the max blocks tagged
'''
sub_dfs = []
for seq_name,sub_df in df.groupby('seq_name'):
max_count = sub_df['block_count'].max()
sub_df['is_max'] = sub_df.apply(
lambda row: True if row['block_count'] == max_count else False,axis=1
)
sub_dfs.append(sub_df)
return | pd.concat(sub_dfs) | pandas.concat |
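# Minimal usage sketch for tag_max; the summary dataframe only needs the
# 'seq_name' and 'block_count' columns referenced above (values hypothetical):
#
#   summary = pd.DataFrame({'seq_name': ['geneA', 'geneA', 'geneB'],
#                           'block_count': [3, 7, 5]})
#   tagged = tag_max(summary)
#   # tagged['is_max'] -> False, True, True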
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
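# Note on the interval tuples above: each category maps to
# (start, end, mean, std), where end - start is the category's relative
# frequency (e.g. 'foo' appears 3/6 times -> width 0.5), mean is the interval
# midpoint, and std is the interval width divided by 6.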
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
- a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of ``norm_mock.rvs`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``norm_mock.rvs`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
"""Test the ``_get_category_from_start`` method.
Setup:
- instantiate a ``CategoricalTransformer``, and set the attribute ``starts``
to a pandas dataframe with ``set_index`` as ``'start'``.
Input:
- an integer, an index from data.
Output:
- a category from the data.
"""
# Setup
transformer = CategoricalTransformer()
transformer.starts = pd.DataFrame({
'start': [0.0, 0.5, 0.7],
'category': ['a', 'b', 'c']
}).set_index('start')
# Run
category = transformer._get_category_from_start(2)
# Assert
assert category == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = | pd.Series([0.875, 0.625, 0.375, 0.125]) | pandas.Series |
"""This module contains auxiliary functions for RD predictions used in the main notebook."""
import json
import matplotlib as plt
import pandas as pd
import numpy as np
import statsmodels as sm
from auxiliary.auxiliary_predictions import *
from auxiliary.auxiliary_plots import *
from auxiliary.auxiliary_tables import *
def prepare_data(data):
"""
Adds variables needed for analysis to data.
"""
# Add constant to data to use in regressions later.
data.loc[:, "const"] = 1
# Add dummy for being above the cutoff in next GPA
data["nextGPA_above_cutoff"] = np.NaN
data.loc[data.nextGPA >= 0, "nextGPA_above_cutoff"] = 1
data.loc[data.nextGPA < 0, "nextGPA_above_cutoff"] = 0
# Add dummy for cumulative GPA being above the cutoff
data["nextCGPA_above_cutoff"] = np.NaN
data.loc[data.nextCGPA >= 0, "nextCGPA_above_cutoff"] = 1
data.loc[data.nextCGPA < 0, "nextCGPA_above_cutoff"] = 0
# Remove zeros from total credits for people whose next GPA is missing
data["total_credits_year2"] = data["totcredits_year2"]
data.loc[np.isnan(data.nextGPA) == True, "total_credits_year2"] = np.NaN
# Add variable for campus specific cutoff
data["cutoff"] = 1.5
data.loc[data.loc_campus3 == 1, "cutoff"] = 1.6
return data
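# Minimal sketch of the intended call pattern (the file name is a placeholder;
# the frame must already contain nextGPA, nextCGPA, totcredits_year2 and the
# loc_campus* dummies referenced above):
#
#   data = pd.read_csv("raw_data.csv")   # placeholder for however the raw data is loaded
#   data = prepare_data(data)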
def calculate_bin_frequency(data, bins):
"""
Calculates the frequency of different bins in a dataframe.
Args:
------
data(pd.DataFrame): Dataframe that contains the raw data.
bins(column): Name of column that contains the variable that should be assessed.
Returns:
---------
bin_frequency(pd.DataFrame): Dataframe that contains the frequency of each bin in data and a constant.
"""
bin_frequency = pd.DataFrame(data[bins].value_counts())
bin_frequency.reset_index(level=0, inplace=True)
bin_frequency.rename(columns={"index": "bins", bins: "freq"}, inplace=True)
bin_frequency = bin_frequency.sort_values(by=["bins"])
bin_frequency["const"] = 1
return bin_frequency
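# Illustrative example: if data['bin'] holds the values [1, 1, 2], then
# calculate_bin_frequency(data, 'bin') returns a frame like
#
#      bins  freq  const
#         1     2      1
#         2     1      1
#
# (the column name 'bin' is hypothetical; any column of the dataframe works).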
def create_groups_dict(data, keys, columns):
"""
Function creates a dictionary containing different subsets of a dataset. Subsets are created using dummies.
Args:
------
data(pd.DataFrame): Dataset that should be split into subsets.
keys(list): List of keys that should be used in the dataframe.
columns(list): List of dummy variables in dataset that are used for creating subsets.
Returns:
---------
groups_dict(dictionary)
"""
groups_dict = {}
for i in range(len(keys)):
groups_dict[keys[i]] = data[data[columns[i]] == 1]
return groups_dict
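# Illustrative example with hypothetical dummy columns: split the sample into
# two subsets keyed by readable names.
#
#   groups = create_groups_dict(data, keys=['male', 'female'],
#                               columns=['gender_male', 'gender_female'])
#   groups['male']   # rows of data where data['gender_male'] == 1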
def create_predictions(data, outcome, regressors, bandwidth):
steps = np.arange(-1.2, 1.25, 0.05)
predictions_df = | pd.DataFrame([]) | pandas.DataFrame |
from urllib.request import urlopen
import json
import pandas as pd
import plotly.express as px
import numpy as np
import re
import plotly.graph_objs as go
from datetime import datetime
us_state_abbrev = {
'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO',
'Connecticut': 'CT', 'Delaware': 'DE', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID',
'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA',
'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS',
'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ',
'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK',
'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',
'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA',
'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY'}
usa_regions = {
'WA' : 'North West','OR' : 'North West','ID' : 'North West','MT' : 'North West','WY' : 'North West',
'CA' : 'West','NV' : 'West','AK' : 'West','HI' : 'West',
'UT' : 'South West','NM' : 'South West','CO' : 'South West','AZ' : 'South West','TX' : 'South West','OK' : 'South West',
'ND' : 'Mid-West','SD' : 'Mid-West','NE' : 'Mid-West','KS' : 'Mid-West','WI' : 'Mid-West','IA' : 'Mid-West','MO' : 'Mid-West',
'MI' : 'Mid-West','IL' : 'Mid-West','IN' : 'Mid-West','KY' : 'Mid-West','OH' : 'Mid-West','MN' : 'Mid-West',
'AR' : 'South East','LA' : 'South East','AL' : 'South East','MS' : 'South East','TN':'South East','GA':'South East','FL':'South East',
'SC' : 'South East','NC' : 'South East',
'VA' : 'Mid-Atlantic','WV' : 'Mid-Atlantic','PA' : 'Mid-Atlantic','MD' : 'Mid-Atlantic','DE' : 'Mid-Atlantic','NJ' : 'Mid-Atlantic',
'NY' : 'Mid-Atlantic','DC' : 'Mid-Atlantic',
'CT' : 'North East','RI' : 'North East','VT' : 'North East','NH' : 'North East','MA' : 'North East','ME' : 'North East'
}
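# Example lookup chain using the two maps above:
#   us_state_abbrev['California']   -> 'CA'
#   usa_regions['CA']               -> 'West'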
fips = pd.read_excel(r'data/fips_codes.xlsx',dtype={'fips':str})
covid = | pd.read_csv(r'data/covid_confirmed.csv',dtype={'countyFIPS':str}) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 12:49:36 2018
@author: <NAME>
"""
from myo_bt import Myo
import collections
import pandas as pd
import time
#from multiprocessing import Process
import threading
class EMGListener(Myo):
def __init__(self, name, queue_size=10000):
Myo.__init__(self)
self.emg_data_queue = collections.deque(maxlen=queue_size)
self.recording = False
self.df = None
self.label = None
self.name = name
self.start_time=None
self.columns = ['movement', 'timestamp'] + ['emg'+str(i) for i in range(1,9)]
#def on_connect(self, device, timestamp, firmware_version):
# device.set_stream_emg(myo.StreamEmg.enabled)
def on_emg_data(self, emg_data):
if self.recording:
self.emg_data_queue.append((self.start_time, emg_data[:8]))
self.emg_data_queue.append((self.start_time, emg_data[8:]))
def get_emg_data(self):
return list(self.emg_data_queue)
def start_recording(self, label):
self.label = label
self.recording = True
self.start_time = time.time()
def stop_recording(self):
self.recording = False
print('Number of datapoints recorded: ', len(self.emg_data_queue))
print('Frequency: ', len(self.emg_data_queue)/(time.time()-self.start_time))
self.start_time = None
def store_data(self):
data = [(self.label, self.emg_data_queue[0][0])+e[1] for e \
in self.emg_data_queue]
temp_df = pd.DataFrame(data)
temp_df.columns = self.columns
if self.df is None: self.df = temp_df.copy()
else: self.df = | pd.concat((self.df, temp_df)) | pandas.concat |
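# Rough usage sketch (connection / threading details live in the Myo base
# class, which is not shown here, so treat this as illustrative only):
#
#   listener = EMGListener(name='subject01')
#   listener.start_recording(label='fist')
#   ...                       # stream EMG data for a while
#   listener.stop_recording()
#   listener.store_data()     # appends the labelled recording to listener.df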
import re
from os import path
import pandas as pd
from neo4j import GraphDatabase
from config import proteoforms, LEVELS, proteins, genes
from queries import QUERIES_PARTICIPANTS, QUERIES_COMPONENTS, get_query_participants_by_pathway, \
QUERY_GET_COMPLEXES_BY_PATHWAY_OR_REACTION
def get_query_result(query):
"""
Get pandas dataframe with the result of the query to Reactome Graph Database
:param query: Cypher string with the query to get the data
:return: pandas dataframe with the result records
"""
db = GraphDatabaseAccess("bolt://localhost", "neo4j", "")
df = db.get_result(query)
db.close()
return df
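# Example (requires a local Reactome Neo4j instance reachable at
# bolt://localhost with the credentials hard-coded in GraphDatabaseAccess):
#
#   df = get_query_result(
#       "MATCH (p:Pathway{speciesName:'Homo sapiens'}) "
#       "RETURN p.stId AS stId LIMIT 5")
#   print(df)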
class GraphDatabaseAccess:
def __init__(self, uri, user, password):
self.driver = GraphDatabase.driver(uri, auth=(user, password), encrypted=False)
def close(self):
self.driver.close()
def get_result(self, query):
with self.driver.session() as session:
records = session.read_transaction(self.get_records, query)
if records:
return pd.DataFrame([r.values() for r in records], columns=records[0].keys())
return pd.DataFrame()
@staticmethod
def get_records(tx, query):
result = []
for record in tx.run(query):
result.append(record)
return result
def make_proteoform_string(value):
"""
Create proteoform string in the simple format: isoform;ptm1,ptm2...
Adds a ';' at the end of the proteoform when there are no ptms, to make sure the string represents a proteoform.
Examples: ["P36507", "00046:null", "00047:null"] or ["P28482"]
:param value: array of strings
:return:
"""
if type(value) == str:
return value + ";"
if len(value) == 1:
return value[0] + ";"
if len(value) == 2:
return ";".join(value)
else:
isoform = value[0] + ";"
ptms = ",".join(value[1:])
return isoform + ptms
print(f"Got a weird value: {value}")
return value[0]
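# Examples, following the formats described in the docstring:
#   make_proteoform_string('P28482')                                -> 'P28482;'
#   make_proteoform_string(['P28482'])                              -> 'P28482;'
#   make_proteoform_string(['P36507', '00046:null'])                -> 'P36507;00046:null'
#   make_proteoform_string(['P36507', '00046:null', '00047:null'])  -> 'P36507;00046:null,00047:null'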
def fix_neo4j_values(df, level):
"""
Corrects format of some fields of the resulting records of the query to match the text structure
For the 'Name' and 'Id' column: remove the subcellular location and spaces
For the proteoforms: convert the list of isoform + ptms into a single string
:param df: Dataframe with at least columns 'Id', 'Type, 'Name'
:param level: "genes", "proteins" or "proteoforms"
:return: Pandas dataframe with the values fixed
"""
if len(df) == 0:
return df
df['Id'] = df.apply(lambda x: re.sub(r'\s*\[[\w\s]*\]\s*', '', x.Id) if x.Type == 'SimpleEntity' else x.Id, axis=1)
df['Id'] = df.apply(lambda x: str(x.Id).replace(" ", "_").strip() if x.Type == 'SimpleEntity' else x.Id, axis=1)
if "UniqueId" in df.columns:
df['UniqueId'] = df.apply(
lambda x: re.sub(r'\s*\[[\w\s]*\]\s*', '', x.UniqueId) if x.Type == 'SimpleEntity' else x.Id, axis=1)
df['UniqueId'] = df.apply(lambda x: str(x.UniqueId).replace(" ", "_") if x.Type == 'SimpleEntity' else x.Id,
axis=1)
if level == proteoforms:
df['Id'] = df.apply(
lambda x: make_proteoform_string(x.Id) if x.Type == 'EntityWithAccessionedSequence' else x.Id, axis=1)
df['Name'] = df['Name'].apply(lambda x: re.sub("\s*\[[\s\w]*\]\s*", '', x))
return df
def get_participants(level, location=""):
"""
Gets reaction participants from Reactome in a table with the columns: [Pathway, Reaction, Entity, Name, Type, Id, Database, Role]
:param location: directory where to search for the csv files
:param level: genes, proteins or proteoforms
:return: pandas dataframe
"""
filename = location + "records_reaction_participants_" + level + ".csv"
if not path.exists(filename):
participants = get_query_result(QUERIES_PARTICIPANTS[level])
participants = fix_neo4j_values(participants, level)
participants.to_csv(filename)
return participants
else:
return pd.read_csv(filename)
def get_empty_participants_dataframe(level):
if level == genes:
return pd.DataFrame(
columns=["Pathway", "Reaction", "Entity", "Name", "Type", "Id", "Database", "Role"])
elif level == proteins:
return pd.DataFrame(
columns=["Pathway", "Reaction", "Entity", "Name", "Type", "Id", "PrevId", "Database", "Role"])
elif level == proteoforms:
return pd.DataFrame(
columns=["Pathway", "Reaction", "Entity", "Name", "Type", "Id", "PrevId", "Database", "Role"])
else:
return pd.DataFrame(
columns=["Pathway", "Reaction", "Entity", "Name", "Type", "Id", "UniqueId", "Database", "Role"])
def get_participants_by_pathway(pathway, level, out_path=""):
# print(f"Getting participants for pathway {pathway} for level {level}")
filename = out_path + "participants/pathway_" + pathway + "_" + level + ".csv"
participants = pd.DataFrame()
if not path.exists(filename):
query = get_query_participants_by_pathway(level, pathway)
participants = get_query_result(query)
participants = fix_neo4j_values(participants, level)
if len(participants) == 0:
participants = get_empty_participants_dataframe(level)
participants.to_csv(filename)
else:
participants = pd.read_csv(filename)
if len(participants) == 0:
participants = get_empty_participants_dataframe(level)
return participants
def get_empty_components_dataframe(level):
if level == genes:
return pd.DataFrame(columns=['Complex', 'Entity', 'Name', 'Type', 'Id'])
elif level == proteins:
return pd.DataFrame(columns=['Complex', 'Entity', 'Name', 'Type', 'Id', 'PrevId'])
elif level == proteoforms:
return pd.DataFrame(columns=['Complex', 'Entity', 'Name', 'Type', 'Id', 'PrevId'])
else:
return pd.DataFrame(columns=['Complex', 'Entity', 'Name', 'Type', 'Id', 'UniqueId'])
def get_components(level, location=""):
filename = location + "records_complex_components_" + level + ".csv"
if not path.exists(filename):
components = get_query_result(QUERIES_COMPONENTS[level])
components = fix_neo4j_values(components, level)
components.to_csv(filename)
if len(components) == 0:
return get_empty_components_dataframe(level)
return components
else:
components = pd.read_csv(filename)
if len(components) == 0:
components = get_empty_components_dataframe(level)
return components
def get_complexes_by_pathway(pathway):
query = QUERY_GET_COMPLEXES_BY_PATHWAY_OR_REACTION.replace("Pathway{speciesName:'Homo sapiens'}",
f"Pathway{{speciesName:'Homo sapiens', stId:'{pathway}'}}")
complexes = get_query_result(query)
return complexes
def get_components_by_pathway(pathway, level, out_path=""):
# print(f"Getting components for pathway {pathway} for level {level}")
if len(pathway) > 0:
# Get list of participating complexes
df_complexes = get_complexes_by_pathway(pathway)
if len(df_complexes) > 0:
# Get the components of each complex
dfs_components = [get_complex_components_by_complex(complex, level, out_path) for complex in
df_complexes["Complex"]]
components = pd.concat(dfs_components)
return components
else:
return get_empty_components_dataframe(level)
else:
query = QUERIES_COMPONENTS[level]
components = get_query_result(query)
components = fix_neo4j_values(components, level)
if len(components) == 0:
components = get_empty_components_dataframe(level)
return components
if __name__ == "__main__":
pathway = "R-HSA-70171"
query = f"MATCH (p:Pathway{{stId:\"{pathway}\"}})-[:hasEvent]->(rle:Reaction{{speciesName:'Homo sapiens'}}) RETURN rle.stId"
df = get_query_result(query)
print(df)
def get_pathway_name(pathway):
query = f"MATCH (p:Pathway{{stId:\"{pathway}\", speciesName:\"Homo sapiens\"}})-[:hasEvent]->(rle:ReactionLikeEvent{{speciesName:\"Homo sapiens\"}})" \
f" RETURN DISTINCT p.displayName as Name"
return get_query_result(query)
def get_pathways():
query = """
MATCH (p:Pathway{speciesName:"Homo sapiens"})-[:hasEvent]->(rle:ReactionLikeEvent{speciesName:"Homo sapiens"})
RETURN DISTINCT p.stId as stId, p.displayName as displayName
"""
return get_query_result(query)
def get_low_level_pathways():
query = """
// Gets all low level pathways for human
MATCH (p:Pathway{speciesName:"Homo sapiens"})
WHERE NOT (p)-[:hasEvent]->(:Pathway)
RETURN p.stId as stId, p.displayName as displayName LIMIT 5
"""
return get_query_result(query)
def get_reactions_by_pathway(pathway):
query = f"MATCH (p:Pathway{{stId:\"{pathway}\"}})-[:hasEvent]->(rle:Reaction{{speciesName:'Homo sapiens'}}) RETURN rle.stId as reaction"
return get_query_result(query)
def get_reactions():
query = "MATCH (rle:ReactionLikeEvent{speciesName:\"Homo sapiens\"}) RETURN rle.stId as stId"
return get_query_result(query)
def get_complexes():
query = "MATCH (c:Complex{speciesName:\"Homo sapiens\"}) RETURN c.stId as stId"
return get_query_result(query)
def get_complex_components_by_complex(complex, level, out_path=""):
# print(f"\tGetting components of complex: {complex}")
filename = out_path + "complexes/complex_" + complex + "_" + level + ".csv"
if not path.exists(filename):
query = QUERIES_COMPONENTS[level].replace(
"Complex{speciesName:'Homo sapiens'}",
f"Complex{{speciesName:'Homo sapiens', stId:'{complex}'}}")
components = get_query_result(query)
components = fix_neo4j_values(components, level)
if len(components) == 0:
components = get_empty_components_dataframe(level)
components.to_csv(filename)
return components
else:
components = | pd.read_csv(filename) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# Expected: identical to the group-mean result, except that missing values
# in the new '987' group stay NaN
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_median(self, missing_data2):
# Test median with group by with 2 columns.
prep = GroupByImputer('median', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
@pytest.mark.usefixtures("missing_data_numeric")
class TestMissingValueFiller(object):
def test_missing_factors(self, missing_data_factors):
# Test filling in missing factors with a string.
prep = MissingValueFiller('Missing')
result = prep.fit_transform(missing_data_factors)
exp_dict = {'c': ['a', 'Missing', 'a', 'b', 'b', 'Missing', 'c', 'a',
'a', 'c'],
'd': ['a', 'a', 'Missing', 'Missing', 'e', 'f', 'Missing',
'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_missing_numeric(self, missing_data_numeric):
# Test filling in missing numeric data with a number.
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data_numeric):
# Test unordered index is handled properly
new_index = list(missing_data_numeric.index)
shuffle(new_index)
missing_data_numeric.index = new_index
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestOverMissingThresholdDropper(object):
def test_drop_20(self, missing_data):
# Test dropping columns with missing over a threshold.
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(1.5)
svatd
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(-1)
svatd
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestValueReplacer(object):
def test_mapper(self, full_data_factors):
# Test replacing values with mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_inverse_mapper(self, full_data_factors):
# Test replacing values with inverse_mapper.
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
prep = ValueReplacer(inverse_mapper=inv_mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, full_data_factors):
# Test throwing error when replacing values with a non-existant column.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
with pytest.raises(ValueError):
prep.fit(full_data_factors)
def test_2_mappers_value_error(self):
# Test throwing error when specifying mapper and inverse_mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
with pytest.raises(ValueError):
prep = ValueReplacer(mapper=mapper, inverse_mapper=inv_mapper)
prep
def test_no_mappers_value_error(self):
# Test throwing error when not specifying mapper or inverse_mapper.
with pytest.raises(ValueError):
prep = ValueReplacer()
prep
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
class TestFactorLimiter(object):
def test_limiter(self, missing_data_factors):
# Test limiting factor levels to specified levels with default.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, missing_data_factors):
# Test throwing error when limiting values with a non-existant column.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'e': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
fl = FactorLimiter(factors)
with pytest.raises(ValueError):
fl.fit(missing_data_factors)
def test_unordered_index(self, missing_data_factors):
# Test unordered index is handled properly
new_index = list(missing_data_factors.index)
shuffle(new_index)
missing_data_factors.index = new_index
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestSingleValueAboveThresholdDropper(object):
def test_drop_70_with_na(self, missing_data):
# test dropping columns with over 70% single value, including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_70_without_na(self, missing_data):
# test dropping columns with over 70% single value, not including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=True)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(1.5)
prep
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(-1)
prep
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("single_values_data")
class TestSingleValueDropper(object):
def test_without_na(self, single_values_data):
# Test dropping columns with single values, excluding NaNs as a value.
prep = SingleValueDropper(dropna=True)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'e': [1, 2, None, None, None, None, None, None, None,
None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_with_na(self, single_values_data):
# Test dropping columns with single values, including NaNs as a value.
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, single_values_data):
# Test unordered index is handled properly
new_index = list(single_values_data.index)
shuffle(new_index)
single_values_data.index = new_index
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnExtractor(object):
def test_extraction(self, missing_data):
# Test extraction of columns from a DataFrame.
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_column_missing_error(self, missing_data):
        # Test throwing error when an extraction is requested of a missing
        # column.
prep = ColumnExtractor(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnDropper(object):
def test_drop_multiple(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_single(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper('d')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_error(self, missing_data):
# Test throwing error when dropping is requested of a missing column
prep = ColumnDropper(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
@pytest.mark.usefixtures("full_data_factors_subset")
@pytest.mark.usefixtures("missing_data_factors")
class TestDummyCreator(object):
def test_default_dummies(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_fit_transform(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
result = prep.fit_transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_first_dummies(self, full_data_factors):
# Test dropping first dummies for each column.
kwargs = {'drop_first': True}
prep = DummyCreator(**kwargs)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_first_dummies_missing_levels(self, full_data_factors,
full_data_factors_subset):
# Test dropping first dummies for each column.
kwargs = {'drop_first': True}
prep = DummyCreator(**kwargs)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors_subset)
exp_dict = {'c_b': [1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 1, 1, 0, 0, 1],
'd_b': [0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 0, 0, 0, 0, 0],
'd_d': [1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_dummy_na_false_dummies(self, missing_data_factors):
# Test not creating dummies for NaNs.
prep = DummyCreator()
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_dummy_na_true_dummies(self, missing_data_factors):
# Test creating dummies for NaNs.
kwargs = {'dummy_na': True}
prep = DummyCreator(**kwargs)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'c_nan': [0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'd_nan': [0, 0, 1, 1, 0, 0, 1, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_fillin_missing_dummies(self, full_data_factors):
# Test filling missing dummies with a transform data missing levels
# present in the fitting data set.
prep = DummyCreator()
prep.fit(full_data_factors)
new_dict = {'c': ['b', 'c'],
'd': ['a', 'b']
}
new_data = pd.DataFrame(new_dict)
result = prep.transform(new_data)
exp_dict = {'c_a': [0, 0],
'c_b': [1, 0],
'c_c': [0, 1],
'd_a': [1, 0],
'd_b': [0, 1],
'd_c': [0, 0],
'd_d': [0, 0],
'd_e': [0, 0],
'd_f': [0, 0],
'd_g': [0, 0],
'd_h': [0, 0],
'd_j': [0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestColumnValidator(object):
def test_order(self, full_data_factors):
# Test extraction of columns from a DataFrame
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'d': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c']
}
new_data = pd.DataFrame(new_dict)
result = prep.transform(new_data)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
#!/usr/bin/env python
"""
Requirements:
* Python >= 3.6.2
* Pandas
* NumPy
Copyright (c) 2020 <NAME> <<EMAIL>>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"3",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import argparse
import math
import os, sys
import pandas as pd
import numpy as np
def convert_to_df(pvacseq_1_tsv, pvacseq_2_tsv):
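    # Read the pVACseq TSV report(s) into a single pandas DataFrame.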
if not pvacseq_2_tsv:
pvacseq_1_reader = pd.read_csv(pvacseq_1_tsv, sep="\t")
        merged_df = pd.DataFrame(pvacseq_1_reader)
#!/usr/bin/env python3
import pandas as pd
import tensorflow as tf
from gpflow import default_float
from mogpe.training import train_from_config_and_dataset
def load_mcycle_dataset(filename='./mcycle.csv'):
df = pd.read_csv(filename, sep=',')
    X = pd.to_numeric(df['times'])
# -*- coding:utf-8 -*-
# @Time : 2019-12-31 10:40
# @Author : liuqiuxi
# @Email : <EMAIL>
# @File : fundfeedswinddatabase.py
# @Project : datafeeds
# @Software: PyCharm
# @Remark : This is the class for fund market data
import datetime
import copy
import pandas as pd
from datafeeds.winddatabasefeeds import BaseWindDataBase
from datafeeds.utils import BarFeedConfig
from datafeeds import logger
class AFundQuotationWindDataBase(BaseWindDataBase):
LOGGER_NAME = "AFundQuotationWindDataBase"
def __init__(self):
super(AFundQuotationWindDataBase, self).__init__()
self.__need_adjust_columns = ["preClose", "open", "high", "low", "close", "volume"]
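        # Quotation columns that are subject to price adjustment when an
        # adjustment method is requested via the `adjusted` argument.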
self.__table_name_dict = {"securityIds_OTC": "ChinaMutualFundNAV",
"securityIds_EXC": "ChinaClosedFundEODPrice",
"securityIds_OFO": "ChinaMutualFundNAV"}
def get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted=None):
limit_numbers = BarFeedConfig.get_wind().get("LimitNumbers")
if len(securityIds) < limit_numbers:
data = self.__get_quotation(securityIds=securityIds, items=items, frequency=frequency,
begin_datetime=begin_datetime, end_datetime=end_datetime, adjusted=adjusted)
else:
            data = pd.DataFrame()
"""
Author: <NAME> (44287207)
Date: Thu Aug 5 2020
Hyperparameter optimisation.
"""
import pandas as pd
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import numpy as np
from sklearn import svm
from sklearn.model_selection import train_test_split, GridSearchCV
# Save generated images?
SAVEIMG = False
# LaTeX rendering with pgf fonts (disabled by default)
# Sets matplotlib settings for LaTeX export
# WARNING: Requires MikTeX installed
LATEX_RENDER = False
if LATEX_RENDER: plt.rcParams.update({
'font.family': 'serif',
'font.size' : 10, # 16 standard
"text.usetex": True,
"pgf.rcfonts": False
})
def plot3d(X,Y,Z,wire=False):
"""3D visualisation of supplied mesh grid data and elevation."""
# Meshgrid and elevation data
X, Y = np.meshgrid(X,Y)
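    # Mask out every grid point whose cross-validated score is below 0.82 so
    # that only the high-scoring region of the search surface gets drawn.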
for i in range(len(Z)):
for j in range(len(Z[0,:])):
if Z[i,j] < 0.82:
X[i,j] = np.nan
Y[i,j] = np.nan
Z[i,j] = np.nan
# Plotting
fig = plt.figure(dpi=160)
ax = fig.gca(projection='3d')
if wire == True:
ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
else:
surf = ax.plot_surface(X, Y, Z, cmap=cm.viridis,
linewidth=0.2, antialiased=False)
ax.set_zlim3d(0.82,None)
ax.set_xlabel("C value")
ax.set_ylabel("gamma value")
ax.set_zlabel(r"Average 5-fold cross-validated $R^2$ value")
if wire == False: fig.colorbar(surf, shrink=0.4, aspect=5, pad=0.175)
plt.show()
return X, Y, Z
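# Illustrative usage (not part of the original script; `regr_gs`, `c_list` and
# `gamma_list` are placeholder names for a fitted GridSearchCV and its grids):
#   Z = get_results_matrix(regr_gs, c_list, gamma_list)
#   plot3d(c_list, gamma_list, Z, wire=False)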
def get_results_matrix(regr, x_list, y_list):
    # Rebuild GridSearchCV's flat mean_test_score array into a 2D matrix for 3D plotting
unit_length = len(y_list)
z = regr.cv_results_['mean_test_score'][0:unit_length]
for i in range(1,len(x_list)):
z = np.vstack((z, regr.cv_results_['mean_test_score'][i*unit_length:(i+1)*unit_length]))
z = z.T
return z
# Plotting 2D
def plot2d_gamma(regr):
"""Only works if there are two parameters being varied."""
a = regr.cv_results_['mean_test_score']
b = regr.cv_results_['params']
c = np.vstack((b, a)).T
sns.reset_orig() # get default matplotlib styles back
clrs = sns.color_palette('husl', n_colors=len(c_list)) # a list of RGB tuples
fig, ax = plt.subplots()
length = len(gamma_list)
for num, C_value in enumerate(c_list):
lines=ax.plot(gamma_list, c[:,1][length*(num):length*(num+1)], '.-', label='C={:.4f}'.format(C_value))
lines[0].set_color(clrs[num])
ax.legend(title='C values')
ax.set_ylim([0.75,None])
ax.set_xlabel("Gamma")
ax.set_ylabel("Average 5-fold cross-validated R^2 value")
plt.show()
def plot2d(regr):
"""Only works if there are two parameters being varied."""
a = regr.cv_results_['mean_test_score']
b = regr.cv_results_['params']
c = np.vstack((b, a)).T
sns.reset_orig() # get default matplotlib styles back
clrs = sns.color_palette('husl', n_colors=len(c_list)) # a list of RGB tuples
fig, ax = plt.subplots()
length = len(c_list)
for num, g_value in enumerate(gamma_list):
lines=ax.plot(c_list, c[:,1][length*(num):length*(num+1)], '.-', label='γ={:.4f}'.format(g_value))
lines[0].set_color(clrs[num])
ax.legend(title='γ values')
ax.set_xlabel("Regularisation parameter, C")
ax.set_ylabel(r"Average 5-fold cross-validated $R^2$ value")
fig.tight_layout()
plt.show()
def search(param_grid, X_train, Y_train):
# Initialise grid search SVM and train (5-fold cross validation)
regr = svm.SVR()
regr_gs = GridSearchCV(regr, param_grid)
regr_gs.fit(X_train, Y_train)
# Results table
    res = pd.DataFrame(regr_gs.cv_results_)
import os
import pandas as pd
import numpy as np
def load_results(load_folders):
rmse_loss_files = ['rmse_net_train_rmse', 'rmse_net_hold_rmse', 'rmse_net_test_rmse',
'task_net_train_rmse', 'task_net_hold_rmse', 'task_net_test_rmse']
task_loss_files = ['rmse_net_train_task', 'rmse_net_hold_task', 'rmse_net_test_task',
'task_net_train_task', 'task_net_hold_task', 'task_net_test_task']
col_names = ['RMSE Net (train)', 'RMSE Net (hold)', 'RMSE Net (test)',
'Task Net (train)', 'Task Net (hold)', 'Task Net (test)']
df_rmse = pd.DataFrame()
df_task = pd.DataFrame()
for folder in load_folders:
rmse_results, task_results = [], []
for filename in rmse_loss_files:
with open(os.path.join(folder, filename), 'rb') as f:
rmse_results.append(np.load(f))
        df = pd.DataFrame(pd.DataFrame(rmse_results))
# %%
'''
'''
## Import the required libraries
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import glob as glob
import datetime
import re
import jenkspy
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()
# %%
def profiling():
#### Read Databases
datas=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/data_con_drop.csv',sep=';',encoding='utf-8',dtype='str')
salida=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/salida_limpia.csv',sep=';',encoding='utf-8',dtype='str')
seguimiento=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/seguimiento.csv',sep=';',encoding='utf-8',dtype='str')
virtuales=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/virtuales.csv',encoding='utf-8',sep=';')
df=datas.copy()
out=salida.copy()
seg=seguimiento.copy()
vir=virtuales.copy()
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],inplace=True)
out=out[out['Repetido CC']=='0']
out=out[~out.duplicated(keep='last')]
## Cleaning
df['Marca Score']=df['Marca Score'].str.strip().fillna('NO REGISTRA')
df['Marca Score'][df['Marca Score']==''] ='NO REGISTRA'
df['Analisis De Habito']=df['Analisis De Habito'].fillna('NO DEFINE')
df['Analisis De Habito'][df['Analisis De Habito']==' '] ='NO DEFINE'
df['Tipo de Cliente'][df['Tipo de Cliente']==' '] ='NO DEFINE'
df['Marca Funcional']=df['Marca Funcional'].str.replace(' ','0')
df['Marca']=df['Marca'].str.replace(' ','0')
df['Antiguedad Cliente'][df['Antiguedad Cliente']==' '] ='NO REGISTRA'
df['Perfil Digital']=df['Perfil Digital'].fillna('Sin perfil')
df['Nivel de riesgo experian']=df['Nivel de riesgo experian'].str.replace(' ','NO REGISTRA')
df['Nivel de Riesgo']=df['Nivel de Riesgo'].str.replace(' ','NO REGISTRA')
df['Nivel Estrategia Cobro']=df['Nivel Estrategia Cobro'].str.replace(' ','NO REGISTRA')
df['Real reportado en central de riesgos']=df['Real reportado en central de riesgos'].str.replace(' ','0')
df['Nivel de Riesgo'][df['Nivel de Riesgo']==' '] ='NO REGISTRA'
df['Estado del Cliente'][df['Estado del Cliente']==' '] ='SIN IDENTIFICAR'
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] ='SIN IDENTIFICAR'
df['Estrategia'][df['Estrategia']==' '] ='SIN ESTRATEGIA'
df['Autopago'][df['Autopago']==' '] ='NO APLICA'
df['Tipo de Cliente']=df['Tipo de Cliente'].fillna('NO DEFINE')
df['Tipo de Reporte a Central de Riesgos'][df['Tipo de Reporte a Central de Riesgos']==' '] ='NO REGISTRA'
df['Codigo edad de mora(para central de riesgos)']=df['Codigo edad de mora(para central de riesgos)'].str.replace(' ','NO REGISTRA')
df['Análisis Vector'][df['Análisis Vector']==' '] ='SIN IDENTIFICAR'
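    # Binary flags derived from the 'Análisis Vector' text: each new column is
    # 1 when the corresponding pattern appears in the payment vector, else 0.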
df['Análisis Vector_PAGOS_PARCIAL'] = np.where(df['Análisis Vector'].str.contains("PAGO PARCIAL|PAGOS PARCIAL"),"1",'0')
df['Análisis Vector_PAGO OPORTUNO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_SIN_IDENTIFICAR'] = np.where(df['Análisis Vector'].str.contains("SIN IDENTIFICAR"),"1",'0')
df['Análisis Vector_SIN_PAGO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_suspension'] = np.where(df['Análisis Vector'].str.contains("SUSPENSIO"),"1",'0')
df['Análisis Vector_indeterminado'] = np.where(df['Análisis Vector'].str.contains("PAGO OPORTUNO Y NO OPORTUNO"),"1",'0')
df['Análisis Vector_pago_no_oport'] = np.where(df['Análisis Vector'].str.contains("PAGO NO OPORTUNO"),"1",'0')
df['Análisis Vector_otro_caso'] = np.where(df['Análisis Vector'].str.contains("NUEVO|FACTURAS AJUSTADAS|PROBLEMAS RECLAMACION"),"1",'0')
df['Vector Cualitativo # Suscripción'][df['Vector Cualitativo # Suscripción']==' '] = df["Vector Cualitativo # Suscripción"].mode()[0]
df['Fecha Ult Gestion']=pd.to_datetime(df['Fecha Ult Gestion'],format='%Y-%m-%d')
    ### PARSE DATES AND CREATE NEW FEATURES
df['Fecha de Asignacion']=pd.to_datetime(df['Fecha de Asignacion'],format='%Y-%m-%d %H:%M:%S')
df['Fecha Ult pago']=pd.to_datetime(df['Fecha Ult pago'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(df['Fecha de cuenta de cobro mas antigua'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["Dias_ult_pago"] = (df['Fecha Ult pago']).dt.day
df["dia_semana_ult_pago"] = (df['Fecha Ult pago']).dt.weekday
df["mes_ult_pago"]=df["Fecha Ult pago"].dt.month
df["semana_ult_pago"]=df["Fecha Ult pago"].dt.week
df["trimestre_ult_pago"] = df["Fecha Ult pago"].dt.quarter
df["año_ult_pago"] = df["Fecha Ult pago"].dt.year
df["DIAS_desde_ult_pago"] = (df["Fecha Ult Gestion"] - df["Fecha Ult pago"]).dt.days
df["Fecha estado corte"]=pd.to_datetime(df["Fecha estado corte"],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["dias_ult_pago_cobro"] = (df["Fecha Ult pago"]-df["Fecha estado corte"]).dt.days
df["dias_ult_pago_fac_ant"] = (df["Fecha Ult pago"]-df["Fecha de cuenta de cobro mas antigua"]).dt.days
df['Fecha de Asignacion_mes']=df["Fecha de Asignacion"].dt.month
df['Fecha de Instalacion']=pd.to_datetime(df['Fecha de Instalacion'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['antiguedad_mes']=(dt.datetime.now()-df['Fecha de Instalacion']).dt.days/365
df['Fecha Retiro']=pd.to_datetime(df['Fecha Retiro'].str.replace('4732','2020'),format='%Y-%m-%d',errors = "coerce")
df['Fecha Vencimiento Sin Recargo']=pd.to_datetime(df['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
df['dias_desde_ult_gestion']=(dt.datetime.now()-df['Fecha Ult Gestion']).dt.days
## Group labels
df['Descripcion subcategoria']=df['Descripcion subcategoria']\
.str.replace('Consumos EPM Telco|INALAMBRICOS NO JAC|unica|COMERCIAL|ENTERPRISE|MONOPRODUCTO|PYME|------------------------------|LINEA BUZON','NO REGISTRA')\
.str.replace('ESTRATO MEDIO ALTO|MEDIO ALTO','ESTRATO 4')\
.str.replace('ESTRATO ALTO|ALTO','ESTRATO 6')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('ESTRATO MEDIO|MEDIO','ESTRATO 3')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('BAJO BAJO|ESTRATO BAJO-BAJO|ESTRATO BAJO|BAJO','ESTRATO 1')
    df['Descripcion subcategoria'][df['Descripcion subcategoria']=='-'] ='NO REGISTRA' ## not recorded
    df['Tipificación Cliente'][df['Tipificación Cliente']==' '] = df["Tipificación Cliente"].mode()[0] ## replace blanks with the mode
df['Dias Suspension'][df['Dias Suspension']==' ']=0
df['Dias Suspension']=df['Dias Suspension'].astype('int')
## Group labels
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip().str.upper()\
.str.replace('TELEVISION UNE|TELEVISION INTERACTIVA|TV CABLE|TV INTERACTIVA|UNE TV|TELEVISION SIN SEÃƑ‘AL|TELEVISION SIN SEÃƑ‘AL|TV CABLE SIN SEÑAL','TELEVISION')\
.str.replace('INTERNET BANDA ANCHA|SEGUNDA CONEXION INTERNET|BANDA ANCHA|INTERNET EDATEL|INTERNET INSTANTANEO|CABLE MODEM|INTERNET DEDICADO 11|ADSL BASICO','INTERNET')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA|TIGO|ETB','UNEMOVIL')\
.str.replace('TOIP|TELEFONICA TELECOM|TELECOM|TO_SINVOZ','TELEFONIA')\
.str.replace('LÃƑÂNEA BÃƑ¡SICA','LINEA BASICA')
df['Descripcion categoria']=df['Descripcion categoria'].str.replace("[^a-zA-Z ]+", "NO REGISTRA")
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip()\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV|TELEVISIONSIN SEÑAL','TELEVISION')\
.str.replace('Internet Banda Ancha|Internet EDATEL|CABLE MODEM','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA','UNEMOVIL')\
.str.replace('TOIP','TELEFONIA')
df['Descripcion producto']=df['Descripcion producto'].str.strip().str.replace('-','')\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV','TELEVISION')\
.str.replace('Internet Banda Ancha','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')
conteo3=df['Descripcion producto'].value_counts().iloc[:7].index.tolist()
df['Descripcion producto_resumen']=df.apply(
lambda row: row['Descripcion producto'] if (row['Descripcion producto'] in conteo3)
else 'OTRO PRODUCTO',axis=1)
df['Descripcion producto_resumen']=df['Descripcion producto_resumen'].str.strip()
df['Tipo Contactabilidad'][df['Tipo Contactabilidad']==' '] ='NO REGISTRA'
df['Indicador BI'][df['Indicador BI']==' '] ='NO REGISTRA'
## Create variable
df['antiguedad_mes']=df['antiguedad_mes'].astype(int)
col = 'antiguedad_mes'
condi = [ df[col] < 12, df[col].between(12, 24, inclusive = True),df[col]>24 ]
seg_ = [ "SEGMENTO YOUNG", 'SEGMENTO MASTER','SEGMENTO LEGEND']
df["Hogar"] = np.select(condi, seg_, default=np.nan)
df['Calificación A Nivel De Suscripción'][df['Calificación A Nivel De Suscripción']==' ']=df['Calificación A Nivel De Suscripción'].mode()[0]
df['Calificación A Nivel De Suscripción']=df['Calificación A Nivel De Suscripción'].astype('int')
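    # Bin the subscription-level score into five labelled grades (A-E) with pd.cut.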
df['Califica_suscr_class']=pd.cut(df['Calificación A Nivel De Suscripción'],bins=5,labels=["A","B","C","D","E"]).astype(str)
df['Tipo De Documento'][df['Tipo De Documento']=='13'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].fillna('NO REGISTRA')
df['Tipo De Documento'][df['Tipo De Documento']=='1'] ='CC'
df['Tipo De Documento'][df['Tipo De Documento']==' '] ='NO REGISTRA'
df['Tipo De Documento'][df['Tipo De Documento']=='C'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].str.replace('3 Cedula Extranjeria|3|1CE','CE')\
.str.replace('1 Cedula','CC')\
.str.replace('2 Nit|2',' Nit')\
.str.replace('4 Tarjeta de Identidad|4',' TI')
#### Create, clean & group variables
df['Banco 1'][df['Banco 1']==' '] ='NO REGISTRA'
df['Banco 2'][df['Banco 2']==' '] ='NO REGISTRA'
df['Banco 1'].fillna('NO REGISTRA',inplace=True)
df['Banco 2'].fillna('NO REGISTRA',inplace=True)
df['Banco 1']=df['Banco 1'].str.upper().str.strip()
df['Banco 2']=df['Banco 2'].str.upper().str.strip()
df['Banco 1']=df['Banco 1'].str.replace('BANCO COLPATRIA','COLPATRIA')\
.str.replace('COLPATRIA ENLINEA','COLPATRIA EN LINEA')\
.str.replace('GANA GANA','GANA')\
.str.replace('GANA GANA','GANA')
df["Banco 1_virtual"] =\
np.where(df["Banco 1"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
df["Banco 2_Virtual"] =\
np.where(df["Banco 2"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
conteo_banco=df['Banco 1'].value_counts().iloc[:10].index.tolist()
df['Banco 1_Cl']=df.apply(
lambda row: row['Banco 1'] if (row['Banco 1'] in conteo_banco)
else 'OTRO BANCO',axis=1)
conteo_banco2=df['Banco 2'].value_counts().iloc[:10].index.tolist()
df['Banco 2_Cl']=df.apply(
lambda row: row['Banco 2'] if (row['Banco 2'] in conteo_banco2)
else 'OTRO BANCO',axis=1)
df['Causal'][df['Causal']==' '] ='NO REGISTRA'
df['Causal_Cl']=df['Causal']\
.str.replace('FACTURA MAYOR A LA CAPACIDAD DE PAGO|CLIENTE SE ACOGE PRODUCTO MINIMO VITAL|PRIORIDAD INGRESOS A LA CANASTA BASICA|INDISPONIBILIDAD DE MEDIOS DE PAGO POR EMERGENCIA SANITARIA|NO TIENE DINERO|INCONVENIENTES ECONOMICOS|INCONVENIENTES ECONOMICOS|CONTINGENCIA COVID-19|DESEMPLEADO|INDEPENDIENTE SIN INGRESOS DURANTE CUARENTENA|DISMINUCIÓN INGRESOS / INCONVENIENTES CON NÓMINA',
'DISMINUCIÓN DE INGRESOS')\
.str.replace('OLVIDO DE PAGO|FUERA DE LA CIUDAD|DEUDOR SE OLVIDO DEL PAGO|OLVIDO DEL PAGO / ESTA DE VIAJE',
'OLVIDO')\
.str.replace('PAGA CADA DOS MESES|PAGO BIMESTRAL','PAGO BIMESTRAL')\
.str.replace('INCONFORMIDAD EN EL VALOR FACTURADO|INCONFORMIDAD POR CAMBIO DE DOMICILIO|INCOMFORMIDAD POR CAMBIO DE DOMICILIO|PQR PENDIENTE|TIENE RECLAMO PENDIENTE','INCONFORMIDAD')\
.str.replace('OTRA PERSONA ES LA ENCARGADA DEL PAGO','OTRA PERSONA ES LA ENCARGADA DEL PAGO').str.strip()\
.str.replace('PROBLEMAS FACTURACIÓN|INCONSISTENCIAS EN CARGOS FACTURADOS|RECLAMACIÓN EN TRÃMITE|NO LE LLEGA LA FACTURA / LLEGO DESPUES DE LA FECHA DE VENCIMIENTO|LLEGO LA FACTURA DESPUES DE LA FECHA DE VENCIMIENTO|NO LLEGO FACTURA',
'FACTURA')\
.str.replace('SE NIEGA A RECIBIR INFORMACION',
'RENUENTE')\
.str.replace('INCONVENIENTES CON CANALES DE PAGO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON EL CANAL DE RECAUDO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON LA ENTIDAD BANCARIA',
'INCONVENIENTES CON PAGO')\
.str.replace('REALIZARA RETIRO DEL SERVICIO|REALIZARA RETIRO / CANCELACION SERVICIO',
'REALIZARA RETIRO')
conteo_Causa=df['Causal_Cl'].value_counts().iloc[:12].index.tolist()
df['Causal_Cl']=df.apply(
lambda row: row['Causal_Cl'] if (row['Causal_Cl'] in conteo_Causa)
else 'OTRA CAUSA',axis=1)
conteo_Corte=df['Descripcion estado de corte'].value_counts().iloc[:12].index.tolist()
df['Descripcion estado de corte_Cl']=df.apply(
lambda row: row['Descripcion estado de corte'] if (row['Descripcion estado de corte'] in conteo_Corte)
else 'OTRA MOTIVO',axis=1)
df['Descripcion estado de corte_conexión'] = np.where(df['Descripcion estado de corte'].str.contains("CONEXION"),"1",'0')
df['Descripcion estado de corte_suspención'] = np.where(df['Descripcion estado de corte'].str.contains("SUSPENSION"),"1",'0')
df['Descripcion estado de corte_retiro'] = np.where(df['Descripcion estado de corte'].str.contains("RETIRO"),"1",'0')
df['Valor Total Cobrar']=df['Valor Total Cobrar'].astype('float64')
df['Valor Vencido']=df['Valor Vencido'].astype('float64')
df['Valor Factura']=df['Valor Factura'].astype('float64')
df['Valor Intereses de Mora']=df['Valor Intereses de Mora'].astype('float64')
df['Valor financiado']=df['Valor financiado'].astype('float64')
    ## DROPPING VARIABLES
df.drop(['Causal','Codigo edad de mora(para central de riesgos)','Codigo edad de mora(para central de riesgos)',
'Estado Adminfo','Celular con mejor Contactabilidad','Archivo Convergente','Usuario','Vector de Pago'],axis=1,inplace=True)
anis=['Teléfono última gestión','Email','Telefono con mejor Contactabilidad','Email',
'Ultimo Celular Grabado','Ultimo Telefono Grabado','Ultimo Email Grabado','Celular con mejor Contactabilidad']
df.dropna(subset = ["Direccion de instalacion"], inplace=True)
df['llave']=df['Identificacion']+"_"+df['Direccion de instalacion']
df=df.sort_values('Fecha de Asignacion',ascending=True)
    ## Drop the duplicate rows produced by the combination of these variables
df=df[~df[['llave','# servicio suscrito/abonado','Fecha de Asignacion','Valor Total Cobrar','Valor Vencido','Descripcion localidad']].duplicated()]
df.sort_values(by=['Identificacion','# servicio suscrito/abonado','Fecha de Asignacion'],ascending=[True,True,True]).drop_duplicates('# servicio suscrito/abonado',keep='last',inplace=True)
    ### Careful with the records that are still pending management
    ## Number of services
cant_serv=df.groupby(['Identificacion']).agg({'Descripcion producto':'nunique','Direccion de instalacion':'nunique'})\
.reset_index().sort_values('Descripcion producto',ascending=False)\
.rename(columns={'Descripcion producto':'cantidad_ser_dir','Direccion de instalacion':'serv_dir'})
df=pd.merge(df,cant_serv,on='Identificacion')
df=df[~df.duplicated()]
    # Create this key to avoid duplicates on the same day
df['llave_2']=df['Identificacion']+"_"+(df['Fecha de Asignacion'].astype('str'))
#
conteo=df.groupby(['Identificacion','Fecha de Asignacion','Fecha de Asignacion_mes']).agg({'Identificacion':'nunique'}).rename(columns={'Identificacion':'cantidad_mes'}).reset_index()
conteo.sort_values('Fecha de Asignacion',ascending=True,inplace=True)
conteo=conteo[~conteo['Identificacion'].duplicated(keep='last')]
conteo['llave_2']=conteo['Identificacion']+"_"+(conteo['Fecha de Asignacion'].astype('str'))
    # Built to identify and keep only the key of each customer
consolidar=pd.merge(df,conteo['llave_2'],on='llave_2')
    # Create dummy variables to identify the products within the same service count
    cer1=pd.concat([pd.get_dummies(consolidar['Descripcion producto_resumen']),consolidar],axis=1) # concatenate
cer1['llave_2']=cer1['Identificacion']+"_"+(cer1['Fecha de Asignacion'].astype('str'))
cer=cer1.groupby(['Identificacion']).agg({
'Descripcion producto_resumen':np.array,'Descripcion producto_resumen':'sum',
'TELEFONIA':'sum','INTERNET':'sum','TELEVISION':'sum','UNEMOVIL':'sum',
'LARGA DISTANCIA UNE':'sum','PAQUETE':'sum','OTRO PRODUCTO':'sum','LINEA BASICA':'sum',
"Valor Vencido":"sum","Valor Total Cobrar":"sum",
"Valor financiado":"sum",
"Valor Intereses de Mora":"sum"}).reset_index().\
rename(columns={'Valor Vencido':'valor vencido_sum',
'Valor Factura':'Valor Factura_sum',
'Valor financiado':'Valor financiado_sum',
'Valor Total Cobrar':'Valor Total Cobrar_sum',
'Descripcion producto_resumen':'Total servicio',
'Valor Intereses de Mora':'Valor Intereses de Mora_sum'})
cer.drop(['Total servicio'],axis=1,inplace=True)
data=pd.merge(consolidar,cer,on='Identificacion')
data=data.sort_values(['Fecha de Asignacion','Identificacion'],ascending=[True,True]).drop_duplicates('Identificacion',keep='last')
    ### Outcome database
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],ascending=[True,True]).drop_duplicates(keep='last',inplace=True)
out.drop(['Unnamed: 19'],axis=1,inplace=True)
    ## Join with the outcome database
full=pd.merge(data,out[['Identificacion Del Cliente','Efectivo Pago','Fecha_Pago']],
left_on='Identificacion',right_on='Identificacion Del Cliente')
full=full[~full.duplicated()]
full=full.sort_values(['Identificacion','Efectivo Pago'],ascending=[True,True]).drop_duplicates(['Identificacion'],keep='first')
full['llave_exp']=full['Identificacion']+full['# servicio suscrito/abonado']
full['valor vencido_sum'][full['valor vencido_sum'] < 0] = 0
full['ratio_vlr_vencido_cobro']=full['valor vencido_sum']/full['Valor Total Cobrar_sum']
full.drop(['llave_2','Direccion de instalacion','Banco 1','Banco 2'],axis=1,inplace=True)
    ### Export and send to the working folder
seg['FECHA DE GESTION']=pd.to_datetime(seg['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
seg=seg.sort_values(['IDENTIFICACIóN','FECHA DE GESTION']).drop_duplicates('IDENTIFICACIóN',keep='last')
vir['Identificación']=vir['Identificación'].astype('str')
fulll=pd.merge(full,seg[['IDENTIFICACIóN','FECHA DE GESTION','CLASE DE GESTION',
'LINEA/AGENCIA/ABOGADO','CAUSAL','CICLO','OTRA GESTION',
'SE DEJO MENSAJE EN BUZON', 'DEUDOR REALIZA PROMESA DE PAGO TOTAL',
'NO CONTESTAN / OCUPADO', 'DEUDOR REALIZA PROMESA DE PAGO PARCIAL',
'NO HUBO ACUERDO', 'SE ENVIA CUPON DE PAGO','SE DEJO MENSAJE CON TERCERO',
'OTRA GESTION_sum', 'Total_segui','Cantidad_de_cobros_diff_mes', 'Cantidad_recontactos_mes',
'class_Cantidad_de_cobros_diff_mes','class_Cantidad_recontactos_mes']],
left_on='Identificacion',right_on='IDENTIFICACIóN',how='left').\
merge(vir,left_on='Identificacion',right_on='Identificación',how='left')
    # free memory
del cer
del cer1
fulll["Efectivo Pago"] = (fulll["Efectivo Pago"]=="Efectivo").astype(int)
fulll.drop(['Valor financiado_sum','Fecha_Pago','Valor Intereses de Mora_sum','Valor Total Cobrar','Valor Total Cobrar_sum','Valor Intereses de Mora','Agencia B2B Convergente','Codigo Fraude','CAUSAL','LINEA/AGENCIA/ABOGADO',
'Celular','Valor financiado','# servicio suscrito/abonado','Fecha Ult pago','Fecha estado corte','Codigo Departamento','Centrales de riesgos','dias_desde_ult_gestion',
'Valor Honorarios','Dias_ult_pago','dia_semana_ult_pago','mes_ult_pago','semana_ult_pago','Marca','Marca Funcional','Reportado a central de riesgos','Marca Score','Autopago',
'trimestre_ult_pago','año_ult_pago','DIAS_desde_ult_pago','dias_ult_pago_cobro','Primera Mora','CICLO','Codigo Categoria','Subsegmento',
'dias_ult_pago_fac_ant','Fecha de cuenta de cobro mas antigua','Fecha estado corte','Fecha estado corte','Descripcion Gestion Resultado'],axis=1,inplace=True)
dd=fulll.copy()
dd['class_Cantidad_recontactos_mes']=dd['class_Cantidad_recontactos_mes'].fillna('0')
dd['class_Cantidad_de_cobros_diff_mes'].fillna('0',inplace=True)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Fecha de Asignacion']=pd.to_datetime(dd['Fecha de Asignacion'],format='%Y-%m-%d')
dd['Fecha Ult Gestion']=pd.to_datetime(dd['Fecha Ult Gestion'],format='%Y-%m-%d')
dd['Fecha Actualizacion']=pd.to_datetime(dd['Fecha Actualizacion'],format='%Y-%m-%d')
dd['Fecha Vencimiento Sin Recargo']=pd.to_datetime(dd['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
# dd['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(dd['Fecha de cuenta de cobro mas antigua'],format='%Y-%m-%d')
dd['FECHA DE GESTION']=pd.to_datetime(dd['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
dd['Fecha Debido Cobrar']=pd.to_datetime(dd['Fecha Debido Cobrar'],format='%Y-%m-%d %H:%M:%S', errors='coerce')
dd['Score Contactabilidad'][dd['Score Contactabilidad']==' '] =np.nan
dd['Score Contactabilidad']=dd['Score Contactabilidad'].fillna(dd['Score Contactabilidad'].median())
dd['Score Contactabilidad']=dd['Score Contactabilidad'].astype('float')
dd['Tiene Compromiso'] = (dd['Tiene Compromiso']=="S").astype(int)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] =0
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Financiado'] = (dd["Financiado"]=="SI").astype(int)
dd['Obligaciones con celular']= (dd['Obligaciones con celular']=="S").astype(int)
dd['Inscrito Factura Web']= (dd['Inscrito Factura Web']=="S").astype(int)
dd['Real reportado en central de riesgos']= (dd['Real reportado en central de riesgos']=="S").astype(int)
dd['Tipo Habito de Pago'][dd['Tipo Habito de Pago']==' '] ='NO REGISTRA'
dd['Calificación Identificación'][dd['Calificación Identificación']==' '] =dd["Calificación Identificación"].mode()[0]
dd["Calificación Identificación"]=dd["Calificación Identificación"].astype(float)
dd['CLASE DE GESTION'][dd['CLASE DE GESTION']==' ']='NO REGISTRA'
    ### Classifications
dd['Class_Total valor pendiente suscripcion']=pd.qcut(dd['Total valor pendiente suscripcion'].astype(float), 5,
labels=["A", "B", "C","D","E"]).astype('str')
dd['Total valor pendiente suscripcion']=dd['Total valor pendiente suscripcion'].astype(float)
dd['Valor Pendiente']=dd['Valor Pendiente'].astype(float)
dd['# de Dias De Mora']=dd['# de Dias De Mora'].astype(float)
dd['Dias sin Gestion']=dd['Dias sin Gestion'].astype(float)
dd['antiguedad_mes']=dd['antiguedad_mes'].astype(float)
dd['Minimo Cuentas con Saldo Suscripción']=dd['Minimo Cuentas con Saldo Suscripción'].astype(float)
dd['Maximo Cuentas con Saldo Suscripción']=dd['Maximo Cuentas con Saldo Suscripción'].astype(float)
dd['Total_segui']=dd['Total_segui'].astype(float)
    ### OUTLIERS
qtil9_vlrvencido=dd['valor vencido_sum'].quantile(0.95)
qtil9_vlfac=dd['Valor Factura'].quantile(0.90)
qtil9_total=dd['Total valor pendiente suscripcion'].quantile(0.90)
qtil9_total_ven=dd['Valor Vencido'].quantile(0.90)
qtil_75_dia=dd['# de Dias De Mora'].quantile(0.75)
qtil_75_dia_ges=dd['Dias sin Gestion'].quantile(0.80)
qtil_mes=dd['antiguedad_mes'].quantile(0.95)
qtil_min_cuentas=dd['Minimo Cuentas con Saldo Suscripción'].quantile(0.99)
qtil_max_cuentas=dd['Maximo Cuentas con Saldo Suscripción'].quantile(0.99)
qtil_sus=dd['Dias Suspension'].quantile(0.85)
qtil_segui=dd['Total_segui'].quantile(0.95)
dd['valor vencido_sum']= np.where(dd["valor vencido_sum"] > qtil9_vlrvencido, qtil9_vlrvencido ,dd["valor vencido_sum"])
dd['Valor Factura'] = np.where(dd['Valor Factura'] > qtil9_vlfac, qtil9_vlfac,dd["Valor Factura"])
dd['Valor Factura'] = np.where(dd['Valor Factura'] < 0, dd["Valor Factura"].quantile(0.5),dd["Valor Factura"])
dd['Total valor pendiente suscripcion']=np.where(dd['Total valor pendiente suscripcion'] > qtil9_total, qtil9_total,dd["Total valor pendiente suscripcion"])
dd['Valor Vencido']=np.where(dd['Valor Vencido'] > qtil9_total_ven, qtil9_total_ven,dd["Valor Vencido"])
dd['Valor Vencido']=np.where(dd['Valor Vencido'] < dd['Valor Vencido'].quantile(0.1), dd['Valor Vencido'].quantile(0.3),dd["Valor Vencido"])
dd['# de Dias De Mora']=np.where(dd['# de Dias De Mora'] > qtil_75_dia, qtil_75_dia,dd['# de Dias De Mora'])
dd['Dias sin Gestion']=np.where(dd['Dias sin Gestion'] > qtil_75_dia_ges, qtil_75_dia_ges,dd['Dias sin Gestion'])
dd['ratio_vlr_vencido_cobro'].fillna(dd['ratio_vlr_vencido_cobro'].median(),inplace=True)
dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].fillna(dd['Calificación Servicio Suscrito'].median())
dd['antiguedad_mes']=np.where(dd['antiguedad_mes'] > qtil_mes, qtil_mes,dd['antiguedad_mes'])
dd['Minimo Cuentas con Saldo Suscripción']=np.where(dd['Minimo Cuentas con Saldo Suscripción'] > qtil_min_cuentas, qtil_min_cuentas,dd['Minimo Cuentas con Saldo Suscripción'])
dd['Maximo Cuentas con Saldo Suscripción']=np.where(dd['Maximo Cuentas con Saldo Suscripción'] > qtil_max_cuentas, qtil_max_cuentas,dd['Maximo Cuentas con Saldo Suscripción'])
dd['Dias Suspension']=np.where(dd['Dias Suspension'] > qtil_sus, qtil_sus,dd['Dias Suspension'])
### Drop
dd.drop(['Descripcion Mejor Codigo Gestion Mes','Codigo de Gestion Resultado Visita','Análisis Vector',
'Fecha de Instalacion','DÃa Pago 3','Descripcion localidad',
'Fecha Ingreso Fraude','Maxima fecha Ult Gestion','Usuario Grabador',
'DÃa Pago 1','DÃa Pago 2','Ultimo Codigo de Gestion Agrupado','# de Suscripción',
'fecha de importacion',
'Fecha de Asignacion_mes','Descripcion producto','Fecha Financiacion','Codigo estado de corte','Descripcion estado de corte'],axis=1,inplace=True)
dd.ratio_vlr_vencido_cobro.fillna(dd.ratio_vlr_vencido_cobro.median(),inplace=True)
dd['retiro']=np.where(dd['Fecha Retiro'].isna(),0,1)
dd.drop(['Nivel de riesgo experian','Fecha Retiro','Nivel de Riesgo','Indicador BI','Tipo Contactabilidad',
'Gestion comercial','Estrategia','Usuario Fraudulento','Tipo de Reporte a Central de Riesgos','Banco 2_Cl'],axis=1,inplace=True)
dd.ratio_vlr_vencido_cobro.fillna(dd.ratio_vlr_vencido_cobro.median(),inplace=True)
dd['Efectivo Pago']=dd['Efectivo Pago'].astype(str)
dd['Class_Total valor pendiente suscripcion']=dd['Class_Total valor pendiente suscripcion'].astype('str')
dd['Califica_suscr_class']=dd['Califica_suscr_class'].astype('str')
dd['# de Dias De Mora'].fillna(0,inplace=True)
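    # Jenks natural-breaks binning: split the numeric column into classes that
    # minimise within-class variance, then store the interval label as a string.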
breaks3 = jenkspy.jenks_breaks(dd['# de Dias De Mora'], nb_class=8)
dd['class_# de Dias De Mora'] = pd.cut(dd['# de Dias De Mora'] , bins=breaks3, include_lowest=True).astype(str)
breaks2 = jenkspy.jenks_breaks(dd['ratio_vlr_vencido_cobro'], nb_class=5)
dd['class_ratio_vlr_vencido_cobro_class'] = pd.cut(dd['ratio_vlr_vencido_cobro'] , bins=breaks2, include_lowest=True).astype(str)
dd['Total'].fillna(0,inplace=True)
dd['Total_clasificacion_cant_virtuales'] = pd.cut(x=dd['Total'],
bins=[-1,0,1,2,3,6,10,17,30,1000],
labels=["0","1","2","3","4-6","7-10", "11-17","18-30", ">30"]).astype(str).fillna('0')
    ### Split into subsets
sin_seg=dd[dd['IDENTIFICACIóN'].isna()]
sin_seg.drop(sin_seg[sin_seg.columns[79:139]].columns,axis=1,inplace=True)
    # with follow-up records
dd=dd[~dd['IDENTIFICACIóN'].isna()]
grupo=dd.groupby(['Efectivo Pago','Descripcion departamento', 'sistema origen',
'Vector Cualitativo # Suscripción', 'Tipificación Cliente',
'Perfil Digital', 'Descripcion subcategoria', 'Descripcion categoria', 'Estado del Cliente',
'Tipo Habito de Pago', 'Tipo Producto Servicio Suscrito', 'Analisis De Habito','Hogar',
'Califica_suscr_class', 'Banco 1_Cl','Descripcion estado de corte_Cl','class_Cantidad_de_cobros_diff_mes',
'class_Cantidad_recontactos_mes', 'Class_IVR',
'Class_sms','Class_Total valor pendiente suscripcion','Total_clasificacion_cant_virtuales',
'class_ratio_vlr_vencido_cobro_class','class_# de Dias De Mora']).size().reset_index(name='frecuency')
# dic_reg=pd.crosstab(grupo['Descripcion Regional'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
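    # Target (mean) encoding: for every level of each categorical variable,
    # compute the share of grouped rows where 'Efectivo Pago' == '1' and use
    # that rate as the replacement value further below.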
dic_des_dep=pd.crosstab(grupo['Descripcion departamento'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_vec_cua=pd.crosstab(grupo['Vector Cualitativo # Suscripción'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sis_origen=pd.crosstab(grupo['sistema origen'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_tipi_clien=pd.crosstab(grupo['Tipificación Cliente'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_per_dig=pd.crosstab(grupo['Perfil Digital'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_desc_sub=pd.crosstab(grupo['Descripcion subcategoria'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_desc_sus=pd.crosstab(grupo['Tipificacion suscripcion'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_ant_clie=pd.crosstab(grupo['Antiguedad Cliente'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_desc_cat=pd.crosstab(grupo['Descripcion categoria'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_est_cliente=pd.crosstab(grupo['Estado del Cliente'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_hab=pd.crosstab(grupo['Tipo Habito de Pago'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sus_tipo=pd.crosstab(grupo['Tipo Producto Servicio Suscrito'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ana_habi=pd.crosstab(grupo['Analisis De Habito'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ana_hogar=pd.crosstab(grupo['Hogar'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_cali=pd.crosstab(grupo['Califica_suscr_class'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ban=pd.crosstab(grupo['Banco 1_Cl'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_corte=pd.crosstab(grupo['Descripcion estado de corte_Cl'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_pend_sus=pd.crosstab(grupo['Class_Total valor pendiente suscripcion'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_sms=pd.crosstab(grupo['Class_sms'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ivr=pd.crosstab(grupo['Class_IVR'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# dic_CE=pd.crosstab(grupo['class_CE'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_mora=pd.crosstab(grupo['class_# de Dias De Mora'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_ratio=pd.crosstab(grupo['class_ratio_vlr_vencido_cobro_class'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
#dd['Descripcion Regional2']=dd['Descripcion Regional'].replace(dic_reg)
dd['Descripcion departamento2']=dd['Descripcion departamento'].replace(dic_des_dep)
dd['Vector Cualitativo # Suscripción2']=dd['Vector Cualitativo # Suscripción'].replace(dic_vec_cua)
dd['sistema origen2']=dd['sistema origen'].replace(dic_sis_origen)
dd['Tipificación Cliente2']=dd['Tipificación Cliente'].replace(dic_tipi_clien)
dd['Perfil Digital2']=dd['Perfil Digital'].replace(dic_per_dig)
dd['Descripcion subcategoria2']=dd['Descripcion subcategoria'].replace(dic_desc_sub)
# dd['Tipificacion suscripcion2']=dd['Tipificacion suscripcion'].replace(dic_desc_sus)
# dd['Antiguedad Cliente2']=dd['Antiguedad Cliente'].replace(dic_ant_clie)
dd['Descripcion categoria2']=dd['Descripcion categoria'].replace(dic_desc_cat)
# dd['Estado del Cliente2']=dd['Estado del Cliente'].replace(dic_est_cliente)
dd['Tipo Habito de Pago2']=dd['Tipo Habito de Pago'].replace(dic_hab)
dd['Tipo Producto Servicio Suscrito2']=dd['Tipo Producto Servicio Suscrito'].replace(dic_sus_tipo)
dd['Analisis De Habito2']=dd['Analisis De Habito'].replace(dic_ana_habi)
dd['Hogar2']=dd['Hogar'].replace(dic_ana_hogar)
dd['Califica_suscr_class2']=dd['Califica_suscr_class'].replace(dic_cali)
dd['Banco 1_Cl2']=dd['Banco 1_Cl'].replace(dic_ban)
dd['Descripcion estado de corte_Cl2']=dd['Descripcion estado de corte_Cl'].replace(dic_corte)
dd['Class_Total valor pendiente suscripcion2']=dd['Class_Total valor pendiente suscripcion'].replace(dic_pend_sus)
dd['Class_sms2']=dd['Class_sms'].replace(dic_sms)
dd['Class_IVR2']=dd['Class_IVR'].replace(dic_ivr)
# dd['class_CE2']=dd['class_CE'].replace(dic_CE)
dd['class_# de Dias De Mora2']=dd['class_# de Dias De Mora'].replace(dic_mora)
dd['class_ratio_vlr_vencido_cobro_class2']=dd['class_ratio_vlr_vencido_cobro_class'].replace(dic_ratio)
dd['Class_sms2'].fillna(0.5,inplace=True)
dd['Class_IVR2'].fillna(0.5,inplace=True)
#dd['class_CE2'].fillna(0.5,inplace=True)
dic_reco=pd.crosstab(grupo['class_Cantidad_de_cobros_diff_mes'].astype(str),grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_reco_mes=pd.crosstab(grupo['class_Cantidad_recontactos_mes'].astype(str),grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dd['class_Cantidad_recontactos_mes2']=dd['class_Cantidad_recontactos_mes'].replace(dic_reco_mes)
dd['class_Cantidad_de_cobros_diff_mes2']=dd['class_Cantidad_de_cobros_diff_mes'].replace(dic_reco)
dd['class_Cantidad_de_cobros_diff_mes2'].fillna('0',inplace=True)
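    # Aggregate score: sum every target-encoded column (names ending in '2'),
    # after dropping the columns that merely contain a '2' in their name, then
    # bin the result into five quality labels with Jenks natural breaks.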
dd['Estandar']=dd[dd.filter(like='2').columns.drop(['Banco 2_Virtual','12', '20', '21', '22', '23'])].sum(axis=1)
labels=["Deficiente", "Malo",'Regular',"Bueno","Muy bueno"]
breaks = jenkspy.jenks_breaks(dd['Estandar'], nb_class=5)
dd['cut_break'] = pd.cut(dd['Estandar'] , bins=breaks, labels=labels, include_lowest=True)
    ## repeat the encoding for the subset without follow-up
grupo_2=sin_seg.groupby(['Efectivo Pago','Descripcion departamento', 'sistema origen',
'Vector Cualitativo # Suscripción',
'Perfil Digital', 'Descripcion subcategoria',
'Descripcion categoria', 'Estado del Cliente',
'Tipo Habito de Pago',
'Analisis De Habito', 'Descripcion producto_resumen',
'Hogar', 'Califica_suscr_class', 'Banco 1_Cl', 'Causal_Cl',
'Descripcion estado de corte_Cl','Class_Total valor pendiente suscripcion','Total_clasificacion_cant_virtuales',
'class_ratio_vlr_vencido_cobro_class','class_# de Dias De Mora']).size().reset_index(name='frecuency')
# dic_reg=pd.crosstab(grupo_2['Descripcion Regional'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_des_dep=pd.crosstab(grupo_2['Descripcion departamento'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_vec_cua=pd.crosstab(grupo_2['Vector Cualitativo # Suscripción'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
    dic_sis_origen=pd.crosstab(grupo_2['sistema origen'],grupo_2['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/10/25 16:28
Desc: Sina Finance - all indices - real-time and historical quotation data
https://finance.sina.com.cn/realstock/company/sz399552/nc.shtml
"""
import datetime
import re
from mssdk.utils import demjson
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from mssdk.index.cons import (
zh_sina_index_stock_payload,
zh_sina_index_stock_url,
zh_sina_index_stock_count_url,
zh_sina_index_stock_hist_url,
)
from mssdk.stock.cons import hk_js_decode
def _replace_comma(x):
"""
    Remove any "," from a cell value
    :param x: cell element
    :type x: str
    :return: the cleaned value, or the original value if no comma is present
:rtype: str
"""
if ',' in str(x):
return str(x).replace(",", "")
else:
return x
def get_zh_index_page_count() -> int:
"""
    Total number of index listing pages
    http://vip.stock.finance.sina.com.cn/mkt/#hs_s
    :return: total number of index pages that need to be crawled
:rtype: int
"""
res = requests.get(zh_sina_index_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
def stock_zh_index_spot() -> pd.DataFrame:
"""
    Sina Finance - indices
    Heavy scraping will get the client IP banned by the target server
    http://vip.stock.finance.sina.com.cn/mkt/#hs_s
    :return: real-time quotations for all indices
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_zh_index_page_count()
zh_sina_stock_payload_copy = zh_sina_index_stock_payload.copy()
for page in tqdm(range(1, page_count + 1)):
zh_sina_stock_payload_copy.update({"page": page})
res = requests.get(zh_sina_index_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(res.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
big_df = big_df.applymap(_replace_comma)
big_df["trade"] = big_df["trade"].astype(float)
big_df["pricechange"] = big_df["pricechange"].astype(float)
big_df["changepercent"] = big_df["changepercent"].astype(float)
big_df["buy"] = big_df["buy"].astype(float)
big_df["sell"] = big_df["sell"].astype(float)
big_df["settlement"] = big_df["settlement"].astype(float)
big_df["open"] = big_df["open"].astype(float)
big_df["high"] = big_df["high"].astype(float)
big_df["low"] = big_df["low"].astype(float)
big_df["low"] = big_df["low"].astype(float)
big_df.columns = [
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'_',
'_',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'_',
'_',
]
big_df = big_df[
[
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
]
]
return big_df
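# Illustrative call (not part of the original module; requires network access
# to Sina and may be slow because every listing page is fetched):
#   spot_df = stock_zh_index_spot()
#   print(spot_df.head())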
def stock_zh_index_daily(symbol: str = "sh000922") -> pd.DataFrame:
"""
    Sina Finance - index - historical quotations; heavy scraping easily gets the IP banned
    https://finance.sina.com.cn/realstock/company/sh000909/nc.shtml
    :param symbol: index code, e.g. sz399998
    :type symbol: str
    :return: historical quotation data
:rtype: pandas.DataFrame
"""
params = {"d": "2020_2_4"}
res = requests.get(zh_sina_index_stock_hist_url.format(symbol), params=params)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
    ) # execute the JS decoding code
temp_df = pd.DataFrame(dict_list)
temp_df['date'] = pd.to_datetime(temp_df["date"]).dt.date
temp_df['open'] = pd.to_numeric(temp_df['open'])
temp_df['close'] = pd.to_numeric(temp_df['close'])
temp_df['high'] = pd.to_numeric(temp_df['high'])
temp_df['low'] = pd.to_numeric(temp_df['low'])
temp_df['volume'] = pd.to_numeric(temp_df['volume'])
return temp_df
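# Illustrative call (not part of the original module; requires network access):
#   hist_df = stock_zh_index_daily(symbol="sh000922")
#   print(hist_df.tail())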
def _get_tx_start_year(symbol: str = "sh000919") -> pd.DataFrame:
"""
    Tencent Securities - first day for which the security's data exists; note this is the first day of Tencent's own history
    http://gu.qq.com/sh000919/zs
    :param symbol: stock code with market prefix
    :type symbol: str
    :return: start date
:rtype: pandas.DataFrame
"""
url = "http://web.ifzq.gtimg.cn/other/klineweb/klineWeb/weekTrends"
params = {
"code": symbol,
"type": "qfq",
"_var": "trend_qfq",
"r": "0.3506048543943414",
}
r = requests.get(url, params=params)
data_text = r.text
if not demjson.decode(data_text[data_text.find("={") + 1 :])["data"]:
url = "https://proxy.finance.qq.com/ifzqgtimg/appstock/app/newfqkline/get"
params = {
"_var": "kline_dayqfq",
"param": f"{symbol},day,,,320,qfq",
"r": "0.751892490072597",
}
r = requests.get(url, params=params)
data_text = r.text
start_date = demjson.decode(data_text[data_text.find("={") + 1 :])["data"][symbol]["day"][0][0]
return start_date
start_date = demjson.decode(data_text[data_text.find("={") + 1 :])["data"][0][0]
return start_date
def stock_zh_index_daily_tx(symbol: str = "sz980017") -> pd.DataFrame:
"""
    Tencent Securities - daily frequency - historical data for stocks or indices
    Complements stock_zh_index_daily, since some index data is missing on Sina
    Note: all data is forward-adjusted (qfq); adjustment methods differ between sites, so do not mix data sources
    http://gu.qq.com/sh000919/zs
    :param symbol: stock or index code with market prefix
    :type symbol: str
    :return: adjusted stock or index data
:rtype: pandas.DataFrame
"""
start_date = _get_tx_start_year(symbol=symbol)
url = "https://proxy.finance.qq.com/ifzqgtimg/appstock/app/newfqkline/get"
range_start = int(start_date.split("-")[0])
range_end = datetime.date.today().year + 1
    temp_df = pd.DataFrame()
import argparse
from itertools import product
from experiment import *
import pandas as pd
from params_helpers import *
# Search parameters for ILP formulation
def search_ilp(insdir, out, lp1, up1, lp2, up2):
try:
os.mkdir(out)
except OSError:
print("Creation of the directory failed or directory already exists")
for instance_name in os.listdir(insdir):
instance_name = instance_name[:-4]
print("-------------------File ", instance_name, "is processed------------------")
try:
os.mkdir(f"{out}/{instance_name}")
except OSError:
print("Creation of the directory failed")
ins = Instance_ilp(instance_name, insdir)
earliest, latest = ins.earliest, ins.latest
C = ins.C
# Find optimal route by brute force
opt_route, opt_cost = brute_force_tsptw(C, latest, earliest)
# Convert route to array
x = route_to_array_ilp(opt_route, ins)
index = 0
# Parameter search
for p1, p2 in product(np.linspace(lp1, up1, num=(up1 - lp1 + 1)), np.linspace(lp2, up2, num=(up2 - lp2 + 1))):
# Get ising formulation
h, J, of = ising_ilp(ins, p1, p2)
# Run simulated annealing
sampleset = anneal(h, J, BETA_RANGE, NUM_READS, NUM_SWEEPS, BETA_SCHEDULE_TYPE)
# Evaluate each sample
Results = evaluate_sampleset_ilp(ins, sampleset)
# Prepare pandas dataframe for the results
Results.append((False, False, 100, 0, 0, 0, 0))
data = pd.DataFrame(Results)
data.columns = ['valid', 'windows', 'cost', 'energy', 'A', 'B', 'E']
data['A'] = p1
data['B'] = 1
data['E'] = p2
# Energy of the optimal route
energy = dimod.ising_energy(binary_to_spin(x), h, J)
data.loc[len(data)] = [True, True, opt_cost, energy, -1, 0, 0]
# Store the results
data.to_pickle(f"{out}/{instance_name}/{instance_name}_{index}")
index += 1
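# Small illustration (assumed bounds, not taken from the original runs) of the penalty
# grid scanned above: np.linspace with num = upper - lower + 1 yields integer-spaced
# values, and product() pairs every A-penalty with every E-penalty.
def _example_penalty_grid(lp1=1, up1=3, lp2=1, up2=2):
    grid = list(product(np.linspace(lp1, up1, num=(up1 - lp1 + 1)),
                        np.linspace(lp2, up2, num=(up2 - lp2 + 1))))
    # -> [(1.0, 1.0), (1.0, 2.0), (2.0, 1.0), (2.0, 2.0), (3.0, 1.0), (3.0, 2.0)]
    return grid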
# Search parameters for edge-based formulation
def search_edge(insdir, out, lp1, up1, lp2, up2):
for instance_name in os.listdir(insdir):
instance_name = instance_name[:-4]
print("-------------------File ", instance_name, "is processed------------------")
try:
os.mkdir(f"{out}/{instance_name}")
except OSError:
print(f"Creation of the directory {out}/{instance_name} failed")
quit()
ins = Instance_edge(instance_name, insdir)
C = ins.C
earliest, latest = ins.earliest, ins.latest
# Find optimal route by brute force
opt_route, opt_cost = brute_force_tsptw(C, latest, earliest)
x = route_to_array_edge(opt_route, ins)
index = 0
# Parameter search
for p1, p2 in product(np.linspace(lp1, up1, num=(up1 - lp1 + 1)), np.linspace(lp2, up2, num=(up2 - lp2 + 1))):
# Get ising formulation
h, J, of = ising_edge(ins, p1, p2)
# Run simulated annealing
sampleset = anneal(h, J, BETA_RANGE, NUM_READS, NUM_SWEEPS, BETA_SCHEDULE_TYPE)
# Evaluate each sample
Results = evaluate_sampleset_edge(ins, sampleset)
# Prepare pandas dataframe for the results
Results.append((False, False, 100, 0, 0, 0, 0))
data = pd.DataFrame(Results)
data.columns = ['valid', 'windows', 'cost', 'energy', 'A', 'B', 'E']
data['A'] = p1
data['B'] = 1
data['E'] = p2
# Energy of the optimal route
energy = dimod.ising_energy(binary_to_spin(x), h, J)
data.loc[len(data)] = [True, True, opt_cost, energy, -1, 0, 0]
# Store the results
data.to_pickle(f"{out}/{instance_name}/{instance_name}_{index}")
index += 1
def probs(out):
for folder in os.listdir(out):
if "summary" not in folder:
columns = ['A', 'E', 'p', 'mp']
results = pd.DataFrame(columns=columns)
for f in os.listdir(f"{out}/{folder}"):
data = | pd.read_pickle(f"{out}/{folder}/{f}") | pandas.read_pickle |
import copy
import time
import calendar
import netCDF4
import numpy
from numpy import savez_compressed
from numpy import load
import pandas
import tensorflow
from tensorflow.keras.utils import to_categorical
import scipy.ndimage
import matplotlib.pyplot as pyplot
import seaborn as sns
import os.path
# Variable names.
NETCDF_X = 'x'
NETCDF_Y = 'y'
MUR_LATITUDE = 'lat'
MUR_LONGITUDE = 'lon'
NETCDF_LATITUDE = 'latitude'
NETCDF_LONGITUDE = 'longitude'
NETCDF_TIME = 'time'
NETCDF_UGRD_10m = 'UGRD_10maboveground'
#NETCDF_UGRD_1000mb= 'UGRD_1000mb'
NETCDF_UGRD_975mb = 'UGRD_975mb'
NETCDF_UGRD_950mb = 'UGRD_950mb'
NETCDF_UGRD_925mb = 'UGRD_925mb'
NETCDF_UGRD_900mb = 'UGRD_900mb'
NETCDF_UGRD_875mb = 'UGRD_875mb'
NETCDF_UGRD_850mb = 'UGRD_850mb'
NETCDF_UGRD_825mb = 'UGRD_825mb'
NETCDF_UGRD_800mb = 'UGRD_800mb'
NETCDF_UGRD_775mb = 'UGRD_775mb'
NETCDF_UGRD_750mb = 'UGRD_750mb'
NETCDF_UGRD_725mb = 'UGRD_725mb'
NETCDF_UGRD_700mb = 'UGRD_700mb'
NETCDF_VGRD_10m = 'VGRD_10maboveground'
#NETCDF_VGRD_1000mb= 'VGRD_1000mb'
NETCDF_VGRD_975mb = 'VGRD_975mb'
NETCDF_VGRD_950mb = 'VGRD_950mb'
NETCDF_VGRD_925mb = 'VGRD_925mb'
NETCDF_VGRD_900mb = 'VGRD_900mb'
NETCDF_VGRD_875mb = 'VGRD_875mb'
NETCDF_VGRD_850mb = 'VGRD_850mb'
NETCDF_VGRD_825mb = 'VGRD_825mb'
NETCDF_VGRD_800mb = 'VGRD_800mb'
NETCDF_VGRD_775mb = 'VGRD_775mb'
NETCDF_VGRD_750mb = 'VGRD_750mb'
NETCDF_VGRD_725mb = 'VGRD_725mb'
NETCDF_VGRD_700mb = 'VGRD_700mb'
#NETCDF_VVEL_1000mb= 'VVEL_1000mb'
NETCDF_VVEL_975mb = 'VVEL_975mb'
NETCDF_VVEL_950mb = 'VVEL_950mb'
NETCDF_VVEL_925mb = 'VVEL_925mb'
NETCDF_VVEL_900mb = 'VVEL_900mb'
NETCDF_VVEL_875mb = 'VVEL_875mb'
NETCDF_VVEL_850mb = 'VVEL_850mb'
NETCDF_VVEL_825mb = 'VVEL_825mb'
NETCDF_VVEL_800mb = 'VVEL_800mb'
NETCDF_VVEL_775mb = 'VVEL_775mb'
NETCDF_VVEL_750mb = 'VVEL_750mb'
NETCDF_VVEL_725mb = 'VVEL_725mb'
NETCDF_VVEL_700mb = 'VVEL_700mb'
#NETCDF_TKE_1000mb = 'TKE_1000mb'
NETCDF_TKE_975mb = 'TKE_975mb'
NETCDF_TKE_950mb = 'TKE_950mb'
NETCDF_TKE_925mb = 'TKE_925mb'
NETCDF_TKE_900mb = 'TKE_900mb'
NETCDF_TKE_875mb = 'TKE_875mb'
NETCDF_TKE_850mb = 'TKE_850mb'
NETCDF_TKE_825mb = 'TKE_825mb'
NETCDF_TKE_800mb = 'TKE_800mb'
NETCDF_TKE_775mb = 'TKE_775mb'
NETCDF_TKE_750mb = 'TKE_750mb'
NETCDF_TKE_725mb = 'TKE_725mb'
NETCDF_TKE_700mb = 'TKE_700mb'
NETCDF_TMP_SFC = 'TMP_surface'
NETCDF_TMP_2m = 'TMP_2maboveground'
#NETCDF_TMP_1000mb= 'TMP_1000mb'
NETCDF_TMP_975mb = 'TMP_975mb'
NETCDF_TMP_950mb = 'TMP_950mb'
NETCDF_TMP_925mb = 'TMP_925mb'
NETCDF_TMP_900mb = 'TMP_900mb'
NETCDF_TMP_875mb = 'TMP_875mb'
NETCDF_TMP_850mb = 'TMP_850mb'
NETCDF_TMP_825mb = 'TMP_825mb'
NETCDF_TMP_800mb = 'TMP_800mb'
NETCDF_TMP_775mb = 'TMP_775mb'
NETCDF_TMP_750mb = 'TMP_750mb'
NETCDF_TMP_725mb = 'TMP_725mb'
NETCDF_TMP_700mb = 'TMP_700mb'
#NETCDF_RH_1000mb = 'RH_1000mb'
NETCDF_RH_975mb = 'RH_975mb'
NETCDF_RH_950mb = 'RH_950mb'
NETCDF_RH_925mb = 'RH_925mb'
NETCDF_RH_900mb = 'RH_900mb'
NETCDF_RH_875mb = 'RH_875mb'
NETCDF_RH_850mb = 'RH_850mb'
NETCDF_RH_825mb = 'RH_825mb'
NETCDF_RH_800mb = 'RH_800mb'
NETCDF_RH_775mb = 'RH_775mb'
NETCDF_RH_750mb = 'RH_750mb'
NETCDF_RH_725mb = 'RH_725mb'
NETCDF_RH_700mb = 'RH_700mb'
NETCDF_DPT_2m = 'DPT_2maboveground'
NETCDF_FRICV = 'FRICV_surface'
NETCDF_VIS = 'VIS_surface'
NETCDF_RH_2m = 'RH_2maboveground'
#
NETCDF_Q975 = 'Q_975mb'
NETCDF_Q950 = 'Q_950mb'
NETCDF_Q925 = 'Q_925mb'
NETCDF_Q900 = 'Q_900mb'
NETCDF_Q875 = 'Q_875mb'
NETCDF_Q850 = 'Q_850mb'
NETCDF_Q825 = 'Q_825mb'
NETCDF_Q800 = 'Q_800mb'
NETCDF_Q775 = 'Q_775mb'
NETCDF_Q750 = 'Q_750mb'
NETCDF_Q725 = 'Q_725mb'
NETCDF_Q700 = 'Q_700mb'
NETCDF_Q = 'Q_surface'
#NETCDF_DQDZ1000SFC = 'DQDZ1000SFC'
NETCDF_DQDZ975SFC = 'DQDZ975SFC'
NETCDF_DQDZ950975 = 'DQDZ950975'
NETCDF_DQDZ925950 = 'DQDZ925950'
NETCDF_DQDZ900925 = 'DQDZ900925'
NETCDF_DQDZ875900 = 'DQDZ875900'
NETCDF_DQDZ850875 = 'DQDZ850875'
NETCDF_DQDZ825850 = 'DQDZ825850'
NETCDF_DQDZ800825 = 'DQDZ800825'
NETCDF_DQDZ775800 = 'DQDZ775800'
NETCDF_DQDZ750775 = 'DQDZ750775'
NETCDF_DQDZ725750 = 'DQDZ725750'
NETCDF_DQDZ700725 = 'DQDZ700725'
NETCDF_LCLT = 'LCLT'
NETCDF_DateVal = 'DateVal'
#+++++++++++++++
NETCDF_SST = 'analysed_sst'
NETCDF_PREDICTOR_NAMES = {
'OldOrder': [NETCDF_TMP_2m, NETCDF_TMP_975mb, NETCDF_TMP_950mb, NETCDF_TMP_925mb,
NETCDF_TMP_900mb, NETCDF_TMP_875mb, NETCDF_TMP_850mb, NETCDF_TMP_825mb, NETCDF_TMP_800mb, NETCDF_TMP_775mb, NETCDF_TMP_750mb,
NETCDF_TMP_725mb, NETCDF_TMP_700mb, NETCDF_UGRD_10m, NETCDF_VGRD_10m, NETCDF_FRICV, NETCDF_TKE_975mb,
NETCDF_TKE_950mb, NETCDF_TKE_925mb, NETCDF_TKE_900mb, NETCDF_TKE_875mb, NETCDF_TKE_850mb, NETCDF_TKE_825mb, NETCDF_TKE_800mb,
NETCDF_TKE_775mb, NETCDF_TKE_750mb, NETCDF_TKE_725mb, NETCDF_TKE_700mb, NETCDF_UGRD_975mb, NETCDF_UGRD_950mb,
NETCDF_UGRD_925mb, NETCDF_UGRD_900mb, NETCDF_UGRD_875mb, NETCDF_UGRD_850mb, NETCDF_UGRD_825mb, NETCDF_UGRD_800mb, NETCDF_UGRD_775mb,
NETCDF_UGRD_750mb, NETCDF_UGRD_725mb, NETCDF_UGRD_700mb, NETCDF_VGRD_975mb, NETCDF_VGRD_950mb, NETCDF_VGRD_925mb,
NETCDF_VGRD_900mb, NETCDF_VGRD_875mb, NETCDF_VGRD_850mb, NETCDF_VGRD_825mb, NETCDF_VGRD_800mb, NETCDF_VGRD_775mb, NETCDF_VGRD_750mb,
NETCDF_VGRD_725mb, NETCDF_VGRD_700mb, NETCDF_Q975, NETCDF_Q950, NETCDF_Q925, NETCDF_Q900, NETCDF_Q875, NETCDF_Q850, NETCDF_Q825, NETCDF_Q800,
NETCDF_Q775,NETCDF_Q750, NETCDF_Q725, NETCDF_Q700,
NETCDF_RH_975mb, NETCDF_RH_950mb, NETCDF_RH_925mb,NETCDF_RH_900mb, NETCDF_RH_875mb, NETCDF_RH_850mb, NETCDF_RH_825mb, NETCDF_RH_800mb,
NETCDF_RH_775mb, NETCDF_RH_750mb, NETCDF_RH_725mb, NETCDF_RH_700mb, NETCDF_DPT_2m, NETCDF_Q, NETCDF_RH_2m, NETCDF_LCLT, NETCDF_VIS,
NETCDF_VVEL_975mb, NETCDF_VVEL_950mb, NETCDF_VVEL_925mb, NETCDF_VVEL_900mb, NETCDF_VVEL_875mb, NETCDF_VVEL_850mb, NETCDF_VVEL_825mb,
NETCDF_VVEL_800mb, NETCDF_VVEL_775mb, NETCDF_VVEL_750mb, NETCDF_VVEL_725mb, NETCDF_VVEL_700mb],
'NewOrder': [NETCDF_TMP_2m, NETCDF_TMP_975mb, NETCDF_TMP_950mb, NETCDF_TMP_925mb,
NETCDF_TMP_900mb, NETCDF_TMP_875mb, NETCDF_TMP_850mb, NETCDF_TMP_825mb, NETCDF_TMP_800mb, NETCDF_TMP_775mb, NETCDF_TMP_750mb,
NETCDF_TMP_725mb, NETCDF_TMP_700mb, NETCDF_UGRD_10m, NETCDF_VGRD_10m, NETCDF_FRICV, NETCDF_UGRD_975mb, NETCDF_VGRD_975mb, NETCDF_TKE_975mb,
NETCDF_UGRD_950mb, NETCDF_VGRD_950mb, NETCDF_TKE_950mb, NETCDF_UGRD_925mb, NETCDF_VGRD_925mb, NETCDF_TKE_925mb, NETCDF_UGRD_900mb, NETCDF_VGRD_900mb,
NETCDF_TKE_900mb, NETCDF_UGRD_875mb, NETCDF_VGRD_875mb, NETCDF_TKE_875mb, NETCDF_UGRD_850mb, NETCDF_VGRD_850mb, NETCDF_TKE_850mb, NETCDF_UGRD_825mb,
NETCDF_VGRD_825mb, NETCDF_TKE_825mb, NETCDF_UGRD_800mb, NETCDF_VGRD_800mb, NETCDF_TKE_800mb, NETCDF_UGRD_775mb, NETCDF_VGRD_775mb,
NETCDF_TKE_775mb, NETCDF_UGRD_750mb, NETCDF_VGRD_750mb, NETCDF_TKE_750mb, NETCDF_UGRD_725mb, NETCDF_VGRD_725mb, NETCDF_TKE_725mb,
NETCDF_UGRD_700mb, NETCDF_VGRD_700mb, NETCDF_TKE_700mb, NETCDF_Q975, NETCDF_Q950, NETCDF_Q925, NETCDF_Q900, NETCDF_Q875, NETCDF_Q850,
NETCDF_Q825, NETCDF_Q800, NETCDF_Q775,NETCDF_Q750, NETCDF_Q725, NETCDF_Q700,
NETCDF_RH_975mb, NETCDF_RH_950mb, NETCDF_RH_925mb,NETCDF_RH_900mb, NETCDF_RH_875mb, NETCDF_RH_850mb, NETCDF_RH_825mb, NETCDF_RH_800mb,
NETCDF_RH_775mb, NETCDF_RH_750mb, NETCDF_RH_725mb, NETCDF_RH_700mb, NETCDF_DPT_2m, NETCDF_Q, NETCDF_RH_2m, NETCDF_LCLT, NETCDF_VIS,
NETCDF_VVEL_975mb, NETCDF_VVEL_950mb, NETCDF_VVEL_925mb, NETCDF_VVEL_900mb, NETCDF_VVEL_875mb, NETCDF_VVEL_850mb, NETCDF_VVEL_825mb,
NETCDF_VVEL_800mb, NETCDF_VVEL_775mb, NETCDF_VVEL_750mb, NETCDF_VVEL_725mb, NETCDF_VVEL_700mb],
'Physical_G1':[NETCDF_FRICV, NETCDF_UGRD_10m, NETCDF_UGRD_975mb, NETCDF_UGRD_950mb, NETCDF_UGRD_925mb, NETCDF_UGRD_900mb,
NETCDF_UGRD_875mb, NETCDF_UGRD_850mb, NETCDF_UGRD_825mb, NETCDF_UGRD_800mb, NETCDF_UGRD_775mb, NETCDF_UGRD_750mb,
NETCDF_UGRD_725mb, NETCDF_UGRD_700mb, NETCDF_VGRD_10m, NETCDF_VGRD_975mb, NETCDF_VGRD_950mb, NETCDF_VGRD_925mb,
NETCDF_VGRD_900mb, NETCDF_VGRD_875mb, NETCDF_VGRD_850mb, NETCDF_VGRD_825mb, NETCDF_VGRD_800mb, NETCDF_VGRD_775mb, NETCDF_VGRD_750mb,
NETCDF_VGRD_725mb, NETCDF_VGRD_700mb],
'Physical_G2':[NETCDF_TKE_975mb, NETCDF_TKE_950mb, NETCDF_TKE_925mb, NETCDF_TKE_900mb, NETCDF_TKE_875mb, NETCDF_TKE_850mb, NETCDF_TKE_825mb,
NETCDF_TKE_800mb, NETCDF_TKE_775mb, NETCDF_TKE_750mb, NETCDF_TKE_725mb, NETCDF_TKE_700mb, NETCDF_Q975, NETCDF_Q950, NETCDF_Q925, NETCDF_Q900,
NETCDF_Q875, NETCDF_Q850, NETCDF_Q825, NETCDF_Q800, NETCDF_Q775,NETCDF_Q750, NETCDF_Q725, NETCDF_Q700],
'Physical_G3':[NETCDF_TMP_2m, NETCDF_TMP_975mb, NETCDF_TMP_950mb, NETCDF_TMP_925mb, NETCDF_TMP_900mb, NETCDF_TMP_875mb, NETCDF_TMP_850mb,
NETCDF_TMP_825mb, NETCDF_TMP_800mb, NETCDF_TMP_775mb, NETCDF_TMP_750mb, NETCDF_TMP_725mb, NETCDF_TMP_700mb, NETCDF_DPT_2m, NETCDF_RH_2m,
NETCDF_RH_975mb, NETCDF_RH_950mb, NETCDF_RH_925mb,NETCDF_RH_900mb, NETCDF_RH_875mb, NETCDF_RH_850mb, NETCDF_RH_825mb, NETCDF_RH_800mb,
NETCDF_RH_775mb, NETCDF_RH_750mb, NETCDF_RH_725mb, NETCDF_RH_700mb],
'Physical_G4':[NETCDF_Q, NETCDF_LCLT, NETCDF_VIS,
NETCDF_VVEL_975mb, NETCDF_VVEL_950mb, NETCDF_VVEL_925mb, NETCDF_VVEL_900mb, NETCDF_VVEL_875mb, NETCDF_VVEL_850mb, NETCDF_VVEL_825mb,
NETCDF_VVEL_800mb, NETCDF_VVEL_775mb, NETCDF_VVEL_750mb, NETCDF_VVEL_725mb, NETCDF_VVEL_700mb]
}
NETCDF_TMPDPT = 'TMP-DPT'
NETCDF_TMPSST = 'TMP-SST'
NETCDF_DPTSST = 'DPT-SST'
NETCDF_MUR_NAMES = [NETCDF_SST]
NETCDF_TMP_NAMES = [NETCDF_TMP_SFC, NETCDF_TMP_2m, NETCDF_DPT_2m]
NETCDF_MIXED_NAMES = [NETCDF_SST, NETCDF_TMPDPT, NETCDF_TMPSST, NETCDF_DPTSST]
NETCDF_GEN_NAMES = [NETCDF_TMPDPT, NETCDF_TMPSST, NETCDF_DPTSST]
# Directories.
YEAR_FOG_DIR_NAME = '.'
ALL_FOG_DIR_NAME = '..'
DEFAULT_IMAGE_DIR_NAME = ('/data1/fog-data/fog-maps/')
#6HOURS
DEFAULT_TARGET_DIR_NAME = ('../Dataset/TARGET/')
SAVE_CUBE_DIR = '../Dataset/INPUT/MinMax/HIGH/'
SAVE_FILE_NAMES_DIR = '../Dataset/NAMES/'
SAVE_TARGET_DIR = '../Dataset/TARGET/'
DEFAULT_CUBES_12_DIR_NAME = ('../Dataset/INPUT/12Hours/')
DEFAULT_TARGET_DIR_NAME = ('../Dataset/TARGET/12Hours/')
#12HOURS
DEFAULT_12HOURS_TARGET_DIR = ('../Dataset/12HOURS/TARGET/')
DEFAULT_12HOURS_CUBES_DIR = ('../Dataset/12HOURS/INPUT/')
DEFAULT_12HOURS_NAMES_DIR = ('../Dataset/12HOURS/NAMES/')
#24HOURS
DEFAULT_24HOURS_TARGET_DIR = ('../Dataset/24HOURS/TARGET/')
DEFAULT_24HOURS_CUBES_DIR = ('../Dataset/24HOURS/INPUT/')
DEFAULT_24HOURS_NAMES_DIR = ('../Dataset/24HOURS/NAMES/')
### Defult Names and Settings
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
FIG_DEFULT_SIZE = (12, 10)
PREDICTOR_NAMES_KEY = 'predictor_names'
PREDICTOR_MATRIX_KEY = 'predictor_matrix'
CUBE_NAMES_KEY = 'cube_name'
SST_MATRIX_KEY = 'sst_matrix'
SST_NAME_KEY = 'sst_name'
# Misc constants.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
TIME_CYCLE_FORMAT = '[0-9][0-9][0][0]'
HOUR_PREDICTION_FORMAT = '[0-9][0-9][0-9]'
NUM_VALUES_KEY = 'num_values'
MEAN_VALUE_KEY = 'mean_value'
MEAN_OF_SQUARES_KEY = 'mean_of_squares'
#==========================================================================================#
#================================ preprocessing tools =====================================#
#==========================================================================================#
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
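# Worked example (illustrative only) of the two converters above: a "yyyymmdd" string
# is interpreted as 0000 UTC of that day, and the round trip returns the same string.
def _example_time_round_trip():
    unix_sec = time_string_to_unix('20190315', DATE_FORMAT)   # -> 1552608000
    assert time_unix_to_string(unix_sec, DATE_FORMAT) == '20190315'
    return unix_sec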
def _nc_file_name_to_date(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
date_string = pathless_file_name.replace(pathless_file_name[0:5], '').replace(
pathless_file_name[-18:], '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
def _nc_file_name_to_timecycle(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: time-cycle prediction.
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
timecycle_string = pathless_file_name.replace(pathless_file_name[0:14], '').replace(
pathless_file_name[-13:], '')
# Verify.
#time_string_to_unix(time_string=timecycle_string, time_format=TIME_CYCLE_FORMAT)
return timecycle_string
def _nc_file_name_to_hourprediction(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: time-cycle prediction.
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
hourpredic_string = pathless_file_name.replace(pathless_file_name[0:19], '').replace(
pathless_file_name[-9:], '')
return hourpredic_string
def find_match_name (netcdf_file_name):
date = _nc_file_name_to_date(netcdf_file_name)
timecycle = _nc_file_name_to_timecycle(netcdf_file_name)
hourprediction = _nc_file_name_to_hourprediction(netcdf_file_name)
this_cube_match_name = date + timecycle
return {'cube_match_name' : this_cube_match_name,
'date' : date,
'timecycle' : timecycle,
'hourprediction' : hourprediction}
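# Illustrative check of the fixed-offset slicing above, assuming file names shaped like
# "maps_YYYYMMDD_TTTT_HHH_input.nc" (the pattern implied by the offsets and by the cube
# names built later); the exact prefix is an assumption, not taken from the real data.
def _example_find_match_name():
    info = find_match_name('maps_20190315_0000_006_input.nc')
    # -> {'cube_match_name': '201903150000', 'date': '20190315',
    #     'timecycle': '0000', 'hourprediction': '006'}
    return info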
def netcdf_names_check(root_dir, target=None):
# Reading Target data and list all NAM index to check and remove them plus corresponding map data:
nan_index = target[target['VIS_Cat'].isnull().values].index.tolist()
Nannames = target['Date'].iloc[nan_index]
names = Nannames.values
NAN = pandas.DataFrame(columns = ['name'])
NAN['name'] = names
# Reading the directory of map data and check and remove those they are incomplete or target is NAN!
    netcef_nams_file_name = [] # we need to return the names of maps which are complete!
    netcef_murs_file_name = [] # we need to return the names of maps which are complete!
for root, dirs, files in os.walk(root_dir):
dirs.sort()
files.sort()
datavalume = len(files)
if datavalume == 149:
namesplit = os.path.split(files[0])[-1]
match_name_1 = namesplit.replace(namesplit[:5], '').replace(namesplit[13:], '')
for f in NAN['name'].isin([int(match_name_1)]):
if f is True:
foldercondition = False
#print(('The Traget for "{0}" day is NAN!').format(match_name_1))
#print('Removed the corresponding map for days with NAN Target!')
#print('=====================================================================')
break
else:
foldercondition = True
if (foldercondition is True):
for name in files:
namesplit = os.path.split(name)[-1]
namOrmur = namesplit.replace(namesplit[4:], '')
if (namOrmur == 'murs'):
name = root +'/'+ name
netcef_murs_file_name.append(name)
netcef_murs_file_name.sort()
elif (namOrmur == 'maps'):
name = root +'/'+ name
netcef_nams_file_name.append(name)
netcef_nams_file_name.sort()
elif datavalume < 149 and datavalume != 0:
if files[0].endswith(".txt"):
print('break')
else:
namesplit = os.path.split(files[0])[-1]
match_name = namesplit.replace(namesplit[:5], '').replace(namesplit[13:], '')
#print(('The expected maps is 149 which there are "{0}" maps for {1} day!').format(datavalume, match_name))
target = target.drop(target[target.Date == int(match_name)].index)
#print('Removed the corresponding target values for days with incomplete data!')
#print('=====================================================================')
target = target.dropna()
target = RenewDf(target)
for d in target['Date']:
if target.loc[target.Date == d, 'Date'].count() < 4:
target = target.drop(target.loc[target.Date == d, 'Date'].index)
target = RenewDf(target)
return [netcef_nams_file_name, netcef_murs_file_name, target]
def RenewDf(df):
newdf = pandas.DataFrame(columns=['Date', 'VIS', 'VIS_Cat'])
dates = df['Date'].values
cat = df['VIS_Cat'].values
vis = df['VIS'].values
newdf['Date'] = dates
newdf['VIS_Cat'] = cat
newdf['VIS'] = vis
return newdf
def copy_mur_name_ntimes(netcdf_file_names, output, n):
for i in range(len(netcdf_file_names)):
name = netcdf_file_names[i]
for j in range(n):
output.append(name)
return output
def map_upsampling(downsampled_cube_file):
upsampled_map = None
upsampled_map = scipy.ndimage.zoom(downsampled_cube_file, 11.75, order=3)
return upsampled_map
def map_downsampling(upsampled_cube_file):
downsampled_map = None
downsampled_map = scipy.ndimage.zoom(upsampled_cube_file, 0.0851, order=3)
return downsampled_map
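# Quick shape sketch of the two resampling helpers, assuming the NAM grid is 32x32 and
# the MUR grid is 376x376 (the zoom factors 11.75 and 0.0851 imply those sizes; this is
# an inference, not taken from the original configuration).
def _example_resampling_shapes():
    low = numpy.zeros((32, 32))
    high = map_upsampling(low)        # scipy.ndimage.zoom by 11.75 -> (376, 376)
    back = map_downsampling(high)     # zoom by 0.0851 -> (32, 32)
    return high.shape, back.shape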
#===========================================================================================#
#=============== Finding the cubes based on their names ===================================#
#===========================================================================================#
def find_map_name_date(first_date_string, last_date_string, target = None, image_dir_name = DEFAULT_IMAGE_DIR_NAME):
"""Finds image (NetCDF) files in the given date range.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:param image_dir_name: Name of directory with image (NetCDF) files.
:return: netcdf_file_names: 1-D list of paths to image files.
"""
# check the target and return the desierd target index:
Dates = target['Date'].values
good_indices_target = numpy.where(numpy.logical_and(
Dates >= int(first_date_string),
Dates <= int(last_date_string)
))[0]
input_target = target.take(good_indices_target)
target_1 = RenewDf(input_target)
netcdf_nam_file_names, netcdf_mur_file_names, target_2 = netcdf_names_check(image_dir_name, target_1)
target = RenewDf(target_2)
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT)
# NAM Data
file_date_strings = [_nc_file_name_to_date(f) for f in netcdf_nam_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices_nam = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
# MUR Data
file_date_strings_mur = [_nc_file_name_to_date(f) for f in netcdf_mur_file_names]
file_times_unix_sec_mur = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings_mur
], dtype=int)
good_indices_mur = numpy.where(numpy.logical_and(
file_times_unix_sec_mur >= first_time_unix_sec,
file_times_unix_sec_mur <= last_time_unix_sec
))[0]
return [netcdf_nam_file_names[k] for k in good_indices_nam], [netcdf_mur_file_names[k] for k in good_indices_mur], target
def find_nam_cubes_name_hourpredict(netcdf_file_names, hour_prediction_names = ['000', '006', '012', '024']):
"""Depend on the time prediction this function just select the name of selected maps:
for example in this case, the time prediction 000, 003 and 006 hour has been selected.
"""
file_date_strings = [_nc_file_name_to_hourprediction(f) for f in netcdf_file_names]
file_date_strings = pandas.DataFrame(file_date_strings, columns = ['str'])
good_indices = file_date_strings[
(file_date_strings['str'] == hour_prediction_names[0]) |
(file_date_strings['str'] == hour_prediction_names[1]) |
(file_date_strings['str'] == hour_prediction_names[2]) |
(file_date_strings['str'] == hour_prediction_names[3])]
return [netcdf_file_names[k] for k in list(good_indices.index)]
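# Illustrative call (hypothetical file names) of the forecast-hour filter above; only
# names whose three-digit hour suffix is one of the four defaults survive.
def _example_hourpredict_filter():
    names = ['maps_20190315_0000_003_input.nc',
             'maps_20190315_0000_006_input.nc',
             'maps_20190315_0000_024_input.nc']
    return find_nam_cubes_name_hourpredict(names)   # -> the 006 and 024 entries only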
#============================================================================
#==================== Reading Nam and MUR maps =======================
#============================================================================
def read_nam_maps(netcdf_file_name, PREDICTOR_NAMES):
"""Reads fog-centered maps from NetCDF file.
E = number of examples (fog objects) in file
M = number of rows in each fog-centered grid
N = number of columns in each fog-centered grid
C = number of channels (predictor variables)
:param netcdf_file_name: Path to input file.
:return: image_dict: Dictionary with the following keys.
image_dict['predictor_names']: length-C list of predictor names.
image_dict['predictor_matrix']: E-by-M-by-N-by-C numpy array of predictor
values.
"""
NETCDF_PREDICTOR_NAMES = PREDICTOR_NAMES
dataset_object = netCDF4.Dataset(netcdf_file_name)
lons = numpy.array(dataset_object.variables[NETCDF_LONGITUDE][:], dtype=float)
lats = numpy.array(dataset_object.variables[NETCDF_LATITUDE][:], dtype=float)
predictor_matrix = None
for this_predictor_name in NETCDF_PREDICTOR_NAMES:
this_predictor_matrix = numpy.array(
dataset_object.variables[this_predictor_name][:], dtype=float
)
this_predictor_matrix = numpy.expand_dims(
this_predictor_matrix, axis=-1)
if predictor_matrix is None:
predictor_matrix = this_predictor_matrix + 0.
else:
predictor_matrix = numpy.concatenate(
(predictor_matrix, this_predictor_matrix), axis=-1
)
return {
PREDICTOR_MATRIX_KEY: predictor_matrix,
PREDICTOR_NAMES_KEY: NETCDF_PREDICTOR_NAMES,
NETCDF_LONGITUDE: lons,
NETCDF_LATITUDE: lats}
def read_mur_map(netcdf_file_name, PREDICTOR_NAMES):
NETCDF_PREDICTOR_NAMES = PREDICTOR_NAMES
dataset_object = netCDF4.Dataset(netcdf_file_name)
lons = numpy.array(dataset_object.variables[MUR_LONGITUDE][:], dtype=float)
lats = numpy.array(dataset_object.variables[MUR_LATITUDE][:], dtype=float)
predictor_matrix = None
for this_predictor_name in NETCDF_PREDICTOR_NAMES:
this_predictor_matrix = numpy.array(
dataset_object.variables[this_predictor_name][:], dtype=float
)
this_predictor_matrix = numpy.expand_dims(
this_predictor_matrix, axis=-1)
if predictor_matrix is None:
predictor_matrix = this_predictor_matrix + 0.
else:
predictor_matrix = numpy.concatenate(
(predictor_matrix, this_predictor_matrix), axis=-1
)
return {
PREDICTOR_MATRIX_KEY: predictor_matrix,
PREDICTOR_NAMES_KEY: NETCDF_PREDICTOR_NAMES,
MUR_LONGITUDE: lons,
MUR_LATITUDE: lats}
def read_many_nam_cube(netcdf_file_names, PREDICTOR_NAMES):
"""Reads storm-centered images from many NetCDF files.
:param netcdf_file_names: 1-D list of paths to input files.
:return: image_dict: See doc for `read_image_file`.
"""
image_dict = None
keys_to_concat = [PREDICTOR_MATRIX_KEY]
for this_file_name in netcdf_file_names:
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_nam_maps(this_file_name, PREDICTOR_NAMES)
if image_dict is None:
image_dict = copy.deepcopy(this_image_dict)
continue
for this_key in keys_to_concat:
image_dict[this_key] = numpy.concatenate(
(image_dict[this_key], this_image_dict[this_key]), axis=0
)
return image_dict
#======================================================================================================
#=========================== Concatenate the cubes in order to create Tensor: =========================
#======================================================================================================
# In this section there are two functions that manually generate three derived features:
# [TMPsurface-SST, TMPsurface-DPT, DPT-SST]
def highres_features(nam_file_name, nam_feature_name, mur_file_name, mur_feature_name):
"""
to generate the features first it needs to upsample the nam features to the same resolution of SST maps
, then geerate the new feattures and make a cube form them.
"""
keys_to_concat = [PREDICTOR_MATRIX_KEY]
this_nam_file_name = nam_file_name
this_nam_dict = read_nam_maps(this_nam_file_name, nam_feature_name)
#print(this_nam_dict[PREDICTOR_MATRIX_KEY].shape)
this_nam_tmpsurf = this_nam_dict[PREDICTOR_MATRIX_KEY][0, :,:,0]
this_nam_tmp2m = this_nam_dict[PREDICTOR_MATRIX_KEY][0, :,:,1]
this_nam_dpt = this_nam_dict[PREDICTOR_MATRIX_KEY][0, :,:,2]
#print('BEFORE UPSAMPLING: ', this_nam_dpt.shape)
up_this_nam_tmpsurf= map_upsampling(this_nam_tmpsurf)
up_this_nam_tmp2m = map_upsampling(this_nam_tmp2m)
up_this_nam_dpt = map_upsampling(this_nam_dpt)
#print('AFTER UPSAMPLING: ', up_this_nam_dpt.shape)
this_mur_file_name = mur_file_name
this_mur_dict = read_mur_map(this_mur_file_name, mur_feature_name)
this_mur_file = this_mur_dict[PREDICTOR_MATRIX_KEY][0, :, :, 0]
#print('MUR SIZE: ', this_mur_file.shape)
# filling the mur map with tmp surface:
for l in range(len(this_mur_file)):
for w in range(len(this_mur_file)):
if this_mur_file[l, w] == -32768:
this_mur_file[l, w] = up_this_nam_tmpsurf[l, w]
NETCDF_SST = this_mur_file
NETCDF_TMPDPT = numpy.subtract(up_this_nam_tmp2m , up_this_nam_dpt)
NETCDF_TMPSST = numpy.subtract(up_this_nam_tmp2m , this_mur_file)
NETCDF_DPTSST = numpy.subtract(up_this_nam_dpt , this_mur_file)
# downsampling:
NETCDF_TMPDPT = map_downsampling(NETCDF_TMPDPT)
NETCDF_TMPSST = map_downsampling(NETCDF_TMPSST)
NETCDF_DPTSST = map_downsampling(NETCDF_DPTSST)
THIS_NETCDF_SST = numpy.expand_dims(NETCDF_SST, axis=-1)
THIS_NETCDF_TMPDPT = numpy.expand_dims(NETCDF_TMPDPT, axis=-1)
THIS_NETCDF_TMPSST = numpy.expand_dims(NETCDF_TMPSST, axis=-1)
THIS_NETCDF_DPTSST = numpy.expand_dims(NETCDF_DPTSST, axis=-1)
THIS_NETCDF_SST_CUBE = numpy.expand_dims(THIS_NETCDF_SST, axis=0)
THIS_NETCDF_MIXED_CUBE = numpy.concatenate((THIS_NETCDF_TMPDPT, THIS_NETCDF_TMPSST, THIS_NETCDF_DPTSST),
axis = -1)
THIS_NETCDF_MIXED_CUBE = numpy.expand_dims(THIS_NETCDF_MIXED_CUBE, axis=0)
return { SST_MATRIX_KEY: THIS_NETCDF_SST_CUBE,
PREDICTOR_MATRIX_KEY: THIS_NETCDF_MIXED_CUBE,
PREDICTOR_NAMES_KEY: NETCDF_MIXED_NAMES}
def SST_Upsampling(lowe_res_sst_cube):
length = lowe_res_sst_cube.shape[0]
high_res_sst_cube = None
for m in range(length):
low_res_map = lowe_res_sst_cube[m, :, : , 0]
this_high_res_map = scipy.ndimage.zoom(low_res_map, 1.021, order=3)
this_high_res_map = numpy.expand_dims(this_high_res_map, axis = 0)
if high_res_sst_cube is None:
high_res_sst_cube = this_high_res_map
else:
high_res_sst_cube = numpy.concatenate([high_res_sst_cube, this_high_res_map], axis = 0)
high_res_sst_cube = numpy.expand_dims(high_res_sst_cube, axis = -1)
return high_res_sst_cube
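# Shape sketch for the cube upsampler above: zoom factor 1.021 takes a 376x376 SST map
# to roughly 384x384 (376 * 1.021 ~= 383.9), which matches the "384" naming below.
# The 376 input size is an assumption drawn from the directory/variable names.
def _example_sst_upsampling_shape():
    fake_cube = numpy.zeros((2, 376, 376, 1))
    return SST_Upsampling(fake_cube).shape   # expected: (2, 384, 384, 1)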
def SST_384_cubes(sst_376_cube_names, cubes_dir):
for key in sst_376_cube_names:
print(('Process for {0} just started!').format(key))
low_res_cube_name = sst_376_cube_names[key][0]
low_res_cube_name = cubes_dir + low_res_cube_name
low_res_cube = load_cube(low_res_cube_name)
high_res_cube = SST_Upsampling(low_res_cube)
print('The shape after upsampling: ', high_res_cube.shape)
sst_cube_name = 'NETCDF_SST_CUBE_' + key + '.npz'
sst_cube_path = os.path.join(SAVE_CUBE_DIR, sst_cube_name)
savez_compressed(sst_cube_path, high_res_cube)
def mixed_cubes(nam_file_names, nam_feature_name, mur_file_names, mur_feature_name):
length = len(nam_file_names)
NETCDF_MIXED_CUBES = None
NETCDF_SST_CUBES = None
for i in range(length):
this_nam_file_name = nam_file_names[i]
this_mur_file_name = mur_file_names[i]
this_highres_cube = highres_features(this_nam_file_name, nam_feature_name, this_mur_file_name, mur_feature_name)
if NETCDF_MIXED_CUBES is None:
NETCDF_MIXED_CUBES = this_highres_cube[PREDICTOR_MATRIX_KEY]
NETCDF_SST_CUBES = this_highres_cube[SST_MATRIX_KEY]
else:
NETCDF_MIXED_CUBES = numpy.concatenate((NETCDF_MIXED_CUBES, this_highres_cube[PREDICTOR_MATRIX_KEY]), axis = 0)
NETCDF_SST_CUBES = numpy.concatenate((NETCDF_SST_CUBES, this_highres_cube[SST_MATRIX_KEY]), axis = 0)
return {SST_MATRIX_KEY : NETCDF_SST_CUBES,
PREDICTOR_MATRIX_KEY: NETCDF_MIXED_CUBES,
PREDICTOR_NAMES_KEY: NETCDF_GEN_NAMES,
SST_NAME_KEY: NETCDF_MUR_NAMES
}
#======================================================================================================
#=========================== Concatenate the cubes in order to create Tensor: =========================
#======================================================================================================
def concate_nam_cubes_files(netcdf_file_names, PREDICTOR_NAMES):
"""
concatenate the input maps for each day based on lead time prediction.
for instance the lead time is 6 hours and there are 3 cubes per each time cycle include 000, 003 and 006
based on "find_nam_cubes_name_hourpredict" function their names are selected, and this function creates the cube
of all three time prediction using concatenation.
input: netcdf_file_names
output: 3D cube
"""
cube_tensor = None
cubes_dict = {}
cancat_cube = None
match_name = None
cubenumber = 0
Depth = 4*len(PREDICTOR_NAMES)
cube_names = netcdf_file_names[1]
for this_file in range(len(cube_names)):
this_cube_name = cube_names[this_file]
this_cube_name_details = find_match_name (this_cube_name)
this_cube_match_name = this_cube_name_details ['cube_match_name']
this_cube_date_name = this_cube_name_details ['date']
this_cube_timecycle_name = this_cube_name_details ['timecycle']
#print('Name this_cube_match_name before if: ', this_cube_match_name)
this_cube_tensor = netcdf_file_names[0][this_file]
#print('Size this_cube_tensor before if: ', this_cube_tensor['predictor_matrix'].shape)
if cube_tensor is None:
cube_tensor = this_cube_tensor
match_name = this_cube_match_name
#print('Name cube_match_name after if: ', cube_match_name)
#print('Size cube_tensor after if: ', cube_tensor['predictor_matrix'].shape)
#print(this_cube_match_name)
elif match_name == this_cube_match_name:
#print(True)
cube_tensor = numpy.concatenate((cube_tensor[PREDICTOR_MATRIX_KEY], this_cube_tensor[PREDICTOR_MATRIX_KEY]), axis=-1)
cube_tensor = {PREDICTOR_MATRIX_KEY: cube_tensor}
#print('New Size of cube: ', cube_tensor['predictor_matrix'].shape)
Depth_cube = cube_tensor[PREDICTOR_MATRIX_KEY].shape[3]
if Depth_cube == Depth:
cube_name = 'cube_' + this_cube_date_name + '_' + this_cube_timecycle_name +'_036' + '_input.nc'
cube_tensor = {PREDICTOR_MATRIX_KEY: cube_tensor[PREDICTOR_MATRIX_KEY],
PREDICTOR_NAMES_KEY: 4 * PREDICTOR_NAMES,
CUBE_NAMES_KEY: cube_name}
cubes_dict[cubenumber] = cube_tensor
cubenumber = cubenumber + 1
else:
match_name = this_cube_match_name
#print('Name cube_match_name after else: ', cube_match_name)
cube_tensor = this_cube_tensor
#print('Size cube_tensor after else: ', cube_tensor['predictor_matrix'].shape)
return cubes_dict
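# Minimal sketch (synthetic arrays, hypothetical file names) of how the concatenation
# above grows the channel axis: four forecast hours of a C-channel cube for the same
# date/time cycle become one cube of depth 4*C.
def _example_concat_depth(PREDICTOR_NAMES=('TMP_2maboveground',)):
    hours = ['000', '006', '012', '024']
    names = ['maps_20190315_0000_' + h + '_input.nc' for h in hours]
    cubes = {i: {PREDICTOR_MATRIX_KEY: numpy.zeros((1, 32, 32, len(PREDICTOR_NAMES)))}
             for i in range(4)}
    out = concate_nam_cubes_files((cubes, names), list(PREDICTOR_NAMES))
    return out[0][PREDICTOR_MATRIX_KEY].shape   # expected: (1, 32, 32, 4)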
def nam_cubes_dict(netcdf_file_names):
cubes_dict = None
keys_to_concat = [PREDICTOR_MATRIX_KEY]
numerator = len(netcdf_file_names)
for this_file in range(numerator):
cube_tensor = netcdf_file_names[this_file]
        #print('Size this_cube_tensor before if: ', this_cube_tensor['predictor_matrix'].shape)
if cubes_dict is None:
cubes_dict = copy.deepcopy(cube_tensor)
else:
cubes_dict = numpy.concatenate(
(cubes_dict[PREDICTOR_MATRIX_KEY], cube_tensor[PREDICTOR_MATRIX_KEY]), axis=0
)
cubes_dict = {PREDICTOR_MATRIX_KEY: cubes_dict}
return cubes_dict
def concate_mixed_cubes_files(normalized_mur_cubes):
"""
Mixed_cube means three generated feature manually include TMPsurface-SST, DPT-SST and TMPsurface-DPT.
This function using the name of maps generate the cube with depth 3 of having three mentioned maps.
"""
cancat_mur_cube = None
length = len(normalized_mur_cubes)
i = 0
while i < length:
m00 = normalized_mur_cubes[i][PREDICTOR_MATRIX_KEY]
m06 = normalized_mur_cubes[i+1][PREDICTOR_MATRIX_KEY]
m09 = normalized_mur_cubes[i+2][PREDICTOR_MATRIX_KEY]
m12 = normalized_mur_cubes[i+3][PREDICTOR_MATRIX_KEY]
this_cube_timecycle = numpy.concatenate(
(m00, m06, m09, m12), axis=-1)
this_cube_timecycle = numpy.expand_dims(
this_cube_timecycle, axis=0)
if cancat_mur_cube is None:
cancat_mur_cube = copy.deepcopy(this_cube_timecycle)
else:
cancat_mur_cube = numpy.concatenate(
(cancat_mur_cube, this_cube_timecycle), axis=0
)
i+=4
return cancat_mur_cube
def concate_sst_cubes_files(normalized_mur_cubes):
cancat_sst_cube = None
length = len(normalized_mur_cubes)
i = 0
while i < length:
m06 = normalized_mur_cubes[i+2][SST_MATRIX_KEY]
this_cube_timecycle = numpy.expand_dims(
m06, axis=0)
if cancat_sst_cube is None:
cancat_sst_cube = copy.deepcopy(this_cube_timecycle)
else:
cancat_sst_cube = numpy.concatenate(
(cancat_sst_cube, this_cube_timecycle), axis=0
)
i+=4
return cancat_sst_cube
#============================================================================
#==================== Normalization Step =============================
#============================================================================
def _update_normalization_params(intermediate_normalization_dict, new_values):
"""Updates normalization params for one predictor.
:param intermediate_normalization_dict: Dictionary with the following keys.
intermediate_normalization_dict['num_values']: Number of values on which
current estimates are based.
intermediate_normalization_dict['mean_value']: Current estimate for mean.
intermediate_normalization_dict['mean_of_squares']: Current mean of squared
values.
:param new_values: numpy array of new values (will be used to update
`intermediate_normalization_dict`).
:return: intermediate_normalization_dict: Same as input but with updated
values.
"""
if MEAN_VALUE_KEY not in intermediate_normalization_dict:
intermediate_normalization_dict = {
NUM_VALUES_KEY: 0,
MEAN_VALUE_KEY: 0.,
MEAN_OF_SQUARES_KEY: 0.
}
these_means = numpy.array([
intermediate_normalization_dict[MEAN_VALUE_KEY], numpy.mean(new_values)
])
these_weights = numpy.array([
intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size
])
intermediate_normalization_dict[MEAN_VALUE_KEY] = numpy.average(
these_means, weights=these_weights)
these_means = numpy.array([
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY],
numpy.mean(new_values ** 2)
])
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] = numpy.average(
these_means, weights=these_weights)
intermediate_normalization_dict[NUM_VALUES_KEY] += new_values.size
return intermediate_normalization_dict
def _get_standard_deviation(intermediate_normalization_dict):
"""Computes stdev from intermediate normalization params.
:param intermediate_normalization_dict: See doc for
`_update_normalization_params`.
:return: standard_deviation: Standard deviation.
"""
num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])
multiplier = num_values / (num_values - 1)
return numpy.sqrt(multiplier * (
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -
intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2
))
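# Numerical check (illustrative) of the two helpers above: feeding the values in two
# batches reproduces numpy's sample standard deviation (ddof=1).
def _example_streaming_stdev():
    values = numpy.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    norm_dict = {}
    norm_dict = _update_normalization_params(norm_dict, values[:3])
    norm_dict = _update_normalization_params(norm_dict, values[3:])
    streamed = _get_standard_deviation(norm_dict)
    direct = numpy.std(values, ddof=1)
    return streamed, direct   # both ~1.8708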
def get_nam_normalization_params(netcdf_file_names, PREDICTOR_NAMES):
"""Computes normalization params (mean and stdev) for each predictor.
:param netcdf_file_names: 1-D list of paths to input files.
:return: normalization_dict: See input doc for `normalize_images`.
"""
predictor_names = None
norm_dict_by_predictor = None
for this_file_name in netcdf_file_names:
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_nam_maps(this_file_name, PREDICTOR_NAMES)
if predictor_names is None:
predictor_names = this_image_dict[PREDICTOR_NAMES_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor[m],
new_values=this_image_dict[PREDICTOR_MATRIX_KEY][..., m]
)
print('\n')
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
print(message_string)
return normalization_dict
def normalize_sst_map(
predictor_matrix, predictor_names, normalization_dict=None):
normalization_dict = {}
this_mean = numpy.mean(predictor_matrix)
this_stdev = numpy.std(predictor_matrix, ddof=1)
normalization_dict = numpy.array(
[this_mean, this_stdev]
)
predictor_matrix = (
(predictor_matrix - this_mean) / float(this_stdev))
return {
SST_MATRIX_KEY: predictor_matrix,
SST_NAME_KEY: predictor_names
}
def get_sst_normalization_params(NETCDF_HIGHRES_CUBES, PREDICTOR_NAMES):
"""Computes normalization params (mean and stdev) for each predictor.
:param netcdf_file_names: 1-D list of paths to input files.
:return: normalization_dict: See input doc for `normalize_images`.
"""
predictor_names = None
norm_dict_by_predictor = None
length = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY].shape[0]
#print(length)
for i in range(length):
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY][i, :, :, :]
if predictor_names is None:
predictor_names = NETCDF_HIGHRES_CUBES[SST_NAME_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
norm_dict_by_predictor = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor,
new_values = this_image_dict
)
print('\n')
normalization_dict = {}
this_mean = norm_dict_by_predictor[MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor)
normalization_dict = numpy.array([this_mean, this_stdev])
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format('SST', this_mean, this_stdev)
print(message_string)
return normalization_dict
## this function works when we are using the 4 high-resolution map cubes
def get_mixed_normalization_params(NETCDF_HIGHRES_CUBES, PREDICTOR_NAMES):
predictor_names = None
norm_dict_by_predictor = None
length = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY].shape[0]
#print(length)
for i in range(length):
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY][i, :, :, :]
#print(this_image_dict.shape)
if predictor_names is None:
predictor_names = NETCDF_HIGHRES_CUBES[PREDICTOR_NAMES_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
#print(len(predictor_names))
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict = norm_dict_by_predictor[m],
new_values=this_image_dict[..., m]
)
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
        print(message_string)
    return normalization_dict
def normalize_nam_maps(
predictor_matrix, predictor_names, normalization_dict=None):
"""Normalizes images to z-scores.
E = number of examples (storm objects) in file
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param predictor_matrix: E-by-M-by-N-by-C numpy array of predictor values.
:param predictor_names: length-C list of predictor names.
:param normalization_dict: Dictionary. Each key is the name of a predictor
value, and the corresponding value is a length-2 numpy array with
[mean, standard deviation]. If `normalization_dict is None`, mean and
standard deviation will be computed for each predictor.
:return: predictor_matrix: Normalized version of input.
:return: normalization_dict: See doc for input variable. If input was None,
this will be a newly created dictionary. Otherwise, this will be the
same dictionary passed as input.
"""
num_predictors = len(predictor_names)
if normalization_dict is None:
normalization_dict = {}
for m in range(num_predictors):
this_mean = numpy.mean(predictor_matrix[..., m])
this_stdev = numpy.std(predictor_matrix[..., m], ddof=1)
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
predictor_matrix[..., m] = (
(predictor_matrix[..., m] - this_mean) / float(this_stdev)
)
return {
PREDICTOR_MATRIX_KEY: predictor_matrix,
PREDICTOR_NAMES_KEY: predictor_names
}
def denormalize_nam_maps(predictor_matrix, predictor_names, normalization_dict):
"""Denormalizes images from z-scores back to original scales.
:param predictor_matrix: See doc for `normalize_images`.
:param predictor_names: Same.
:param normalization_dict: Same.
:return: predictor_matrix: Denormalized version of input.
"""
num_predictors = len(predictor_names)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
predictor_matrix[..., m] = (
this_mean + this_stdev * predictor_matrix[..., m]
)
return predictor_matrix
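# Round-trip sketch of the two functions above. normalize_nam_maps does not return the
# dictionary it computes internally, so the example rebuilds it with the same formula
# before denormalizing; the restored array matches the original input.
def _example_zscore_round_trip():
    names = ['TMP_2maboveground']
    matrix = numpy.random.rand(4, 32, 32, 1)
    original = matrix.copy()
    result = normalize_nam_maps(matrix, names, normalization_dict=None)
    norm_dict = {names[0]: numpy.array([numpy.mean(original[..., 0]),
                                        numpy.std(original[..., 0], ddof=1)])}
    restored = denormalize_nam_maps(result[PREDICTOR_MATRIX_KEY], names, norm_dict)
    return numpy.allclose(restored, original)   # expected: True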
def normalize_many_cubes(netcdf_file_names, normalization_dict, predictor_names):
normmalized_cubes_dict = {}
for m in range(len(netcdf_file_names)):
this_cube = read_nam_maps(netcdf_file_names[m], predictor_names)
normmalized_cubes_dict[m] = normalize_nam_maps(
predictor_matrix = this_cube[PREDICTOR_MATRIX_KEY],
predictor_names = this_cube[PREDICTOR_NAMES_KEY],
normalization_dict = normalization_dict)
message_string = (
'The normalization of ' + 'netcdf_file_names is done!')
print(message_string)
return normmalized_cubes_dict, netcdf_file_names
def normalize_mixed_cubes(NETCDF_HIGHRES_CUBES, normalization_dict):
normmalized_cubes_dict = {}
length = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY].shape[0]
for m in range(length):
this_cube = NETCDF_HIGHRES_CUBES[PREDICTOR_MATRIX_KEY][m, :, :, :]
normmalized_cubes_dict[m] = normalize_nam_maps(
predictor_matrix = this_cube,
predictor_names = NETCDF_HIGHRES_CUBES[PREDICTOR_NAMES_KEY],
normalization_dict = normalization_dict)
message_string = (
'The normalization of ' + 'netcdf_file_names is done!')
print(message_string)
return normmalized_cubes_dict
def normalize_sst_cubes(NETCDF_HIGHRES_CUBES, normalization_dict):
normmalized_cubes_dict = {}
length = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY].shape[0]
for m in range(length):
this_cube = NETCDF_HIGHRES_CUBES[SST_MATRIX_KEY][m, :, :, :]
normmalized_cubes_dict[m] = normalize_sst_map(
predictor_matrix = this_cube,
predictor_names = NETCDF_HIGHRES_CUBES[SST_NAME_KEY],
normalization_dict = normalization_dict)
message_string = (
'The normalization of ' + 'netcdf_file_names is done!')
print(message_string)
return normmalized_cubes_dict
#===============================================================================
#============================ Target preparation ===============================
#===============================================================================
def plot_visibility_cases(data, year, margin):
vis = data['VIS_Cat'].value_counts()
nan = data['VIS_Cat'].isna().sum()
values = vis.values
fvalues = numpy.insert(values, 0, nan)
names = ["VIS_nan", "VIS > 4mi", "1mi< VIS<= 4mi", "VIS =<1mi"]
df = pandas.DataFrame(columns = ["VIS-Cat", "VIS_Count"])
df["VIS-Cat"] = names
df["VIS_Count"] = fvalues
# plot the count
fig, ax = pyplot.subplots(figsize = FIG_DEFULT_SIZE)
ax = sns.barplot(x = "VIS-Cat", y="VIS_Count", data=df,
palette="Blues_d")
xlocs, xlabs = pyplot.xticks()
pyplot.xlabel('Visibility class')
pyplot.ylabel('The number of cases')
txt = ('The number of visibility cases for {0}').format(year)
pyplot.title(txt)
for i, v in enumerate(df["VIS_Count"]):
pyplot.text(xlocs[i] , v + margin, str(v),
fontsize=12, color='red',
horizontalalignment='center', verticalalignment='center')
pyplot.show()
def reading_csv_target_file(csv_file_name):
data = pandas.read_csv(csv_file_name, header=0, sep=',')
    for i in range(len(data)):
        namesplit = os.path.split(data['Name'][i])[-1]
        year = namesplit.replace(namesplit[4:], '')
        month = namesplit.replace(namesplit[0:4], '').replace(namesplit[6:], '')
        day = namesplit.replace(namesplit[0:6], '').replace(namesplit[8:], '')
        timecycle = namesplit.replace(namesplit[0:9], '')
        # Assign per row; assigning to the whole column here would overwrite every
        # row with the values parsed from the last file name in the loop.
        data.loc[i, 'Year'] = year
        data.loc[i, 'Month'] = month
        data.loc[i, 'Day'] = day
        data.loc[i, 'TimeCycle'] = timecycle
return data
class targets():
def __init__(self, targets_file_names, training_years, validation_years, testing_years, DEFAULT_TARGET_DIR_NAME, priority_calss):
self.targets_file_names = targets_file_names
self.DEFAULT_TARGET_DIR_NAME = DEFAULT_TARGET_DIR_NAME
self.training_years = training_years
self.validation_years = validation_years
self.testing_years = testing_years
self.priority_calss = priority_calss
def multiclass_target(self):
training_targets = pandas.DataFrame()
validation_targets = pandas.DataFrame()
testing_targets = pandas.DataFrame()
TRAIN_FRAMES = []
for i in self.training_years:
year_name = self.targets_file_names[i]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
TRAIN_FRAMES.append(year_data)
training_targets = pandas.concat(TRAIN_FRAMES)
#print(training_targets.shape)
categorical_training_targets = to_categorical(training_targets)
#print(categorical_training_targets.shape)
VALID_FRAMES = []
for j in self.validation_years:
year_name = self.targets_file_names[j]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
VALID_FRAMES.append(year_data)
validation_targets = pandas.concat(VALID_FRAMES)
#print(validation_targets.shape)
categorical_validation_targets = to_categorical(validation_targets)
#print(categorical_validation_targets.shape)
TEST_FRAMES = []
for k in self.testing_years:
year_name = self.targets_file_names[k]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
TEST_FRAMES.append(year_data)
testing_targets = pandas.concat(TEST_FRAMES)
#print(testing_targets.shape)
categorical_testing_targets = to_categorical(testing_targets)
#print(categorical_testing_targets.shape)
return [training_targets, categorical_training_targets,
validation_targets, categorical_validation_targets,
testing_targets, categorical_testing_targets]
def binary_target(self):
training_targets = pandas.DataFrame()
validation_targets = pandas.DataFrame()
testing_targets = pandas.DataFrame()
TRAIN_FRAMES = []
for i in self.training_years:
year_name = self.targets_file_names[i]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = pandas.read_csv(file_name, header=0, sep=',')
year_data = year_data['VIS_Cat']
TRAIN_FRAMES.append(year_data)
training_targets = pandas.concat(TRAIN_FRAMES)
#print(training_targets.shape)
VALID_FRAMES = []
for j in self.validation_years:
year_name = self.targets_file_names[j]
file_name = self.DEFAULT_TARGET_DIR_NAME + year_name
year_data = | pandas.read_csv(file_name, header=0, sep=',') | pandas.read_csv |
from Data import Data
import itertools
import joblib
import numpy as np
import pandas as pd
import pickle
import re
import statsmodels.api as sm
import sys
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from sklearn.preprocessing import OneHotEncoder
import os
class Predict(object):
"""This class makes predictions of house prices"""
def __init__(self, features = ['tfarea', 'numberrooms', 'propertytype', 'oldnew'], LR=True,
RF=False, GB=False, test_prop=0.2, regions=[], seed = 1704, outcome = ['ln_y'],
hot_load_models=True, save_models=True, model_append = [''], output_geo=True,
merge_lsoa=False, gb_model = GradientBoostingRegressor()):
self.model_dir = os.path.join(os.path.abspath(''), 'models')
self.data_dir = os.path.join(os.path.abspath(''), 'data')
self.features = features
self.LR = LR
self.RF = RF
self.GB = GB
self.gb_model = gb_model
self.test_prop = test_prop
self.regions = regions
self.seed = seed
self.outcome = outcome
self.hot_load_models = hot_load_models
self.save_models = save_models
self.merge_lsoa = merge_lsoa
self.model_append = model_append
self.feature_acronyms = [i[0:3] for i in self.features]
if self.model_append == ['']:
self.model_append = '_'.join(self.regions + self.feature_acronyms)
else:
self.model_append = '_'.join(self.regions + self.feature_acronyms + self.model_append)
self.output_geo = output_geo
self.data = Data(regions=self.regions, merge_lsoa = self.merge_lsoa).data
self.generate_outcome()
self.generate_features()
self.train_test_split()
self.estimate_model(LR = self.LR, RF = self.RF, GB = self.GB)
self.oos_r2()
if self.output_geo:
self.output_geo_df()
def train_test_split(self):
self.X_train, self.X_test, self.y_train, self.y_test =\
train_test_split(self.data[self.features], self.data['outcome'],
test_size = self.test_prop, random_state = self.seed)
print("Training set dimensions: {}".format(self.X_train.shape))
def generate_outcome(self):
self.data['y'] = self.data['price']
self.data['ln_y'] = self.data['price'].apply(np.log)
self.data['rel_y'] = self.data['priceper']
self.data['outcome'] = self.data[self.outcome]
def generate_features(self):
""" Generate features to include into the predictions"""
# identify categorical versus continuous features
self.cat_features =\
list(itertools.compress(self.features, [i == 'object' for i in self.data[self.features].dtypes]))
self.other_features=\
list(itertools.compress(self.features, [i != 'object' for i in self.data[self.features].dtypes]))
print("Categorical features identified: {}".format(self.cat_features))
print("Continous features identified: {}".format(self.other_features))
# one-hot encode all categorical observations
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(self.data[self.cat_features])
self.data[enc.get_feature_names(self.cat_features)] = enc.\
transform(self.data[self.cat_features]).toarray()
# new features
self.features = list(itertools.chain(*[self.other_features,
list(enc.get_feature_names(self.cat_features))]))
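    # A minimal sketch (not part of the original class) of what the encoding step above
    # produces, using a hypothetical two-row frame; handle_unknown='ignore' makes
    # categories unseen at fit time encode as all-zero rows later.
    @staticmethod
    def _one_hot_sketch():
        toy = pd.DataFrame({"propertytype": ["D", "F"], "oldnew": ["Y", "N"]})
        enc = OneHotEncoder(handle_unknown="ignore")
        enc.fit(toy)
        cols = enc.get_feature_names(["propertytype", "oldnew"])
        # cols -> ['propertytype_D', 'propertytype_F', 'oldnew_N', 'oldnew_Y']
        return pd.DataFrame(enc.transform(toy).toarray(), columns=cols)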
def estimate_model(self, LR, RF, GB):
if LR:
self.lr()
else:
self.lr_predictions = np.nan
if RF:
self.rf()
else:
self.rf_predictions = np.nan
if GB:
self.gb(gb_model = self.gb_model)
else:
self.gb_predictions = np.nan
def output_geo_df(self):
assert pd.Series(self.X_test.index).isin(pd.Series(self.data.index)).mean() == 1
assert | pd.Series(self.y_test.index) | pandas.Series |
import numpy as np
import pytest
from anndata import AnnData
from pandas import DataFrame
from pandas.testing import assert_frame_equal
from ehrapy.api.anndata_ext import ObsEmptyError, anndata_to_df, df_to_anndata
class TestAnndataExt:
def test_df_to_anndata_simple(self):
df, col1_val, col2_val, col3_val = TestAnndataExt._setup_df_to_anndata()
expected_x = np.array([col1_val, col2_val, col3_val], dtype="object").transpose()
adata = df_to_anndata(df)
assert adata.X.dtype == "object"
assert adata.X.shape == (100, 3)
np.testing.assert_array_equal(adata.X, expected_x)
def test_df_to_anndata_index_column(self):
df, col1_val, col2_val, col3_val = TestAnndataExt._setup_df_to_anndata()
expected_x = np.array([col2_val, col3_val], dtype="object").transpose()
adata = df_to_anndata(df, index_column="col1")
assert adata.X.dtype == "object"
assert adata.X.shape == (100, 2)
np.testing.assert_array_equal(adata.X, expected_x)
assert list(adata.obs.index) == col1_val
def test_df_to_anndata_cols_obs_only(self):
df, col1_val, col2_val, col3_val = TestAnndataExt._setup_df_to_anndata()
adata = df_to_anndata(df, columns_obs_only=["col1", "col2"])
assert adata.X.dtype == "float32"
assert adata.X.shape == (100, 1)
assert_frame_equal(
adata.obs, DataFrame({"col1": col1_val, "col2": col2_val}, index=[str(idx) for idx in range(100)])
)
def test_df_to_anndata_all_num(self):
test_array = np.random.randint(0, 100, (4, 5))
df = DataFrame(test_array, columns=["col" + str(idx) for idx in range(5)])
adata = df_to_anndata(df)
assert adata.X.dtype == "float32"
np.testing.assert_array_equal(test_array, adata.X)
def test_anndata_to_df_simple(self):
col1_val, col2_val, col3_val = TestAnndataExt._setup_anndata_to_df()
expected_df = DataFrame({"col1": col1_val, "col2": col2_val, "col3": col3_val}, dtype="object")
adata_x = np.array([col1_val, col2_val, col3_val], dtype="object").transpose()
adata = AnnData(
X=adata_x,
obs=DataFrame(index=[idx for idx in range(100)]),
var=DataFrame(index=["col" + str(idx) for idx in range(1, 4)]),
dtype="object",
)
anndata_df = anndata_to_df(adata)
assert_frame_equal(anndata_df, expected_df)
def test_anndata_to_df_all_from_obs(self):
col1_val, col2_val, col3_val = TestAnndataExt._setup_anndata_to_df()
expected_df = | DataFrame({"col1": col1_val, "col2": col2_val, "col3": col3_val}) | pandas.DataFrame |
import pandas as pd
from xbbg import blp, pipeline, const
class BloombergData:
    @staticmethod
    def get_hack_data():
df_IBOV = blp.bdh('BOVV11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
df_SP = blp.bdh('SPXI11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
df_IMAB = blp.bdh('IMAB11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
df_IRFM = blp.bdh('IRFM11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
df_IBOV.index = pd.to_datetime(df_IBOV.index)
df_SP.index = | pd.to_datetime(df_SP.index) | pandas.to_datetime |
from typing import List, Tuple
import numpy
import pandas
import pytest
from openff.units import unit
from pydantic import ValidationError
from openff.evaluator.datasets import (
MeasurementSource,
PhysicalPropertyDataSet,
PropertyPhase,
)
from openff.evaluator.datasets.curation.components.filtering import (
FilterByCharged,
FilterByChargedSchema,
FilterByElements,
FilterByElementsSchema,
FilterByEnvironments,
FilterByEnvironmentsSchema,
FilterByIonicLiquid,
FilterByIonicLiquidSchema,
FilterByMoleFraction,
FilterByMoleFractionSchema,
FilterByNComponents,
FilterByNComponentsSchema,
FilterByPressure,
FilterByPressureSchema,
FilterByPropertyTypes,
FilterByPropertyTypesSchema,
FilterByRacemic,
FilterByRacemicSchema,
FilterBySmiles,
FilterBySmilesSchema,
FilterBySmirks,
FilterBySmirksSchema,
FilterByStereochemistry,
FilterByStereochemistrySchema,
FilterBySubstances,
FilterBySubstancesSchema,
FilterByTemperature,
FilterByTemperatureSchema,
FilterDuplicates,
FilterDuplicatesSchema,
)
from openff.evaluator.datasets.utilities import data_frame_to_substances
from openff.evaluator.properties import Density, EnthalpyOfMixing
from openff.evaluator.substances import Component, MoleFraction, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.checkmol import ChemicalEnvironment
def _build_entry(*smiles: str) -> Density:
"""Builds a density data entry measured at ambient conditions
and for a system containing the specified smiles patterns in
equal amounts.
Parameters
----------
smiles
The smiles to build components for.
Returns
-------
The built density entry.
"""
assert len(smiles) > 0
return Density(
thermodynamic_state=ThermodynamicState(
temperature=298.15 * unit.kelvin,
pressure=101.325 * unit.kilopascal,
),
phase=PropertyPhase.Liquid,
value=1.0 * Density.default_unit(),
uncertainty=1.0 * Density.default_unit(),
source=MeasurementSource(doi=" "),
substance=Substance.from_components(*smiles),
)
def _build_data_frame(
property_types: List[str],
substance_entries: List[Tuple[Tuple[str, ...], Tuple[bool, ...]]],
) -> pandas.DataFrame:
data_rows = []
for substance, include_properties in substance_entries:
for property_type, include_property in zip(property_types, include_properties):
if not include_property:
continue
data_row = {
"N Components": len(substance),
f"{property_type} Value (unit)": 1.0,
}
for index, component in enumerate(substance):
data_row[f"Component {index + 1}"] = component
data_rows.append(data_row)
data_frame = | pandas.DataFrame(data_rows) | pandas.DataFrame |
"""apiclient.py"""
from json.decoder import JSONDecodeError
import sys
from enum import Enum
from datetime import datetime
from datetime import timedelta
from re import compile as re_compile
import pandas as pd
import numpy as np
from requests import get as requests_get
from requests import ConnectionError as requests_ConnectionError
from requests import Timeout as requests_Timeout
from requests.exceptions import HTTPError as requests_HTTPError
from rich.console import Console
from rich.progress import track
# minimal traceback
sys.tracebacklimit = 1
class Interval(Enum):
"""Enum: infraday"""
MINUTE = "1m"
FIVEMINUTES = "5m"
HOUR = "1h"
ONEDAY = "1d"
class DateUtils:
"""Utility class"""
@staticmethod
def str2datetime(_datetime: str):
"""Convert yyyy-mm-dd hh:mm:ss to datetime"""
# Validate string datetime
prog = re_compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$")
if not prog.match(_datetime):
raise ValueError("Incorrect datetime format: yyyy-mm-dd hh:mm:ss")
return datetime.strptime(_datetime, "%Y-%m-%d %H:%M:%S")
@staticmethod
def str2epoch(_datetime: str):
"""Convert yyyy-mm-dd hh:mm:ss to datetime"""
# Validate string datetime
prog = re_compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$")
if not prog.match(_datetime):
raise ValueError("Incorrect datetime format: yyyy-mm-dd hh:mm:ss")
return int(datetime.strptime(_datetime, "%Y-%m-%d %H:%M:%S").timestamp())
@staticmethod
def previous_day_last_second():
"""Returns the last second of the previous day"""
yesterday = datetime.today() - timedelta(days=1)
return str(yesterday.date()) + " 23:59:59"
@staticmethod
def previous_day_last_minute():
"""Returns the last minute of the previous day"""
yesterday = datetime.today() - timedelta(days=1)
return str(yesterday.date()) + " 23:59:00"
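# Usage sketch (added for illustration; not part of the original client):
#   DateUtils.str2datetime("2021-01-01 09:30:00")  # -> datetime(2021, 1, 1, 9, 30)
#   DateUtils.str2epoch("2021-01-01 09:30:00")     # -> Unix timestamp (local time) as int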
class APIClient:
"""API class"""
def __init__(self, api_key: str) -> None:
# Validate API key
prog = re_compile(r"^[A-z0-9.]{16,32}$")
if not prog.match(api_key):
raise ValueError("API key is invalid")
self._api_key = api_key
self._api_url = "https://eodhistoricaldata.com/api"
self.console = Console()
def _rest_get(
self, endpoint: str = "", uri: str = "", querystring: str = ""
) -> | pd.DataFrame() | pandas.DataFrame |
# AutoEncoders
# Importing the libraries
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
#Configs
train_file = '../datasets/Escamb_wtdata_train_alarm_86400.csv.gz'
exclude_columns = ['alarm_block_code', 'alarm_all', 'alarm_all_block_code', 'ot', 'ot_block_code', 'ot_all', 'ot_all_block_code']
include_columns = ['VelViento_avg','Pot_avg','VelRotor_avg','TempAceiteMultip_avg','TempAmb_avg','TempRodamMultip_avg'] #Escamb multi
target_name = 'alarm'
datetime_name = 'date_time'
train_per = 80
# Importing the dataset
#movies = pd.read_csv('ml-1m/movies.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')
#users = pd.read_csv('ml-1m/users.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')
#ratings = pd.read_csv('ml-1m/ratings.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')
# Preparing the training set and the test set
wtdata_train = | pd.read_csv(train_file,sep=',', compression='gzip',parse_dates=[datetime_name]) | pandas.read_csv |
from sqlalchemy import func
import pandas as pd
import numpy as np
from cswd.sql.base import session_scope
from cswd.sql.models import (Issue, StockDaily, Adjustment, DealDetail,
SpecialTreatment, SpecialTreatmentType)
DAILY_COLS = ['symbol', 'date',
'open', 'high', 'low', 'close',
'prev_close', 'change_pct',
'volume', 'amount', 'turnover', 'cmv', 'tmv']
OHLCV_COLS = ['open', 'high', 'low', 'close', 'volume']
MINUTELY_COLS = ['symbol', 'date'] + OHLCV_COLS
ADJUSTMENT_COLS = ['symbol', 'date', 'amount', 'ratio',
'record_date', 'pay_date', 'listing_date']
def get_exchange(code):
if code[0] in ('0', '3'):
return "SZSE"
else:
return "SSE"
def get_start_dates():
"""
Stock listing dates.
Examples
--------
>>> df = get_start_dates()
>>> df.head()
symbol start_date
0 000001 1991-04-03
1 000002 1991-01-29
2 000003 1991-01-14
3 000004 1991-01-14
4 000005 1990-12-10
"""
col_names = ['symbol', 'start_date']
with session_scope() as sess:
query = sess.query(
Issue.code,
Issue.A004_上市日期
).filter(
Issue.A004_上市日期.isnot(None)
)
df = pd.DataFrame.from_records(query.all())
df.columns = col_names
return df
def get_end_dates():
"""
Stock end dates. Restricted to stocks that have been delisted or are currently suspended from listing.
Examples
--------
>>> df = get_end_dates()
>>> df.head()
symbol end_date
0 000003 2002-06-14
1 000013 2004-09-20
2 000015 2001-10-22
3 000024 2015-12-30
4 000033 2017-07-07
"""
col_names = ['symbol', 'end_date']
with session_scope() as sess:
query = sess.query(
SpecialTreatment.code,
func.max(SpecialTreatment.date)
).group_by(
SpecialTreatment.code
).having(
SpecialTreatment.treatment.in_(
[SpecialTreatmentType.delisting, SpecialTreatmentType.PT]
)
)
df = pd.DataFrame.from_records(query.all())
df.columns = col_names
return df
def get_latest_short_name():
"""
Get the most recent short name of each stock.
Examples
--------
>>> df = get_latest_short_name()
>>> df.head()
symbol asset_name
0 000001 平安银行
1 000002 万 科A
2 000003 PT金田A
3 000004 国农科技
4 000005 世纪星源
"""
col_names = ['symbol', 'asset_name']
with session_scope() as sess:
query = sess.query(
StockDaily.code,
StockDaily.A001_名称
).group_by(
StockDaily.code
).having(
func.max(StockDaily.date)
)
df = pd.DataFrame.from_records(query.all())
df.columns = col_names
return df
def gen_asset_metadata(only_in=True):
"""
Generate stock asset metadata.
Parameters
----------
only_in : bool
Whether to include only stocks that are currently listed. Defaults to True.
Examples
--------
>>> df = gen_asset_metadata()
>>> df.head()
symbol asset_name first_traded last_traded exchange auto_close_date \
0 000001 平安银行 1991-01-02 2018-04-19 SZSE 2018-04-20
1 000002 万 科A 1991-01-02 2018-04-19 SZSE 2018-04-20
2 000004 国农科技 1991-01-02 2018-04-19 SZSE 2018-04-20
3 000005 世纪星源 1991-01-02 2018-04-19 SZSE 2018-04-20
4 000006 深振业A 1992-04-27 2018-04-19 SZSE 2018-04-20
start_date end_date
0 1991-04-03 2018-04-19
1 1991-01-29 2018-04-19
2 1991-01-14 2018-04-19
3 1990-12-10 2018-04-19
4 1992-04-27 2018-04-19
"""
columns = ['symbol', 'first_traded', 'last_traded']
with session_scope() as sess:
query = sess.query(
StockDaily.code,
func.min(StockDaily.date),
func.max(StockDaily.date)
).filter(
~StockDaily.code.startswith('2')
).filter(
~StockDaily.code.startswith('9')
).group_by(
StockDaily.code
)
df = pd.DataFrame.from_records(query.all())
df.columns = columns
df['exchange'] = df['symbol'].map(get_exchange)
df['auto_close_date'] = df['last_traded'].map(
lambda x: x + pd.Timedelta(days=1))
latest_name = get_latest_short_name()
start_dates = get_start_dates()
end_dates = get_end_dates()
df = df.merge(
latest_name, 'left', on='symbol'
).merge(
start_dates, 'left', on='symbol'
).merge(
end_dates, 'left', on='symbol'
)
# For stocks that have not been delisted, use the last trading date as the end date
df.loc[df.end_date.isna(), 'end_date'] = df.loc[df.end_date.isna(),
'last_traded']
if only_in:
df = df[~df.symbol.isin(end_dates.symbol)]
df.reset_index(inplace=True, drop=True)
return df
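# Note added for clarity: a suspended trading day can be stored with 0.0 in every
# OHLC field. _fill_zero below first carries the previous close forward, then fills
# open/high/low from that close, so the ingested data never contains zero prices.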
def _fill_zero(df):
"""填充因为停牌ohlc可能存在的0值"""
# 将close放在第一列
ohlc_cols = ['close', 'open', 'high', 'low']
ohlc = df[ohlc_cols].copy()
ohlc.replace(0.0, np.nan, inplace=True)
ohlc.close.fillna(method='ffill', inplace=True)
# Fill across columns (each row borrows the value from the previous column)
ohlc.fillna(method='ffill', axis=1, inplace=True)
for col in ohlc_cols:
df[col] = ohlc[col]
return df
def fetch_single_equity(stock_code, start, end):
"""
Read daily bar data for a single stock over a period from the local database.
Notes
-----
1. Besides OHLCV, the frame also contains change_pct, amount, turnover,
circulating market value, total market value, circulating shares and total shares.
2. When writing in bcolz format, the change_pct column contains negative values
and must be dropped!
Parameters
----------
stock_code : str
The stock code to fetch data for.
start_date : datetime-like
Start date (inclusive).
end_date : datetime-like
End date.
Returns
-------
DataFrame: a DataFrame with OHLCV columns.
Examples
--------
>>> symbol = '000333'
>>> start_date = '2018-4-1'
>>> end_date = pd.Timestamp('2018-4-16')
>>> df = fetch_single_equity(symbol, start_date, end_date)
>>> df.iloc[:,:8]
symbol date open high low close prev_close change_pct
0 000333 2018-04-02 53.30 55.00 52.68 52.84 54.53 -3.0992
1 000333 2018-04-03 52.69 53.63 52.18 52.52 52.84 -0.6056
2 000333 2018-04-04 52.82 54.10 52.06 53.01 52.52 0.9330
3 000333 2018-04-09 52.91 53.31 51.00 51.30 53.01 -3.2258
4 000333 2018-04-10 51.45 52.80 51.18 52.77 51.30 2.8655
5 000333 2018-04-11 52.78 53.63 52.41 52.98 52.77 0.3980
6 000333 2018-04-12 52.91 52.94 51.84 51.87 52.98 -2.0951
7 000333 2018-04-13 52.40 52.47 51.01 51.32 51.87 -1.0603
8 000333 2018-04-16 51.31 51.80 49.15 49.79 51.32 -2.9813
"""
start = pd.Timestamp(start).date()
end = pd.Timestamp(end).date()
with session_scope() as sess:
query = sess.query(
StockDaily.code,
StockDaily.date,
StockDaily.A002_开盘价,
StockDaily.A003_最高价,
StockDaily.A004_最低价,
StockDaily.A005_收盘价,
StockDaily.A009_前收盘,
StockDaily.A011_涨跌幅,
StockDaily.A006_成交量,
StockDaily.A007_成交金额,
StockDaily.A008_换手率,
StockDaily.A013_流通市值,
StockDaily.A012_总市值
).filter(
StockDaily.code == stock_code,
StockDaily.date.between(start, end)
)
df = pd.DataFrame.from_records(query.all())
df.columns = DAILY_COLS
df = _fill_zero(df)
df['circulating_share'] = df.cmv / df.close
df['total_share'] = df.tmv / df.close
return df
def _handle_minutely_data(df, exclude_lunch):
"""
Process one day of minute-level data for a single stock.
"""
dts = pd.to_datetime(df[1].map(str) + ' ' + df[2])
ohlcv = pd.Series(data=df[3].values, index=dts).resample('T').ohlc()
ohlcv.fillna(method='ffill', inplace=True)
# Raw volume is quoted in lots (1 lot = 100 shares); convert to shares
volumes = pd.Series(data=df[4].values, index=dts).resample('T').sum() * 100
ohlcv.insert(4, 'volume', volumes)
if exclude_lunch:
# between_time includes both endpoints by default
# Keep consistent with the trading calendar: the session starts from the :31 bar
pre = ohlcv.between_time('9:25', '9:31')
def key(x): return x.date()
grouped = pre.groupby(key)
opens = grouped['open'].first()
highs = grouped['high'].max()
lows = grouped['low'].min()  # consider whether zero values may exist?
closes = grouped['close'].last()
volumes = grouped['volume'].sum()
index = pd.to_datetime([str(x) + ' 9:31' for x in opens.index])
add = pd.DataFrame({'open': opens.values,
'high': highs.values,
'low': lows.values,
'close': closes.values,
'volume': volumes.values
},
index=index)
am = ohlcv.between_time('9:32', '11:30')
pm = ohlcv.between_time('13:00', '15:00')
return pd.concat([add, am, pm])
else:
return ohlcv
def fetch_single_minutely_equity(stock_code, start, end, exclude_lunch=True):
"""
Read minute-level trade detail data for a single stock over a period from the local database.
Notes
-----
1. Only OHLCV columns are included.
2. The raw data is aggregated per minute: first(open), last(close), max(high),
min(low), sum(volume).
Parameters
----------
stock_code : str
The stock code to fetch data for.
start_date : datetime-like
Start date (inclusive).
end_date : datetime-like
End date.
exclude_lunch : bool
Whether to exclude the lunch break. Defaults to True.
Returns
-------
DataFrame: a DataFrame with OHLCV columns.
Examples
--------
>>> symbol = '000333'
>>> start_date = '2018-4-1'
>>> end_date = pd.Timestamp('2018-4-19')
>>> df = fetch_single_minutely_equity(symbol, start_date, end_date)
>>> df.tail()
close high low open volume
2018-04-19 14:56:00 51.55 51.56 51.50 51.55 376400
2018-04-19 14:57:00 51.55 51.55 51.55 51.55 20000
2018-04-19 14:58:00 51.55 51.55 51.55 51.55 0
2018-04-19 14:59:00 51.55 51.55 51.55 51.55 0
2018-04-19 15:00:00 51.57 51.57 51.57 51.57 353900
"""
start = | pd.Timestamp(start) | pandas.Timestamp |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
This script computes features on the GEFCom2017_D dataset. It is
parameterized so that a selected set of features specified by a feature
configuration list are computed and saved as csv files.
"""
import os
from math import ceil
import pandas as pd
from functools import reduce
from sklearn.pipeline import Pipeline
from fclib.feature_engineering.lag import (
SameWeekOfYearLagFeaturizer,
SameDayOfYearLagFeaturizer,
)
from fclib.feature_engineering.temporal import (
TemporalFeaturizer,
DayTypeFeaturizer,
AnnualFourierFeaturizer,
DailyFourierFeaturizer,
WeeklyFourierFeaturizer,
)
from fclib.feature_engineering.rolling_window import SameDayOfWeekRollingWindowFeaturizer
from fclib.feature_engineering.normalization import (
YearNormalizer,
DateNormalizer,
DateHourNormalizer,
)
from fclib.dataset.energy.benchmark_paths import DATA_DIR
print("Data directory used: {}".format(DATA_DIR))
pd.set_option("display.max_columns", None)
OUTPUT_DIR = os.path.join(DATA_DIR, "features")
TRAIN_DATA_DIR = os.path.join(DATA_DIR, "train")
TEST_DATA_DIR = os.path.join(DATA_DIR, "test")
TRAIN_BASE_FILE = "train_base.csv"
TRAIN_FILE_PREFIX = "train_round_"
TEST_FILE_PREFIX = "test_round_"
NUM_ROUND = 6
# A dictionary mapping each feature name to the featurizer for computing the
# feature
FEATURE_MAP = {
"temporal": TemporalFeaturizer,
"annual_fourier": AnnualFourierFeaturizer,
"weekly_fourier": WeeklyFourierFeaturizer,
"daily_fourier": DailyFourierFeaturizer,
"normalized_date": DateNormalizer,
"normalized_datehour": DateHourNormalizer,
"normalized_year": YearNormalizer,
"day_type": DayTypeFeaturizer,
"recent_load_lag": SameDayOfWeekRollingWindowFeaturizer,
"recent_temp_lag": SameDayOfWeekRollingWindowFeaturizer,
"previous_year_load_lag": SameWeekOfYearLagFeaturizer,
"previous_year_temp_lag": SameDayOfYearLagFeaturizer,
}
# List of features that requires the training data when computing them on the
# testing data
FEATURES_REQUIRE_TRAINING_DATA = [
"recent_load_lag",
"recent_temp_lag",
"previous_year_load_lag",
"previous_year_temp_lag",
]
# List of features that requires the max_horizon argument to be set
FEATURES_REQUIRE_MAX_HORIZON = [
"recent_load_lag",
"recent_temp_lag",
"previous_year_load_lag",
"previous_year_temp_lag",
]
# Configuration for computing a scaling factor that captures year over year
# trend. These scaling factors can be used to scale forecasting results if no
# features for capturing the year over year trend are included in the model.
# To compute the load ratios, first, SameDayOfWeekRollingWindowFeaturizer is
# used to compute moving average of the DEMAND of the same hour of day and same
# day of week of seven four-week windows. There is a 10 week gap between the
# latest four-week window and the current week, because of the forecasting
# horizon of this problem.
# Second SameWeekOfYearLagFeaturizer is used to compute the moving average
# features of the same week of year of previous 5 years.
# Finally, the load ratios are computed by dividing the moving average DEMAND
# of previous years by the moving average DEMAND of the current year. The
# idea is that the DEMAND at the current time point is correlated with the
# DEMAND at an earlier time point, so the ratio between the DEMAND at that
# earlier time point in previous years and in the current year can be used to
# scale the forecasting results of the current year.
LOAD_RATIO_CONFIG = {
"same_day_of_week_rolling_args": {
"window_size": 4,
"start_week": 10,
"agg_count": 7,
"output_col_suffix": "recent_moving_average",
"round_agg_result": True,
},
"same_week_of_year_lag_args": {
"n_years": 5,
"week_window": 0,
"output_col_suffix": "lag",
"round_agg_result": True,
},
}
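# Illustrative sketch (an addition, not part of the original benchmark code): once the
# two featurizers above have produced a current-year moving-average column and the
# matching previous-year lag columns, the load ratio is an element-wise quotient, e.g.
#   ratios = features[lag_cols].div(features[recent_avg_col], axis=0)
# where lag_cols / recent_avg_col are hypothetical placeholders for the emitted names.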
def parse_feature_config(feature_config, feature_map):
"""
A helper function that parses a feature_config into the feature name, the
featurizer class, and the arguments used to initialize the featurizer.
"""
feature_name = feature_config[0]
feature_args = feature_config[1]
featurizer = feature_map[feature_name]
return feature_name, feature_args, featurizer
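# Example shape of a feature_config_list (illustrative only; the argument dicts are
# placeholders, not the benchmark's actual settings):
#   [("temporal", {...}), ("recent_load_lag", {...})]
# Each name must be a key of FEATURE_MAP and each dict is passed to that featurizer.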
def compute_training_features(train_df, df_config, feature_config_list, feature_map, max_horizon):
"""
Creates a pipeline based on the input feature configuration list and the
feature_map. Fit the pipeline on the training data and transform
the training data.
Args:
train_df(pd.DataFrame): Training data to fit on and transform.
df_config(dict): Configuration of the time series data frame to compute
features on.
feature_config_list(list of tuples): The first element of each
feature configuration tuple is the name of the feature,
which must be a key in feature_map. The second element of each
feature configuration tuple is a dictionary of arguments to pass
to the featurizer corresponding to the feature name in feature_map.
feature_map(dict): Maps each feature name (key) to corresponding
featurizer(value).
max_horizon(int): Maximum number of steps ahead to forecast.
The step unit is the frequency of the data.
This value is needed to prevent creating features on the
training data that are not available for the testing data. For
example, the features and models are created on week 7 to
forecast week 8 to week 10. It would not make sense to create a
feature using data from week 8 and week 9, because they are not
available at the forecast creation time. For the same reason, a feature
for a training example whose target is week 7 should not use data from
week 5 and week 6.
Returns:
(pd.DataFrame, sklearn.pipeline): (training features, feature
engineering pipeline fitted on the training data).
"""
pipeline_steps = []
for feature_config in feature_config_list:
feature_name, feature_args, featurizer = parse_feature_config(feature_config, feature_map)
if feature_name in FEATURES_REQUIRE_MAX_HORIZON:
feature_args["max_horizon"] = max_horizon
pipeline_steps.append((feature_name, featurizer(df_config=df_config, **feature_args)))
feature_engineering_pipeline = Pipeline(pipeline_steps)
feature_engineering_pipeline_fitted = feature_engineering_pipeline.fit(train_df)
train_features = feature_engineering_pipeline_fitted.transform(train_df)
return train_features, feature_engineering_pipeline_fitted
def compute_testing_features(
test_df, feature_engineering_pipeline, feature_config_list=None, train_df=None,
):
"""
Computes features on the testing data using a fitted feature engineering
pipeline.
Args:
test_df(pd.DataFrame): Testing data to fit on and transform.
feature_engineering_pipeline(sklearn.pipeline): A feature engineering
pipeline fitted on the training data.
feature_config_list(list of tuples, optional): The first element of
each feature configuration tuple is the name of the feature,
which must be a key in feature_map. The second element of each
feature configuration tuple is a dictionary of arguments to pass
to the featurizer corresponding the feature name in feature_map.
A value is required if train_df is not None.
train_df(pd.DataFrame, optional): Training data needed to compute
some lag features on testing data.
Returns:
pd.DataFrame: Testing features.
"""
if train_df is not None and feature_config_list is not None:
train_df_arguments = {}
for feature_config in feature_config_list:
feature_step_name = feature_config[0]
if feature_step_name in FEATURES_REQUIRE_TRAINING_DATA:
train_df_arguments[feature_step_name + "__train_df"] = train_df
if len(train_df_arguments) > 0:
feature_engineering_pipeline.set_params(**train_df_arguments)
test_features = feature_engineering_pipeline.transform(test_df)
return test_features
def compute_features_one_round(
train_base_df,
train_delta_df,
test_df,
df_config,
feature_config_list,
feature_map,
filter_by_month,
compute_load_ratio=False,
):
"""
Computes features on one round of training and testing data.
Args:
train_base_df(pd.DataFrame): Training data common to all rounds.
train_delta_df(pd.DataFrame): Additional training data for the
current round.
test_df(pd.DataFrame): Testing data of the current round.
df_config: Configuration of the input dataframes.
feature_config_list(list of tuples, optional): The first element of
each feature configuration tuple is the name of the feature,
which must be a key in feature_map. The second element of each
feature configuration tuple is a dictionary of arguments to pass
to the featurizer corresponding to the feature name in feature_map.
feature_map(dict): Maps each feature name (key) to corresponding
featurizer(value).
filter_by_month(bool): Whether to filter the training data by the month of
the testing data.
compute_load_ratio(bool): Whether to compute a scaling factor that captures
the year-over-year trend and can be used to scale the forecasting
results. If True, load ratios are computed on the testing data
according to LOAD_RATIO_CONFIG.
Returns:
(pd.DataFrame, pd.DataFrame): (training features, testing features)
"""
train_round_df = | pd.concat([train_base_df, train_delta_df]) | pandas.concat |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse( | lib.isneginf_scalar(np.inf) | pandas.lib.isneginf_scalar |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[ | DataFrame(np.nan, index=df.index, columns=[0]) | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
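# Descriptive note: for 1.1 <= pandas < 1.2.2 the extension-dtype results are only
# compared approximately (check_exact=False) and each dtype is verified to be an
# extension dtype; other pandas versions are compared exactly.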
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
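    # For reference: `rename` changes the labels themselves, whereas `rename_axis`
    # changes the *name* attached to the index or columns axis. A rough sketch of
    # the distinction (not part of the test suite):
    #   pd.DataFrame({"a": [1]}, index=pd.Index(["x"], name="idx")).rename_axis("idx2")
    # keeps the label "x" but renames the axis from "idx" to "idx2".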
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
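    # The column subsetting and sort_index above pin down a deterministic layout:
    # agg with a list of functions yields a frame indexed by function name, and agg
    # with a dict keeps only the listed columns. Per the last assertion,
    # pandas-on-Spark accepts a list or dict for `func` but not a tuple.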
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
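    # droplevel removes the named or positional level(s) from a MultiIndex on the
    # requested axis; removing every level is rejected (ValueError above), and a
    # set or dict is not a valid level selector (KeyError above). Illustrative
    # sketch: a frame indexed by levels ("a", "b") is indexed by "b" alone after
    # .droplevel("a").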
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
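    # drop accepts either positional `labels` plus `axis`, or the `index`/`columns`
    # keywords, but mixing `labels` with `columns` is ambiguous and raises
    # ValueError (asserted above). The re-checks under
    # ps.option_context("compute.isin_limit", 2) presumably exercise the code path
    # where the label filter is too long for a plain isin predicate and a
    # join-based strategy is used instead.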
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
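    # dropna semantics exercised above: how="all" drops a row/column only when
    # every value is NA, thresh=N keeps it when at least N values are non-NA, and
    # subset restricts the check to the given labels on the opposite axis.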
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
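    # fillna with a dict fills per-column values, and with MultiIndex columns a
    # top-level key such as "x" applies to every column under that level (as the
    # assertions above show). The NotImplementedError/ValueError cases document
    # current pandas-on-Spark limitations: no axis=1 fill, no `limit` together
    # with a value, and no DataFrame-valued fill.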
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
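        # approx=True presumably delegates to Spark's approximate distinct count
        # (HyperLogLog-style), so 100 distinct values may be reported as 103 under
        # the default relative standard deviation, while rsd=0.01 is tight enough
        # to recover the exact count here.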
# Assert unsupported axis value yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
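    # swaplevel exchanges two MultiIndex levels, selected by position or by name,
    # on either axis; with no arguments it swaps the two innermost levels (i=-2,
    # j=-1), which is why swaplevel() and swaplevel(0, 1) agree for the two-level
    # index above.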
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# Seems like pandas has a bug when passing `np.array` as parameter
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
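        # The helper sorts both results and resets the index before comparing,
        # since row order after a distributed join is not guaranteed to match
        # pandas' ordering.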
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
        multi_index_pdf = pd.DataFrame(
            [[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]
        )
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
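    # Caveat (not asserted here): DataFrame.append was deprecated in pandas 1.4
    # and removed in pandas 2.0 in favor of concat, so the pandas side of these
    # checks assumes an older pandas.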
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
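    # sample is only smoke-tested: frac-based draws are random so the rows cannot
    # be compared against pandas, calling sample() with no arguments raises
    # ValueError, and n= is asserted to be unsupported in pandas-on-Spark.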
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip columns comparison by reset_index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
        # Results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = | pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")]) | pandas.MultiIndex.from_tuples |
"""Test attributing simple impact."""
import numpy as np
import pandas as pd
import pytest
from nbaspa.data.endpoints.pbp import EventTypes
from nbaspa.player_rating.tasks import SimplePlayerImpact
@pytest.mark.parametrize(
"evt",
[
EventTypes.REBOUND,
EventTypes.FREE_THROW,
EventTypes.VIOLATION,
EventTypes.FIELD_GOAL_MISSED
]
)
def test_basic_impact(evt):
"""Test attributing simple impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": evt,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": 0,
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.0, 0.0]))
def test_foul_impact():
"""Test attributing foul impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.FOUL,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": [456, 123],
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([-0.1, 0.1]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.0, 0.0]))
def test_deadball_impact():
"""Test attributing deadball turnover impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.TURNOVER,
"NBA_WIN_PROB_CHANGE": 0.1,
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": 0,
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.0, 0.0]))
def test_steal_impact():
"""Test attributing steal impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.TURNOVER,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["STL", None],
"VISITORDESCRIPTION": [None, "STL"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": [456, 123],
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([-0.1, 0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.1, -0.1]))
def test_block_impact():
"""Test attributing block impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.FIELD_GOAL_MISSED,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["BLK", None],
"VISITORDESCRIPTION": [None, "BLK"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": [456, 123],
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER3_IMPACT"].equals(pd.Series([0.1, -0.1]))
def test_uast():
"""Test attributing unassisted field goals."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": EventTypes.FIELD_GOAL_MADE,
"NBA_WIN_PROB_CHANGE": [0.1, -0.1],
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": 0,
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, 0.1]))
assert output["PLAYER2_IMPACT"].equals( | pd.Series([0.0, 0.0]) | pandas.Series |
'''
Dataset related tests
'''
__author__ = '<NAME>'
import unittest
import numpy as np
import pandas as pd
import itertools
from pandas.testing import assert_frame_equal, assert_series_equal
from simpleml.datasets import PandasDataset, SingleLabelPandasDataset, MultiLabelPandasDataset, NumpyDataset
from simpleml.datasets.base_dataset import Dataset, AbstractDataset
from simpleml.datasets.abstract_mixin import AbstractDatasetMixin
from simpleml.datasets.numpy_mixin import NumpyDatasetMixin
from simpleml.datasets.pandas_mixin import BasePandasDatasetMixin, \
SingleLabelPandasDatasetMixin, MultiLabelPandasDatasetMixin, DATAFRAME_SPLIT_COLUMN
from simpleml.utils.errors import DatasetError
class AbstractMixinTests(unittest.TestCase):
'''
Tests for abstract mixin class
'''
@property
def dummy_dataset(self):
class TestMixinClass(AbstractDatasetMixin):
pass
return TestMixinClass()
def test_abstract_methods(self):
dataset = self.dummy_dataset
with self.assertRaises(NotImplementedError):
dataset.X
with self.assertRaises(NotImplementedError):
dataset.y
with self.assertRaises(NotImplementedError):
dataset.get('', '')
with self.assertRaises(NotImplementedError):
dataset.get_feature_names()
with self.assertRaises(NotImplementedError):
dataset.get_split_names()
class _PandasTestHelper(object):
'''
All mixins should run these tests with the appropriate setups per class
- self.dummy_dataset
- self.y_equality_function
- self._data
- self.expected_dataframe
- self.expected_x
- self.expected_y
- self.expected_train_dataframe
- self.expected_train_x
- self.expected_train_y
'''
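    # A minimal sketch (hypothetical, not part of the original suite) of how a concrete
    # test case is expected to wire these attributes before reusing the shared tests:
    #
    #   class ExamplePandasDatasetTests(unittest.TestCase, _PandasTestHelper):
    #       y_equality_function = staticmethod(assert_series_equal)
    #       _data = ...                  # full frame, including DATAFRAME_SPLIT_COLUMN
    #       expected_x = ...             # feature columns only
    #       expected_y = ...             # label column(s) only
    #       # ...plus the expected_dataframe and expected_train_* attributes
    #
    #       @property
    #       def dummy_dataset(self):
    #           dataset = PandasDataset()            # constructor args are illustrative
    #           dataset.dataframe = self._data.copy()
    #           return dataset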
'''
property tests
'''
def test_dataframe_set_validation(self):
'''
Check requirement for pd.DataFrame
'''
dataset = self.dummy_dataset
with self.assertRaises(DatasetError):
dataset.dataframe = 'blah'
dataset.dataframe = pd.DataFrame()
def test_y(self):
'''
Test property wrapper - same as get y
'''
dataset = self.dummy_dataset
get_y = dataset.get(column='y', split=None)
self.y_equality_function(self.expected_y, get_y)
self.y_equality_function(self.expected_y, dataset.y)
def test_x(self):
'''
Test property wrapper - same as get X
'''
dataset = self.dummy_dataset
get_x = dataset.get(column='X', split=None)
assert_frame_equal(self.expected_x, get_x)
assert_frame_equal(self.expected_x, dataset.X)
def test_dataframe(self):
'''
Should return a copy of the full dataset
'''
dataset = self.dummy_dataset
get_dataframe = dataset._dataframe
assert_frame_equal(self._data, get_dataframe)
'''
get tests
'''
def test_get_nonexistent_column_error(self):
'''
Should raise an error
'''
dataset = self.dummy_dataset
with self.assertRaises(ValueError):
dataset.get(column='other', split=None)
def test_missing_split_column_error(self):
'''
Attempt to query a split from a dataframe without the split column
Would otherwise throw a KeyError
'''
dataset = self.dummy_dataset
dataset._external_file.drop(DATAFRAME_SPLIT_COLUMN, axis=1, inplace=True)
with self.assertRaises(DatasetError):
dataset.get(column='X', split='Nonsense')
def test_get_nonexistent_split(self):
'''
Should return an empty frame
'''
dataset = self.dummy_dataset
X = dataset.get(column='X', split='NONSENSE')
y = dataset.get(column='y', split='NONSENSE')
data = dataset.get(column=None, split='NONSENSE')
assert_frame_equal(X, self.expected_x.head(0))
self.y_equality_function(y, self.expected_y.head(0))
assert_frame_equal(data, self.expected_dataframe.head(0))
def test_get_with_split(self):
'''
Should return df slices
'''
dataset = self.dummy_dataset
X = dataset.get(column='X', split='TRAIN')
y = dataset.get(column='y', split='TRAIN')
data = dataset.get(column=None, split='TRAIN')
assert_frame_equal(self.expected_train_x, X)
self.y_equality_function(self.expected_train_y, y)
assert_frame_equal(self.expected_train_dataframe, data)
def test_get_with_null_parameters(self):
'''
Should return all columns and rows except the split column
'''
dataset = self.dummy_dataset
get_dataframe = dataset.get(column=None, split=None)
assert_frame_equal(self.expected_dataframe, get_dataframe)
'''
references
'''
def test_dataframe_reference(self):
'''
        Calling Dataset.dataframe returns a copy of the underlying data
'''
dataset = self.dummy_dataset
self.assertNotEqual(id(dataset._dataframe), id(dataset._external_file))
def test_dataframe_mutability(self):
'''
        Test that mutating the returned dataframe doesn't affect the raw data
'''
dataset = self.dummy_dataset
copy = dataset._dataframe
copy.drop(DATAFRAME_SPLIT_COLUMN, axis=1, inplace=True)
with self.assertRaises(AssertionError):
assert_frame_equal(dataset._dataframe, copy)
def test_get_X_mutability(self):
'''
Pandas dataframes often return copies and views for efficiency.
        Views can cause in-place mutations to propagate back to the original
        dataframe. That is not allowed here, in order to maintain the integrity
        of the persisted data.
Tests for:
- memory pointers (object id)
- df._is_copy is not None (weakref when attached to a parent df)
- df._is_view is False (True for certain slices)
'''
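        # Background sketch in plain pandas (illustrative only, not executed as part of
        # this test):
        #   parent = pd.DataFrame({'a': [1, 2]})
        #   view = parent['a']          # may share memory with parent
        #   safe = parent['a'].copy()   # owns its data; mutating it leaves parent intact
        # get() must always hand back the second kind of object.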
dataset = self.dummy_dataset
unmodified_copy = dataset.dataframe.copy(deep=True)
for column, split in itertools.product(
['X'],
['TRAIN', 'TEST', 'VALIDATION', None]
):
with self.subTest(column=column, split=split):
copy = dataset.get(column=column, split=split)
# Test for pandas references
self.assertIsNone(copy._is_copy)
# Not fully understood behavior causes pandas to return views for certain
# operations that morph into copies when modified (appears subject to mem optimizations)
self.assertFalse(copy._is_view)
# Modify copy and see if the source changed
copy.loc[1, 'a'] = 9876
| assert_frame_equal(dataset.dataframe, unmodified_copy) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from dataIO import textfile
from pathlib_mate import Path
from sfm import lines_count
import pandas as pd
def process_rare_prefix():
nlines = lines_count.count_lines("rare-prefix.txt") * 1.0
prefix_list = list(textfile.readlines("rare-prefix.txt"))
df = list()
    for i, prefix in enumerate(prefix_list, start=1):
percentage = "%.2f%%" % (i / nlines * 100,)
row = [prefix, i, percentage]
df.append(row)
df = | pd.DataFrame(df, columns=["prefix", "index", "percentage"]) | pandas.DataFrame |
import pandas as pd
import glob, os
from datetime import datetime
from collections import OrderedDict
def display_original_bp_list(IN_FOLDER, POSE_TOOL, FILE_FORMAT):
if POSE_TOOL == 'DLC':
found_files = glob.glob(IN_FOLDER + '/*.' + FILE_FORMAT)
# for h5
if FILE_FORMAT == 'h5':
try:
first_df = | pd.read_hdf(found_files[0]) | pandas.read_hdf |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 03:24:14 2020
@author: Alice
to use on the lemon dataset: https://github.com/softwaremill/lemon-dataset/
"""
import json
import pandas as pd
import os
import cv2
from fruit_classification import *
dim = 100
with open(r"./lemon-dataset/annotations/instances_default.json", "r") as read_file:
data = json.load(read_file)
images_df = pd.DataFrame(data['images'])
annotations_df = | pd.DataFrame(data['annotations']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 16 09:15:54 2016
@author: <NAME>
"""
import pandas as pd
import numpy as np
###### Import packages needed for the make_vars functions
from scipy.interpolate import interp1d
import pywt
from skimage.filters.rank import entropy
from skimage.morphology import rectangle
from skimage.util import img_as_ubyte
def make_dwt_vars_cD(wells_df,logs,levels,wavelet):
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
cA_4, cD_4, cD_3, cD_2, cD_1 = pywt.wavedec(temp_data,wave,level=4,mode='symmetric')
dict_cD_levels = {1:cD_1, 2:cD_2, 3:cD_3, 4:cD_4}
for i in levels:
new_depth = np.linspace(min(depth),max(depth),len(dict_cD_levels[i]))
fA = interp1d(new_depth,dict_cD_levels[i],kind='nearest')
temp_df[log + '_cD_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
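# Example call (illustrative; 'GR' and 'ILD_log10' are assumed log names and may differ
# in your data):
#   cD_feats = make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10'],
#                               levels=[1, 2, 3, 4], wavelet='db3')
# Each output column holds the detail coefficients of one decomposition level,
# interpolated back onto the original depth sampling so it aligns row-for-row with
# wells_df.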
def make_dwt_vars_cA(wells_df,logs,levels,wavelet):
    wave = pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for i in levels:
cA_cD = pywt.wavedec(temp_data,wave,level=i,mode='symmetric')
cA = cA_cD[0]
new_depth = np.linspace(min(depth),max(depth),len(cA))
fA = interp1d(new_depth,cA,kind='nearest')
temp_df[log + '_cA_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_entropy_vars(wells_df,logs,l_foots):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
image = np.vstack((temp_data,temp_data,temp_data))
image -= np.median(image)
image /= np.max(np.abs(image))
image = img_as_ubyte(image)
for l_foot in l_foots:
footprint = rectangle(l_foot,3)
temp_df[log + '_entropy_foot' + str(l_foot)] = entropy(image,footprint)[0,:]
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
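# Example call (illustrative):
#   ent_feats = make_entropy_vars(wells_df=training_data, logs=['GR'], l_foots=[5, 10])
# Each log is tiled into a 3-row image so skimage's rank `entropy` filter can be applied
# with a rectangular footprint; only the first row of the filtered image is kept.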
def make_gradient_vars(wells_df,logs,dx_list):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for dx in dx_list:
temp_df[log + 'gradient_dx' + str(dx)] = np.gradient(temp_data,dx)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_av_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
                temp_df[log + '_moving_av_' + str(window) + 'ft'] = temp_data.rolling(window=window, min_periods=1, center=True).mean()
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
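# Example call (illustrative):
#   av_feats = make_moving_av_vars(wells_df=training_data, logs=['GR'], windows=[5, 10, 25])
# This yields one centred moving-average column per (log, window) pair, aligned with the
# original row order; the std/max/min variants below follow the same pattern.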
def make_moving_std_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
                temp_df[log + '_moving_std_' + str(window) + 'ft'] = temp_data.rolling(window=window, min_periods=1, center=True).std()
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_max_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
                temp_df[log + '_moving_max_' + str(window) + 'ft'] = temp_data.rolling(window=window, min_periods=1, center=True).max()
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_min_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
                temp_df[log + '_moving_min_' + str(window) + 'ft'] = temp_data.rolling(window=window, min_periods=1, center=True).min()
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_rolling_marine_ratio_vars(wells_df, windows):
grouped = wells_df.groupby(['Well Name'])
new_var = | pd.DataFrame() | pandas.DataFrame |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # a tz mismatch against tz-aware inputs raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
            # passing tz should result in DatetimeIndex, then the mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
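    # Illustrative summary of the behaviour exercised above (not an executed assertion):
    # an ISO 8601 offset such as '-05:00' parses to pytz.FixedOffset(-300), while the
    # non-ISO spelling '-5:00' is parsed via dateutil and yields an equivalent fixed
    # offset, so both ranges compare equal to the FixedOffset-based expected index.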
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = date_range(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
# TODO(GH-24559): Remove the xfail for the tz-aware case.
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]',
marks=[pytest.mark.xfail(),
pytest.mark.filterwarnings(
"ignore:\\n Passing:FutureWarning")]),
[None, 'datetime64[ns]'],
])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
# This is the desired future behavior
@pytest.mark.xfail(reason="Future behavior", strict=False)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619
# TODO(GH-24559): Remove xfail
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq='MS', tz='Australia/Melbourne')
test = pd.DataFrame({'data': range(len(index))}, index=index)
test = test.resample('Y').mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1)
for x in test.index])
expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
'2001-06-01 00:00:00',
'2002-06-01 00:00:00',
'2003-06-01 00:00:00',
'2004-06-01 00:00:00',
'2005-06-01 00:00:00'],
tz='Australia/Melbourne')
tm.assert_index_equal(result, expected)
def test_construction_with_tz_and_tz_aware_dti(self):
# GH 23579
dti = date_range('2016-01-01', periods=3, tz='US/Central')
with pytest.raises(TypeError):
DatetimeIndex(dti, tz='Asia/Tokyo')
def test_construction_with_nat_and_tzlocal(self):
tz = dateutil.tz.tzlocal()
result = DatetimeIndex(['2018', 'NaT'], tz=tz)
expected = DatetimeIndex([Timestamp('2018', tz=tz), pd.NaT])
tm.assert_index_equal(result, expected)
class TestTimeSeries(object):
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
rng1 = date_range('2014', '2015', freq='M', tz=tz)
expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz)
rng2 = | date_range('2014', '2015', freq='MS', tz=tz) | pandas.date_range |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
#import pybedtools #not used
import pandas as pd
import numpy as np
import time
import argparse
import sys
from matplotlib import pyplot as plt
# In[ ]:
# %matplotlib inline
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_name = 'repeat_masker.mapable.k50.Umap.hg38'
# genome_GC_frequency = '/fh/fast/ha_g/user/adoebley/projects/griffin_paper/genome_GC_frequency/results'
# out_dir = 'tmp'
# size_range = [15,500]
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_name', help='name of mapable regions file (with .bed removed)', required=True)
parser.add_argument('--genome_GC_frequency',help='folder containing GC counts in the reference sequence (made by generate_reference_files.snakemake)',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
args = parser.parse_args()
bam_file_name = args.bam_file_name
mapable_name=args.mapable_name
genome_GC_frequency = args.genome_GC_frequency
out_dir = args.out_dir
size_range = args.size_range
# In[ ]:
print('arguments provided:')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_name = "'+mapable_name+'"')
print('\tgenome_GC_frequency = "'+genome_GC_frequency+'"')
out_dir = out_dir.rstrip('/')
print('\tout_dir = "'+out_dir+'"')
print('\tsize_range = '+str(size_range))
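# Example invocation (illustrative; the script name, paths and sample name are placeholders):
#   python GC_bias.py \
#       --bam_file_name sample_01 \
#       --mapable_name repeat_masker.mapable.k50.Umap.hg38 \
#       --genome_GC_frequency /path/to/genome_GC_frequency/results \
#       --out_dir results/GC_correction \
#       --size_range 15 500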
# In[ ]:
# For now the GC smoothing bin size is hard-coded rather than exposed as an argument
GC_smoothing_step = 20
# In[ ]:
#input is the out_file from the previous step
in_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('in_file:',in_file)
#output is smoothed version
smoothed_out_file = out_dir +'/'+mapable_name+'/GC_bias/'+ bam_file_name+'.GC_bias.txt'
#plot files
plot_file1 = out_dir +'/'+mapable_name+'/GC_plots/'+ bam_file_name+'.GC_bias.pdf'
plot_file2 = out_dir +'/'+mapable_name+'/GC_plots/'+ bam_file_name+'.GC_bias.summary.pdf'
plot_file3 = out_dir +'/'+mapable_name+'/GC_plots/'+ bam_file_name+'.GC_bias.key_lengths.pdf'
print('out_file:',smoothed_out_file)
sys.stdout.flush()
# In[ ]:
#create output folders if needed
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_plots/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_plots/')
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_bias/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_bias/')
# In[ ]:
#import the GC info from the genome
frequency_prefix = genome_GC_frequency+'/'+mapable_name+'.'
GC_freq = pd.DataFrame()
for i in range(size_range[0],size_range[1]+1):
current_path = frequency_prefix+str(i)+'bp.GC_frequency.txt'
current_data = pd.read_csv(current_path,sep='\t')
GC_freq = GC_freq.append(current_data, ignore_index=True)
GC_freq['GC_content']=GC_freq['num_GC']/GC_freq['length']
GC_freq = GC_freq.sort_values(by=['GC_content','length']).reset_index(drop=True)
# In[ ]:
#import GC counts from the sample
GC_df = pd.read_csv(in_file, sep='\t')
GC_df['GC_content']=GC_df['num_GC']/GC_df['length']
GC_df = GC_df.sort_values(by=['GC_content','length']).reset_index(drop=True)
# In[ ]:
#calculate the GC_bias
new_df = | pd.DataFrame() | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import sys
import pandas as pd
import numpy as np
from os.path import isfile, splitext
from routine_qiime2_analyses._routine_q2_xpbs import run_xpbs, print_message
from routine_qiime2_analyses._routine_q2_io_utils import (
read_yaml_file,
read_meta_pd,
get_taxonomy_classifier,
get_job_folder,
get_analysis_folder,
parse_g2lineage,
get_raref_tab_meta_pds,
simple_chunks
)
from routine_qiime2_analyses._routine_q2_cmds import (
write_barplots,
write_seqs_fasta,
write_taxonomy_sklearn,
write_collapse_taxo,
run_export,
run_import
)
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def get_padded_new_rows_list(new_rows, max_new_rows):
# create a new 'Not available' matrix of shape (n_features x n_fields)
padded_new_rows_list = []
for padded_row in new_rows:
# update row if not of max length
n_pad = max_new_rows - len(padded_row)
if n_pad:
to_pad = ['Not_available']*n_pad
padded_row = padded_row + to_pad
padded_new_rows_list.append(padded_row)
return padded_new_rows_list
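# Illustrative example with hypothetical values (not taken from a real run):
#   get_padded_new_rows_list([['k__Bacteria', 'p__Firmicutes'], ['k__Archaea']], 3)
#   -> [['k__Bacteria', 'p__Firmicutes', 'Not_available'],
#       ['k__Archaea', 'Not_available', 'Not_available']]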
def add_alpha_level_label(taxa, padded_new_rows_list, max_new_rows):
    # add an alpha label for rank-identification purposes
ALPHA = 'ABCDEFGHIJKL'
cols = ['Node_level_%s' % (ALPHA[idx]) for idx in range(1, max_new_rows + 1)]
padded_new_rows_pd = pd.DataFrame(
padded_new_rows_list,
index = taxa,
columns = cols
)
return padded_new_rows_pd
def extend_split_taxonomy(split_taxa_pd: pd.DataFrame):
to_concat = []
for col in split_taxa_pd.columns.tolist():
if split_taxa_pd[col].unique().size > 50:
continue
split_taxa_dummy = split_taxa_pd[col].str.get_dummies()
split_taxa_dummy.columns = ['%s__%s' % (x, col) for x
in split_taxa_dummy.columns]
to_concat.append(split_taxa_dummy)
if len(to_concat):
return | pd.concat(to_concat, axis=1) | pandas.concat |
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError, InvalidParameterError
class UcecConf(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
        version (str, optional): The version number to load, or the string "latest" to just load the latest build. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0", "1.1", "1.2", "2.0", "2.0.1"]
data_files = {
"1.0": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_Direct_SRM_tumor_v1.0.cct.gz", #SRM not to be included in 1.0
#"UCEC_confirmatory_IMAC_SRM_tumor_v1.0.cct.gz",
"UCEC_confirmatory_meta_table_v1.0.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_nglycoform-site_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_RNAseq_isoform_FPKM_removed_circRNA_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.0.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.0.maf.gz",
#"UCEC_confirmatory_WGS_SV_tumor_v1.0.txt.gz" #structural_variation - not to be included in 1.0
],
"1.1": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_meta_table_v1.1.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.1.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.1.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.1.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.1.maf.gz",
],
"1.2": [
"UCEC_confirmatory_meta_table_v1.2.xlsx",
"UCEC_confirmatory_SRM_Direct_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.2.txt.gz",
# "UCEC_confirmatory_RNAseq_isoform_FPKM_removed_circRNA_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.2.maf.gz",
# "UCEC_confirmatory_WGS_SV_tumor_v1.2.txt.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.2.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.2.cct.gz",
# "UCEC_confirmatory_nglycoform-site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
],
"2.0": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_meta_table_v2.0.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",
# "UCEC_confirmatory_WES_somatic_mutation_category_level_V1.2.txt.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",
# "UCEC_confirmatory_WGS_SV_tumor_v2.0.txt.gz",
],
"2.0.1": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_meta_table_v2.0.1.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",
],
}
# Call the parent class __init__ function
super().__init__(cancer_type="ucecconf", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
if file_name in ["UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["acetylproteomics_gene"] = df
elif file_name in ["UCEC_confirmatory_acetyl_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
df[['Name','Database_ID','Site']] = df.idx.str.split("@", expand=True)
df['Site'] = df['Site'].str.rsplit('-',1,expand=True)[1]
df = df.set_index(["Name", "Site", "Database_ID"])
df = df.drop(columns=["idx"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["acetylproteomics"] = df
elif file_name in ["UCEC_confirmatory_meta_table_v1.0.xlsx",
"UCEC_confirmatory_meta_table_v1.1.xlsx",
"UCEC_confirmatory_meta_table_v1.2.xlsx",
"UCEC_confirmatory_meta_table_v2.0.xlsx",
"UCEC_confirmatory_meta_table_v2.0.1.xlsx"]:
df = pd.read_excel(file_path)
df.insert(6, "Proteomics_Tumor_Normal", df["Group"])
df.loc[df['Group'] == 'Enriched_Normal', 'Idx'] = df['Idx'] + '.N'
df.loc[df['Group'] == 'Adjacent_normal', 'Idx'] = df['Idx'].str[:-2] + '.N'
df = df.set_index("Idx")
df.loc[df['Group'] != 'Tumor', 'Group'] = 'Normal'
df = df.rename({'Group': 'Sample_Tumor_Normal'}, axis=1)
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["clinical"] = df
elif file_name in ["UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.0.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.1.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.2.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0, na_values=' NA')
df = df.transpose()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["methylation"] = df
elif file_name in ["UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["miRNA"] = df
elif file_name in ["UCEC_confirmatory_phospho_gene_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = | pd.read_csv(file_path, sep='\t', index_col=0) | pandas.read_csv |
#standard python libraries
import json
import atexit
import datetime
import os
import warnings
import math
import shutil
import joblib
#external libraries
from binance.client import Client
import numpy as np
import pandas as pd
import ta
from sklearn import preprocessing
import torch
#external methods
from utils import read_config, read_json
from hyperparameters import HyperParameters, CandlestickInterval, Derivation, Scaling, Balancing, Shuffle, ScalerType
class dbid():
"""
Description:
Class which can be used like a dictionary.
    This class is not threadsafe! Changes to the dictionary are held in memory and only written back to disk when the interpreter exits (the dump is registered with atexit).
Arguments:
-path (string): Path of the database
"""
def __init__(self, path):
self.path = f"{path}/dbid.json"
#load in the dbid
with open(self.path) as json_file:
self.dbid = json.load(json_file)
#register the dump at the end of lifetime
atexit.register(self.dump)
def __getitem__(self, key):
return self.dbid[key]
def __setitem__(self, key, item):
#change the dict in ram
self.dbid[key] = item
def dump(self):
#save changes to json file
with open(self.path, 'w') as fp:
json.dump(self.dbid, fp, indent=4)
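# Minimal usage sketch for dbid (paths and keys are placeholders; assumes
# "my_database/dbid.json" already exists on disk):
#
#   ids = dbid("my_database")
#   ids["last_altered"] = "2021-01-01"   # change is held in RAM ...
#   # ... and written back to my_database/dbid.json when the interpreter exits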
class DataBase():
"""
Description:
This is the base Database class, on which every other Database Objects builds upon.
Arguments:
-path[string]: Path of the Database
"""
def __init__(self, path):
#save the params
self.path = path
#check if the path exists and is a database
if not os.path.isdir(path):
raise Exception("The path you chose is not existing")
if not os.path.isfile(f"{path}/dbid.json"):
raise Exception("The path you chose is not a DataBase")
#setup dbid
self.dbid = dbid(path=self.path)
def __getitem__(self, index):
"""
Description:
Method for accessing data of the database. The access is direct from the harddrive (slower but more memory efficient)
Arguments:
-index[string, list]: Generally: [candlestick_interval, list of features]. To access the whole dataframe only specify the candlestick_interval you want e.g. db["5m"].
To access only one feature specify the datatype and the feature you want e.g. db["5m", "close"]
To access multiple features specify the datatype and a list of features you want e.g. db["5m", ["close", "open"]]
Return:
-data[pd.DataFrame]: Returns always a DataFrame in the shape (rows, number of specified features)
"""
#make sure that candlestick interval is of type CandlestickInterval
if type(index) == tuple:
if not isinstance(index[0], CandlestickInterval):
raise Exception(f"Make sure your candlestick interval is of type CandlestickInterval and not {type(index[0])}")
elif not isinstance(index, CandlestickInterval):
raise Exception(f"Make sure your candlestick interval is of type CandlestickInterval and not {type(index)}")
#set the path
if type(index) == tuple:
path = f"{self.path}/{index[0].value}"
elif isinstance(index, CandlestickInterval):
path = f"{self.path}/{index.value}"
else:
raise Exception("Your chosen index is not valid")
#check if path is available
if not os.path.isdir(path):
raise Exception("Your chosen kline-interval is not available")
#access whole dataframe of certain kline-interval
if isinstance(index, CandlestickInterval):
#load in the data and return
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index.value}", index_col="index")
#convert the date columns
data["close_time"]= pd.to_datetime(data["close_time"])
data["open_time"]= pd.to_datetime(data["open_time"])
return data
except:
raise Exception("Your chosen kline-interval is not available in this DataBase")
#access all the labels
elif type(index) == tuple and len(index) == 2 and isinstance(index[0], CandlestickInterval) and index[1] == "labels":
try:
#get all the label names
label_names = next(os.walk(f"{path}/labels"))[1]
#load in all the labels
labels = pd.DataFrame()
for label_name in label_names:
df = pd.read_csv(filepath_or_buffer=f"{path}/labels/{label_name}/labels.csv", header=None, index_col=0, names=["index", "labels"])
labels[label_name] = df["labels"]
return labels
except:
raise Exception("There are no labels in your database")
#access one label
elif type(index) == tuple and len(index) == 3 and isinstance(index[0], CandlestickInterval) and index[1] == "labels" and type(index[2]) == str:
try:
#load in the labels
labels = pd.read_csv(filepath_or_buffer=f"{path}/labels/{index[2]}/labels.csv", header=None, index_col=0, names=["index", index[2]])
return labels
except:
raise Exception("Your chosen label-type is not available")
#access a list of labels
elif type(index) == tuple and len(index) == 3 and isinstance(index[0], CandlestickInterval) and index[1] == "labels" and type(index[2]) == list:
try:
#load in the labels
labels = pd.DataFrame()
for label_name in index[2]:
df = pd.read_csv(filepath_or_buffer=f"{path}/labels/{label_name}/labels.csv", header=None, index_col=0, names=["index", label_name])
labels[label_name] = df[label_name]
return labels[index[2]]
except:
raise Exception("Your chosen label-type is not available")
#access one feature of a kline-interval
elif type(index) == tuple and len(index) == 2 and isinstance(index[0], CandlestickInterval) and type(index[1]) == str:
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index[0].value}", usecols=[index[1]])
#convert the date columns
if "close_time" in data.columns:
data["close_time"]= pd.to_datetime(data["close_time"])
if "open_time" in data.columns:
data["open_time"]= pd.to_datetime(data["open_time"])
return data
except:
raise Exception("Your chosen feature is not available in this DataBase")
#access list of features of a kline-interval
elif type(index) == tuple and len(index) == 2 and isinstance(index[0], CandlestickInterval) and type(index[1]) == list:
try:
data = pd.read_csv(filepath_or_buffer=f"{path}/{index[0].value}", usecols=index[1])
#convert the date columns
if "close_time" in data.columns:
data["close_time"]= pd.to_datetime(data["close_time"])
if "open_time" in data.columns:
data["open_time"]= pd.to_datetime(data["open_time"])
return data[index[1]]
except:
raise Exception("One/multiple of your chosen feature/s is/are not available in this DataBase")
#throw error on all other accesses
else:
raise Exception("Your index is not possible, please check your index and the documentation on the DataBase object")
@staticmethod
def _download_kline_interval(symbol, start_date, end_date, candlestick_interval, config_path):
#read in the config
config = read_config(path=config_path)
#create the client
client = Client(api_key=config["binance"]["key"], api_secret=config["binance"]["secret"])
#download the data and safe it in a dataframe
print(f"Downloading {candlestick_interval} klines...")
raw_data = client.get_historical_klines(symbol=symbol, interval=candlestick_interval, start_str=start_date, end_str=end_date)
data = pd.DataFrame(raw_data)
#clean the dataframe
data = data.astype(float)
data.drop(data.columns[[7,8,9,10,11]], axis=1, inplace=True)
data.rename(columns = {0:'open_time', 1:'open', 2:'high', 3:'low', 4:'close', 5:'volume', 6:'close_time'}, inplace=True)
#set the correct times
data['close_time'] += 1
data['close_time'] = | pd.to_datetime(data['close_time'], unit='ms') | pandas.to_datetime |
import math
import itertools
import logging
from numba import njit
import numpy as np
import pandas as pd
from parallelpipe import Stage
import rasterio
from rasterio.windows import Window
logger = logging.getLogger("raster2points")
def raster2csv(
*files,
col_names=None,
separator=",",
max_block_size=4096,
calc_area=False,
workers=1
):
"""
Convert rasters to CSV.
Input rasters must match cell size and extent.
    The tool writes the final result to a text file.
    :param files: one or more input rasters, followed by the output CSV file as the last argument
    :param col_names: optional column names for the raster value columns
    :param separator: separator used in CSV file
    :param max_block_size: max block size to process
    :param calc_area: whether to calculate the geodesic area of each cell
    :param workers: number of parallel workers
    :return: None
"""
if not len(files) >= 2:
raise ValueError("No output file provided")
csv_file = files[-1]
src_rasters = files[:-1]
logger.info(
"Extract data using {} worker{}".format(workers, "" if workers == 1 else "s")
)
table = raster2df(
*src_rasters,
col_names=col_names,
max_block_size=max_block_size,
calc_area=calc_area,
workers=workers,
)
logger.info("Write to file: " + csv_file)
table.to_csv(csv_file, sep=separator, header=True, index=False)
logger.info("Done.")
def raster2df(
*src_rasters, col_names=None, max_block_size=4096, calc_area=False, workers=1
):
"""
Converts raster into Panda DataFrame.
Input rasters must match cell size and extent.
The first raster determines number of output rows.
    Only cells which are above the given threshold / not NoData are processed.
    The tool calculates lat/lon for every grid cell and extracts the cell value.
    If more than one input raster is provided, the tool adds additional columns to the CSV with the corresponding values.
:param src_rasters: Input rasters (one or many)
:param col_names: Column names for input raster values (optional, default: val1, val2, ...)
:param max_block_size: maximum block size to process in at once
:param calc_area: Calculate geodesic area
:param workers: number of parallel workers
:return: Pandas data frame
"""
if col_names and not len(src_rasters) == len(col_names):
raise ValueError(
"Number of named columns does not match number of input rasters. Abort."
)
sources = _assert_sources(src_rasters)
src = sources[0]
affine = src.transform
step_height, step_width = _get_steps(src, max_block_size)
kwargs = {
"col_size": affine[0],
"row_size": affine[4],
"step_width": step_width,
"step_height": step_height,
"width": src.width,
"height": src.height,
"calc_area": calc_area,
}
cols = range(0, src.width, step_width)
rows = range(0, src.height, step_height)
blocks = itertools.product(cols, rows)
pipe = blocks | Stage(_process_blocks, sources, **kwargs).setup(workers=workers)
data_frame = pd.DataFrame()
for df in pipe.results():
if data_frame.empty:
data_frame = df[0] # unpack data frame from tuple
else:
data_frame = | pd.concat([data_frame, df[0]]) | pandas.concat |
"""
draftsimtools.bots
~~~~~~~~~~~~~~~~~~
Machine learning based bot logic.
"""
from random import shuffle
from copy import deepcopy
import pandas as pd
class Bot(object):
"""The bot object is used to simulate drafts. It has functions for creating new drafts,
making picks, and tracking it's color commit.
A draft can be simulated in the following manner:
p = Bot()
p.new_draft(drafts[0])
for x in range(45):
p.make_pick()
"""
#Some constants from the website.
COLOR_COMMIT_THRESHOLD=3.5 #Determines how many good cards are needed to commit to a color
RATING_THRESH=2.0 #Baseline playability rating for color_commit
MAX_BONUS_SPEC=.9 #The maximum bonus during the speculation phase at the start of a draft
ON_COLOR_BONUS=2.0 #Bonus cards receive after player locks into 2 colors
OFF_COLOR_PENALTY=1.0 #Penalty for off color cards after the player locks into 2 colors
SECOND_COLOR_FRAC=0.8 #When committed to one color, the second color bonus is this fraction of the on color bonus
MULTICOLOR_PENALTY=0.6 #P1P1 penalty for multicolored cards
SING_COLOR_BIAS_FACTOR=2.0 #If the player only has cards of 1 color, reduce the bonus by this fraction
def __init__(self, rating_dict, draft=None):
"""Create a new Bot instance. Bots should be restricted to a single set.
:param rating_dict: Rating dictionary for the set being drafted.
:param draft: (Optional) Attach a draft to the bot.
"""
self.rating_dict = rating_dict
self.draft_count = 0
self.loss_current = 0
self.loss_history = []
if draft is not None:
self.new_draft(draft)
def new_draft(self, draft):
"""Update the Bot object with a single new draft. New drafts can be created by
draftsimtools.process_drafts.
Calling new_draft resets all information in the Bot object and allows numerous drafts
to be simulated using a single Bot object.
Fields:
self.draft - a single draft object (list of list of cardnames)
self.collection - a list of cardnames currently picked
self.color_commit - the color_commit vector of the current collection
self.num_colors - number of colors bot is commited to
self.ps - number of cards in a pack
:param draft: Attach a draft to the bot.
"""
self.draft = deepcopy(draft)
self.collection = []
self.color_bonus = [0,0,0,0,0]
self.color_commit = [0,0,0,0,0]
self.num_colors = 0
self.ps = int(len(self.draft)/3)
self.draft_count += 1
def make_pick(self):
"""Makes a pick and updates the bot's collection and color_commit.
This method picks the first card in each pack. Note that the draft lists are set up
such that the first element of each list is the card that was picked by a human.
"""
cur_pick = len(self.collection)
if cur_pick < len(self.draft):
self.collection.append(self.draft[cur_pick][0])
self.update_color_commit(self.draft[cur_pick][0])
self.update_num_colors()
self.update_color_bonus()
else:
print("All picks made.")
def update_color_commit(self, card):
"""Updates the color_commit of the bot.
:param card: Card name (string).
"""
#Collect card info.
card_color_vector = self.rating_dict[card][0]
card_rating = self.rating_dict[card][1]
#Update each component of color_commit.
for i in range(len(self.color_commit)):
if card_color_vector[i] > 0:
self.color_commit[i] += max(0, card_rating-self.RATING_THRESH)
def update_num_colors(self):
"""Update number of committed colors for the bot.
"""
#Update committed colors from color_commit.
temp_num_colors = 0
for c in self.color_commit:
if c > self.COLOR_COMMIT_THRESHOLD:
temp_num_colors += 1
#Update committed colors based on pick number.
if len(self.collection) >= (self.ps+5):
temp_num_colors = 2
#Final update.
self.num_colors = min(temp_num_colors, 2)
def update_color_bonus(self):
"""Updates color bonuses during speculation phase (0-0.9).
Additional modifiers are applied in get_color_bias().
"""
#Compute color bonus.
temp_color_bonus = []
for i in range(len(self.color_commit)):
cur_bonus = self.color_commit[i] * self.MAX_BONUS_SPEC/self.COLOR_COMMIT_THRESHOLD
temp_color_bonus.append(min(cur_bonus, self.MAX_BONUS_SPEC))
#Update color bonus.
self.color_bonus = temp_color_bonus
def get_color_bias(self, card):
"""Returns the color bias for a given card.
"""
#Get card information.
card_color_vector = self.rating_dict[card][0]
num_card_colors = sum([c>0 for c in card_color_vector])
#Uncastable card.
if num_card_colors >= 4:
return 0
#print("card", str(card))
#print("card_color_vector", str(card))
#print("color_commit", self.color_commit)
#Speculation phase - bot committed to 0-1 colors.
if self.num_colors == 0 or self.num_colors == 1:
#Colorless card.
if num_card_colors == 0:
#Reduce bonus when only 1 color is picked.
picked_colors = sum([x>0 for x in self.color_commit])
if picked_colors == 1:
return max(self.color_bonus) / self.SING_COLOR_BIAS_FACTOR
else:
return max(self.color_bonus)
#Mono colored.
elif num_card_colors == 1:
#When commited to one color, mono colored cards in the support color get a bonus.
second_color_commit = sorted(self.color_commit)[-2]
second_color_index = self.color_commit.index(second_color_commit)
if self.num_colors == 1 and card_color_vector[second_color_index]>0:
#As implemented in draftsim code. Maybe a good idea to use lower bound instead.
return self.SING_COLOR_BIAS_FACTOR * self.SECOND_COLOR_FRAC
else:
#Reduce bonus when only 1 color is picked.
picked_colors = sum([x>0 for x in self.color_commit])
cur_bonus = self.color_bonus[card_color_vector.index(max(card_color_vector))]
if picked_colors == 1:
return cur_bonus / self.SING_COLOR_BIAS_FACTOR
else:
return cur_bonus
#Multi colored.
elif num_card_colors == 2 or num_card_colors == 3:
#Base multicolored penalty.
cur_bonus = -1 * self.MULTICOLOR_PENALTY
#Reward on-color cards.
for c in range(5):
if card_color_vector[c] > 0:
cur_bonus += self.color_bonus[c]
else:
cur_bonus -= self.color_bonus[c]
return cur_bonus
#Committed phase - bot committed to two colors.
elif self.num_colors == 2:
#Compute committed colors.
color1 = self.color_commit.index(sorted(self.color_commit)[-1])
color2 = self.color_commit.index(sorted(self.color_commit)[-2])
#Count off-color mana symbols.
off_color_mana_symbols = 0
for c in range(5):
if c != color1 and c != color2:
off_color_mana_symbols += card_color_vector[c]
#Return color bias.
if off_color_mana_symbols == 0:
return self.ON_COLOR_BONUS
else:
return 1 - off_color_mana_symbols*self.OFF_COLOR_PENALTY
#Catch-all.
#Broken.
print("Unable to compute color bias for: " + str(card) + ", draft_count: " + str(self.draft_count))
print("self.num_colors", self.num_colors)
return 0
def create_rating_list(self, pack):
"""Creates a relative rating list from a pack.
"""
#Create list of updates.
rating_list = []
for cardname in pack:
#Compute rating.
base_rating = self.rating_dict[cardname][1]
color_bonus = self.get_color_bias(cardname)
total_rating = base_rating + color_bonus
#Compute rating difference. Positive means better than picked card.
if len(rating_list) == 0:
picked_rating = total_rating
rating_difference = 0
else:
rating_difference = total_rating - picked_rating
#Create relative list of ratings.
rating_list.append([cardname, rating_difference])
return rating_list
def write_rating_dict(self, filename = "test.tsv"):
"""Writes the rating dict to filename.
"""
#Create name/rating dictionary.
name_rating = {}
for k in self.rating_dict.keys():
name_rating[k] = self.rating_dict[k][1]
#Create data frame.
rating_df = | pd.DataFrame.from_dict(name_rating, orient="index") | pandas.DataFrame.from_dict |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import CategoricalDtype
import databricks.koalas as ks
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class CategoricalTest(ReusedSQLTestCase, TestUtils):
def test_categorical_frame(self):
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf, pdf)
self.assert_eq(kdf.a, pdf.a)
self.assert_eq(kdf.b, pdf.b)
self.assert_eq(kdf.index, pdf.index)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
kser = ks.Series([1, 2, 3], dtype="category")
self.assert_eq(kser, pser)
self.assert_eq(kser.cat.categories, pser.cat.categories)
self.assert_eq(kser.cat.codes, pser.cat.codes)
self.assert_eq(kser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(kser.astype("category"), pser.astype("category"))
self.assert_eq(
kser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = kser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype( | CategoricalDtype(["b", "c", "a"]) | pandas.api.types.CategoricalDtype |
import glob
import os
import os.path
from pathlib import Path
from lxml import etree
import pandas as pd
from datetime import datetime
import time
#never ever do this
from itertools import *
start = time.time()
datestring = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
list_of_attribute_value = []
Attribute_to_extract = ['first','second','third','last_tag']
for path in Path('C:/RTC/Scripts & Tools & Files/Python/COUNT SKU PY').glob('**/*.xml'):
tree = etree.iterparse(str(path))
for event, node in tree:
if node.tag == 'first':
sku = node.text
t = 0
myocc = ''.join([sku,'-',str(t)])
newmyocc = ''.join([sku,'-',str(t)])
if node.tag == Attribute_to_extract[-1]:
t = t + 1
newmyocc = ''.join([sku,'-',str(t)])
if node.tag in Attribute_to_extract:
list_of_attribute_value.append([myocc, node.tag, node.text])
myocc = newmyocc
#solution 2: much slower
def parser_xml(tags, path):
tree = etree.iterparse(str(path))
for event, node in tree:
        if node.tag == tags[0]:
sku = node.text
t = 0
myocc = ''.join([sku,'-',str(t)])
newmyocc = ''.join([sku,'-',str(t)])
        if node.tag == tags[-1]:
t = t + 1
newmyocc = ''.join([sku,'-',str(t)])
if node.tag in tags:
yield [myocc, node.tag, node.text]
myocc = newmyocc
list_of_attribute_value2 = list(chain.from_iterable(parser_xml(Attribute_to_extract,file)
for file in Path('C:/RTC/Scripts & Tools & Files/Python/COUNT SKU PY').glob('**/*.xml')))
#print(list_of_attribute_value2)
df_attributes_value = | pd.DataFrame(list_of_attribute_value2, columns=['myocc','Attribute','Value']) | pandas.DataFrame |
import copy
import itertools
import numpy as np
import pandas as pd
from scipy.special import erf
from scipy import stats
class JointNormal(object):
"""
:type labels: list
:param labels: A list of string labels for the variables in this distribution
:type mu: iterable of numbers or N x 1 numpy.matrix
:param mu: The mean vector
:type cov: list of lists or numpy.matrix
:param cov: a list containing the rows of the covariance matrix
:type N: int or float
:param N: The number of observations that went into this distribution. This is used
for weighting the relative importance of new observations added through the
:code:`.ingest()` method.
:type n_max: int
:param n_max: The maximum value N can have. Limiting N has the effect of allowing
the distrubtion to "forget" old measurements.
This class provides an abstraction for creating, manipulating and querying multi-variate
normal distributions.
"""
def __init__(self, labels, mu=None, cov=None, N=0., n_max=None):
dim = len(labels)
# private attributes holding
# _mu: mean vector
# _cov: covariance matrix
self._mu = None
self._cov = None
# N: the number of observations used to generate the current distribution
self.N = N
# n_max: cap the number of observations at this value to enable forgetting
self.n_max = n_max
self.labels = [ll for ll in labels]
self._index_for_label = {label: ind for (ind, label) in enumerate(labels)}
# use setters after labels have been defined to catch misshaped inputs
self.mu = self._vectorize(np.zeros(dim)) if mu is None else mu
self.cov = np.matrix(np.zeros((dim, dim))) if cov is None else cov
@property
def mu(self):
"""
:rtype: numpy.matrix
:return: N x 1 mean vector
"""
return copy.deepcopy(self._mu)
@mu.setter
def mu(self, mu):
"""
:type mu: iterable
:param mu: assign mu to this list of numbers
"""
if len(mu) != len(self.labels):
raise ValueError('Setting mu with wrong dimensions')
self._mu = self._vectorize(mu)
@property
def cov(self):
"""
:rtype: numpy.matrix
:return: N x N covariance matrix
"""
return copy.deepcopy(self._cov)
@cov.setter
def cov(self, cov):
"""
:type cov: list of lists or numpy matrix
:param cov: list of covariance matrix rows or an N x N numpy matrix
"""
dim = len(self.labels)
new_cov = np.matrix(cov)
if new_cov.shape != (dim, dim):
raise ValueError('Setting covariance with wrong dimensions')
self._cov = new_cov
@property
def mu_cov_n(self):
"""
:rtype: `tuple (numpy.matrix, numpy.matrix, int)`
:return: `(mu, cov, N)`
"""
return (self._mu, self._cov, self.N)
def _index_for(self, labels):
bad_labels = set(labels) - set(self.labels)
if len(bad_labels) > 0:
raise ValueError('Unrecognized labels: {}'.format(bad_labels))
return [self._index_for_label[label] for label in labels]
def _labels_for(self, index):
return [self.labels[ind] for ind in index]
@property
def mu_frame(self):
"""
:rtype: pandas.Dataframe
:return: Dataframe holding mean vector in column labeled 'mu'
"""
return pd.DataFrame(self._mu[:, 0], index=self.labels, columns=['mu'])
@property
def cov_frame(self):
"""
:rtype: pandas.Dataframe
:return: Dataframe holding covariance matrix with columns and index holding variable names
"""
return pd.DataFrame(self.cov, index=self.labels, columns=self.labels)
def _validate_frame(self, df):
missing_vars = set(self.labels) - set(df.columns)
if missing_vars:
raise ValueError('Input dataframe missing columns {}'.format(missing_vars))
def _vectorize(self, x):
return np.matrix(np.reshape(x, (len(x), 1)))
def _update_cov(self, x, mu, cov, n):
# x: a vector of observations
# mu: the mean vector
# cov: the covariance matrix
# k: the "kalman gain" constant
# n: the number of observations observed before the update
k = 1. / (n + 1.)
cov_prime = cov + k * (x - mu) * (x - mu).T
return (1. - k) * cov_prime
def _update_mu(self, x, mu, n):
# x: a vector of observations
# mu: the mean vector
# n: the number of observations observed before the update
# k: the "kalman gain" constant
k = 1. / (n + 1.)
return mu + k * (x - mu)
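    # With k = 1 / (n + 1), the two helpers above implement the usual online update
    # rules (a sketch of the math, not additional functionality):
    #
    #   mu_{n+1}  = mu_n + k * (x - mu_n)
    #   cov_{n+1} = (1 - k) * (cov_n + k * (x - mu_n)(x - mu_n)^T)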
def _update_n(self, n, n_max=None):
# n: the number of observations observed before the update
if self.n_max and n >= self.n_max:
return self.n_max
else:
return n + 1
def _ingest_point(self, x):
# x: a vector of observations
# mu: the mean vector
# cov: the covariance matrix
# N: the number of observations
mu = self._update_mu(x, self._mu, self.N)
cov = self._update_cov(x, self._mu, self._cov, self.N)
N = self._update_n(self.N)
        # save new state (must compute this in two steps to ensure
        # that all updated values are computed using previous values)
self._mu, self._cov, self.N = mu, cov, N
def ingest(self, data):
"""
:type data: valid argument to pandas.Dataframe constructor
:param data: A frameable set of observations to assimilate into the distribution
Iterates through every row in the dataframe and updates the mean/covariance
with the values in that row.
"""
# transorm dataframe into ndarray with properly ordered columns
if isinstance(data, pd.DataFrame):
self._validate_frame(data)
data = data[self.labels].values
if data.shape[1] != len(self._mu):
raise ValueError('JointNormal tried to ingest data with wrong number of dimensions')
for ind in range(data.shape[0]):
x = self._vectorize(data[ind, :])
self._ingest_point(x)
def _prob_for_records(self, data, variable, op):
"""
:type data: iterable
:param data: An iterable of variables at which to evaluate threshold probability
:type variable: str
:param variable: the name of the thresholding variable
:type op: str
:param op: one of ['__lt', '__gt']
:rtype: scalar or iterable depending on data
:return: the output probability value(s)
"""
# extract the single non-marginalized variable from the data
x = data
# get a distribution that marginalizes out all other variables
p = self.marginal(variable)
# get normal params that are now scalers since we've reduced to 1-d
mu = p._mu[0, 0]
sigma = np.sqrt(float(p.cov[0, 0]))
# find probability of x being less than data value using the z-score
z = (x - mu) / sigma
prob_less_than_x = 0.5 * (1 + erf(z / np.sqrt(2)))
prob_for_op = {
'__lt': lambda x: list(prob_less_than_x),
'__gt': lambda x: list(1. - prob_less_than_x)
}
out = prob_for_op[op](x)
return out if len(out) > 1 else out[0]
def _log_density_for_records(self, data):
"""
:type data: pandas.Dataframe
:param data: Dataframe with rows of observations
:rtype: scalar or iterable depending on data
:return: the output probability value(s)
"""
# make sure input is an nd-array with rows holding records
x_matrix = data[self.labels].values
# subtract mu from every record (note result is transposed)
x_matrix = np.matrix(x_matrix.T - np.array(self._mu))
# hit each row (record) with precision matrix
y_matrix = self.cov.getI() * x_matrix
# compute the exponential density argument for each record
exp_argument = -0.5 * np.sum(np.array(x_matrix) * np.array(y_matrix), axis=0)
# compute the log of the density prefactor
# k: dimension of distribution
# det: determinant of covariance matrix
# log_prefactor: log of the multivariate normal normalization constant
k = self._mu.shape[0]
det = np.abs(np.linalg.det(self.cov))
log_prefactor = np.log((2. * np.pi) ** -(k / 2.) / np.sqrt(det))
# compute and return log probability
out = list(log_prefactor + exp_argument)
return out[0] if len(out) == 1 else out
def marginal(self, *labels):
"""
:type labels: string arguments
:param labels: marginalize out all variables not passed in these string arguments
:rtype: JointNormal
:return: A joint normal distribution marginalized to include only specified variables
Example:
.. code-block:: python
N = JointNormal(
labels=['x', 'y', 'z'],
mu=[0, 0, 0],
cov=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
)
# marginalize out the z variable
N_cond = N.marginal('x', 'y')
"""
# get the index of the labels you don't want to marginalize over
ind = self._index_for(labels)
# create a list of all combinations indexes you want to keep
in_els = itertools.product(ind, ind)
# create list of all combinations of row, columns indexes for the output covariance matrix
out_els = itertools.product(range(len(ind)), range(len(ind)))
# initialize an output covariance matrix
cov_out = np.matrix(np.zeros((len(ind), len(ind))))
# map appropriate input covariance matrix elements to output covariance matrix
for in_el, out_el in zip(in_els, out_els):
cov_out[out_el] = self.cov[in_el]
# extract the mean elements into the output mean vector
mu_out = self._mu[ind, 0]
return JointNormal(mu=mu_out, cov=cov_out, N=self.N, labels=self._labels_for(ind), n_max=None)
def _check_args(self, free_ind=None, fixed_ind=None):
free_ind = [] if free_ind is None else free_ind
fixed_ind = [] if fixed_ind is None else fixed_ind
# k is dimensionality of space
k = self._mu.shape[0]
# make sure all indices are within dimensionality
if any([ind >= k for ind in free_ind + fixed_ind]):
raise ValueError('All indices must be less than {}'.format(k))
# make sure there are no overlaps
if len(set(free_ind).intersection(set(fixed_ind))) > 0:
raise ValueError('An index cannot appear in both free_ind and fixed_ind')
# make sure no dups
if len(set(free_ind)) != len(free_ind) or len(set(fixed_ind)) != len(fixed_ind):
raise ValueError('free_ind and fixed_ind cannot contain duplicate entries')
def conditional(self, **constraints):
"""
:type constraints: numbers
:param constraints: constraints expressed as `variable_name=variable_value`
:rtype: JointNormal
:return: A joint normal distribution conditioned on supplied constraints
Example:
.. code-block:: python
N = JointNormal(
labels=['x', 'y', 'z'],
mu=[0, 0, 0],
cov=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
)
# Returns a distribution over z
N_cond = N.conditional(x=1, y=1)
"""
# separate constraints into index and values
fixed_labels, fixed_vals = zip(*constraints.items())
fixed_ind = self._index_for(fixed_labels)
# the original index is just all integers up to dimension
ind_orig = range(self._mu.shape[0])
# get all indices that haven't been specified as fixed
non_fixed_ind = list(set(ind_orig) - set(fixed_ind))
# check indices for errors
self._check_args(non_fixed_ind, fixed_ind)
# permute covariance and mu to have non-fixed elements first, then fixed
ind_perm = list(non_fixed_ind) + list(fixed_ind)
P = _compute_permutation_matrix(ind_orig, ind_perm)
mu_perm = P * self._mu
cov_perm = P * self.cov * P.T
# Follow the notation from the wikipedia multivariate normal article to partition the
# covariance matrix into fixed and non-fixed parts.
#
# https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Conditional_distributions
#
# N1: dimension of non-fixed covariance partition
# mu1: non-fixed mean vector
# mu2: fixed mean vector
        # S11: non-fixed covariance matrix
        # S22: fixed covariance matrix
        # S12: covariance between non-fixed and fixed variables
# S21: transpose of S12
# a: vector of fixed values on which the distribution is conditioned
        # mu: mean vector of conditioned distribution
        # S: covariance matrix of conditioned distribution
N1 = len(non_fixed_ind)
mu1 = mu_perm[:N1, 0]
mu2 = mu_perm[N1:, 0]
S11 = cov_perm[:N1, :N1]
S22 = cov_perm[N1:, N1:]
S12 = cov_perm[:N1, N1:]
S21 = cov_perm[N1:, :N1]
a = np.matrix([[x] for x in fixed_vals])
# formula for conditional distribution of partitioned normal distribution
mu = mu1 + S12 * S22.getI() * (a - mu2)
S = S11 - S12 * S22.getI() * S21
        # create a joint distribution out of the non-fixed params
return JointNormal(mu=mu, cov=S, N=self.N, labels=self._labels_for(non_fixed_ind), n_max=None)
@property
def _info(self):
return 'N({})'.format(','.join([str(s) for s in self.labels]))
def __mul__(self, other):
return multiply_independent_normals(self, other)
def __add__(self, other):
return add_independent_normals(self, other)
def __sub__(self, other):
other = copy.deepcopy(other)
other._mu = - other._mu
return add_independent_normals(self, other)
def __gt__(self, other):
return probability_first_gt_second(self, other)
def __lt__(self, other):
return probability_first_gt_second(other, self)
def __repr__(self):
return self._info
def __str__(self):
return self._info
def estimate(self, variable, **constraints):
"""
:type variable: str
:param variable: The name of the variable to estimate
:type constraints: numbers
:param constraints: constraints expressed as `variable_name=variable_value`
:rtype: tuple
:return: (mean, standard_deviation)
Returns a tuple of (mu, sigma) representing the mean and standard deviation
of a particular variable given optional constraints for conditioning. This
method is useful for getting estimates of a particular variable.
"""
# do any requested conditioning
if constraints:
out_normal = self.conditional(**constraints)
else:
out_normal = self
out_normal = out_normal.marginal(variable)
return out_normal._mu[0, 0], np.sqrt(out_normal.cov[0, 0])
def percentile(self, **values):
"""
:type values: numbers
:param values: percentiles at which to compute locations. Must be in the form
`variable_name=variable_value`. variable_value can either be
a number or an iterable of numbers
:rtype: scalar or iterable depending on input
:return: the values corresponding to the input percentiles
Example:
.. code-block:: python
N = JointNormal(labels=['x', 'y'], mu=[0, 0], cov=[[1, 0], [0, 1]])
# compute the x value at the 80th percentile
prob = N.percentile(x=.8)
# compute x values for quartiles
prob = N.percentile(x=[.25, .5, .75])
"""
if len(values) > 1:
raise ValueError('You can only compute percentiles for one variable')
variable = list(values.keys())[0]
data = values[variable]
marginalized = self.marginal(variable)
out = stats.norm(marginalized.mu[0, 0], np.sqrt(marginalized.cov[0, 0])).ppf(data)
if hasattr(out, '__iter__'):
return list(out)
else:
return out
def probability(self, **constraints):
"""
:type constraints: numbers
:param constraints: constraints expressed as `variable_name=variable_value`.
One of the constraints must be expressed as either
`variable_name__gt=value` or `variable_name__lt=value`.
This constraint specifies the threshold value for
computing probabilities. This contstraint can be either
a number or an iterable.
:rtype: scalar or iterable depending on threshold constraint
:return: the output probability value(s)
Example:
.. code-block:: python
N = JointNormal(labels=['x', 'y'], mu=[0, 0], cov=[[1, 1], [1, 2]])
# compute probability that y < 2 given that x = 1.
prob = N.probability(y__lt=2, x=1)
# compute probability that y < w for w in range(3) given that x = 1
probs = N.probability(y__lt=range(3), x=1)
"""
free_keys = [k for k in constraints.keys() if '__gt' in k or '__lt' in k]
if len(free_keys) != 1:
raise ValueError('You must supply at least one kwarg ending in __gt or __lt')
free_key = free_keys[0]
conditional_keys = set(constraints.keys()) - set(free_keys)
out_norm = self
if conditional_keys:
out_norm = out_norm.conditional(**{k: constraints[k] for k in conditional_keys})
variable = free_key[:-4]
op = free_key[-4:]
data = constraints[free_key]
data = data if hasattr(data, '__iter__') else [data]
return out_norm._prob_for_records(data, variable, op)
def log_density(self, data, **constraints):
"""
:type data: a dict or a valid argument to pandas.Dataframe constructor
:param data: location(s) at which to compute log_density
:type constraints: numbers
:param constraints: constraints expressed as `variable_name=variable_value`.
:rtype: scalar or iterable depending on threshold constraint
:return: log of the output probability density value(s)
First conditions on the constraints, then evaluates the remaining distribution
at the location specified by data. Any variable not explicitly included in
        either data or constraints is marginalized out.
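        Example (illustrative):
        .. code-block:: python
            N = JointNormal(labels=['x', 'y'], mu=[0, 0], cov=[[1, 0], [0, 1]])
            # log density at the point (x=0, y=0); for this unit normal it equals
            # -log(2 * pi), roughly -1.84
            logp = N.log_density({'x': 0, 'y': 0})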
"""
df = pd.DataFrame([data]) if isinstance(data, dict) else | pd.DataFrame(data) | pandas.DataFrame |
# FLOWDO
# FlowDo is an application created for the purpose of managing business activities like Inventory Maintenance, Billing, Sales analysis and other business functions.
# Developed by:
# <NAME> (@Moulishankar10)
# <NAME> (@ToastCoder)
# REQUIRED MODULES
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# VISUALIZER FUNCTIONS:
# Used to display keymaps for main menu and every submenu
# LIST OF KEYMAPS TO BE DISPLAYED IN MAIN MENU
def mainOptionsVisualizer():
print("""
\n******************************************** MAIN MENU ***********************************************\n
Press 1 to take a New Order
Press 2 to explore the Revenue options
Press 3 to explore the Inventory properties
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN ORDER OPTIONS
def orderOptionsVisualizer():
print("""
\n******************************************** ORDER MENU **********************************************\n
Press 1 to add a new product
Press 2 to remove a product
Press 3 to view the bill
Press 4 to modify your order
Press 5 to proceed your order
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN REVENUE OPTIONS
def revenueOptionsVisualizer():
print("""
\n******************************************* REVENUE MENU *********************************************\n
Press 1 to view the Revenue Database
Press 2 to view a Month's Total Revenue
Press 3 to view the product which generated the Maximum Profit in any month
Press 4 to view the product which generated the Minimum Profit in any month
Press 5 to view the Revenue Trend Graph for any year
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN INVENTORY OPTIONS
def inventoryOptionsVisualizer():
print("""
\n****************************************** INVENTORY MENU *********************************************\n
Press 1 to view the Stock Inventory
Press 2 to add a new product to your inventory
Press 3 to remove a product from your inventory
Press 4 to modify the properties of existing products
Press 9 for exit
\n*******************************************************************************************************\n
""")
# USED TO CHECK IF THE COLUMN FOR THE MONTH IS CREATED OR NOT
def revMonthChecker():
today = datetime.now()
frmt = today.strftime('%m-%Y')
rev_data = pd.read_csv('data/revenue.csv')
header = list(rev_data.columns)
if frmt not in header:
x = [0]*len(rev_data)
rev_data[frmt] = x
rev_data.to_csv("data/revenue.csv", index = False)
# CLASS FOR BILLING OPERATIONS
class Biller:
def __init__(self,l):
self.prod_name=[]
self.quantity=[]
self.price=[]
self.total_price=[]
self.limit=l
self.ordered = False
self.item_selected = False
def isFull(self):
return len(self.prod_name) == self.limit
def isEmpty(self):
return len(self.prod_name) == 0
# FUNCTION TO ADD A NEW PRODUCT TO THE BILL
def enqueue(self,ele,qn):
if self.isFull():
print("\nMaximum limit reached !")
elif ele.upper() in self.prod_name:
print(f"\n!! '{ele.upper()}' is already in the ordered list !!")
print("\n--- Please refer the 'ORDER MENU' to modify the ordered items ---\n")
else:
inv_data = pd.read_csv('data/inventory.csv')
flag = 0
for i in range(len(inv_data)):
flag = 0
if inv_data["Product_Name"][i] == ele.upper():
if qn.isnumeric() == True:
if int(qn) <= inv_data["Available_Stock"][i]:
self.prod_name.append(ele.upper())
self.quantity.append(int(qn))
self.price.append(inv_data["Selling_Price"][i])
self.item_selected = True
print("\n>>>>>>>> Product is Added to the Order <<<<<<<<\n")
break
else:
print("\n!! Sorry for the inconvenience... Your required product is Out of Stock !!")
break
else:
print("\n!! Invalid Amount of Quantity !!")
break
else:
flag = 1
if flag == 1:
print("\n!! Unavailable Product or Invalid Product !!")
# FUNCTION TO REMOVE A PRODUCT FROM THE BILL
def remove(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to remove !!!\n")
else:
ele = input("\nEnter the product name : ").upper()
if ele in self.prod_name:
ind = self.prod_name.index(ele)
del self.prod_name[ind]
del self.quantity[ind]
del self.price[ind]
del self.total_price[ind]
print("\n>>>>>>>> Product is Removed from the Order <<<<<<<<\n")
else:
print("\n!!! The Specified Product is not in the Order !!!\n")
# FUNCTION TO DISPLAY CONTENTS OF THE BILL
def display(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to generate bill !!!\n")
else:
self.total_price = list(np.array(self.quantity)*np.array(self.price))
form = {'Product Name':self.prod_name,'Quantity':self.quantity,'Cost(1)':self.price,'Total Cost':self.total_price}
res = pd.DataFrame(form)
res.index=list(range(1,len(self.prod_name)+1))
print(res)
print("\n=============================================================\n")
print(f"Total Items : {len(self.prod_name)}")
print(f"Total Quantities : {sum(self.quantity)}")
print(f"Grand Total : Rs.{sum(self.total_price)}")
print("\n=============================================================\n")
# FUNCTION TO MODIFY A PRODUCT NAME OR QUANTITY IN THE BILL
def modify(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to modify !!!\n")
else:
ele = input("\nEnter the product name : ").upper()
if ele in self.prod_name:
ind = self.prod_name.index(ele.upper())
key = int(input("\n Press 1 to modify the product name ..... \n\n Press 2 to modify the quantity .....\n\nYour Option : "))
if key == 1:
self.prod_name[ind] = input("\nEnter the new product name : ").upper()
elif key == 2:
self.quantity[ind] = int(input("\nEnter the new amount of quantity : "))
print("\n>>>>>>>> Updated the Order <<<<<<<<\n")
else:
print("\n!!! The Specified Product is not in the Order !!!\n")
# FUNCTION TO PERFORM THE POST PROCESSING ACTIVITIES ONCE THE BILL IS CONFIRMED
def postProcessor(self):
today = datetime.now()
frmt = today.strftime('%m-%Y')
inv_data = pd.read_csv('data/inventory.csv')
rev_data = pd.read_csv("data/revenue.csv")
        for i in range(len(inv_data)):
            for j in range(len(self.prod_name)):
                if inv_data["Product_Name"][i] == self.prod_name[j]:
                    # .loc avoids chained assignment, which is unreliable in newer pandas
                    inv_data.loc[i, "Available_Stock"] -= self.quantity[j]
        inv_data.to_csv('data/inventory.csv', index=False)
        for i in range(len(rev_data)):
            for j in range(len(self.prod_name)):
                if rev_data["Product_Name"][i] == self.prod_name[j]:
                    rev_data.loc[i, str(frmt)] += self.total_price[j]
        rev_data.to_csv('data/revenue.csv', index=False)
self.ordered = True
print("\n\n\n -------- Updated the Inventory Data ! -------- \n")
#INDIVIDUAL FUNCTIONS USED IN REVENUE SUB MENU
month = ["January","February","March","April","May","June","July","August","September","October","November","December"]
# FUNCTION TO VIEW THE REVENUE DATABASE
def viewRevenue():
rev_data = pd.read_csv('data/revenue.csv')
print("\n------------------------------------------ REVENUE DATABASE --------------------------------------------\n\n",rev_data.to_string(index=False))
# FUNCTION TO DISPLAY THE REVENUE GENERATED BY THIS MONTH
def viewMonthRevenue():
rev_data = pd.read_csv('data/revenue.csv')
frmt = input("\nEnter the time period (MM-YYYY) : ")
if frmt[:2] in ['01','02','03','04','05','06','07','08','09','10','11','12'] and frmt in rev_data.columns:
month_revenue = sum(list(rev_data[frmt]))
print(f"\n The revenue generated in {month[int(frmt[:2])-1]} {int(frmt[-4:])} -- Rs.{month_revenue}")
else:
print("\n!!!! Invalid Time Period or Non-Entried Time Period !!!!\n")
# FUNCTION TO DISPLAY THE MAXIMUM PROFIT GENERATED PRODUCTS
def maxProfit():
rev_data = pd.read_csv('data/revenue.csv')
frmt = input("\nEnter the time period (MM-YYYY) : ")
if frmt[:2] in ['01','02','03','04','05','06','07','08','09','10','11','12'] and frmt in rev_data.columns:
if list(rev_data[frmt]) == [0 for i in range(len(rev_data))]:
today = datetime.now()
if frmt[:2] == today.strftime('%m'):
print(f"\n\n!! No Products are sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
print(f"\n\n!! No Products were sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
max_amt = max(list(rev_data[frmt]))
print(f"\n The following product(s) generated the maximum profit on {month[int(frmt[:2])-1]} {int(frmt[-4:])} : \n")
for i in range(len(rev_data)):
if rev_data[frmt][i] == max_amt:
print(" * {} - Rs.{}".format(rev_data["Product_Name"][i],max_amt))
else:
print("\n\n!!!! Invalid Time Period or Non-Entried Time Period !!!!\n")
# FUNCTION TO DISPLAY THE MINIMUM PROFIT GENERATED PRODUCTS
def minProfit():
rev_data = pd.read_csv('data/revenue.csv')
frmt = input("\nEnter the time period (MM-YYYY) : ")
if frmt[:2] in ['01','02','03','04','05','06','07','08','09','10','11','12'] and frmt in rev_data.columns:
if list(rev_data[frmt]) == [0 for i in range(len(rev_data))]:
today = datetime.now()
if frmt[:2] == today.strftime('%m'):
print(f"\n\n!! No Products are sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
print(f"\n\n!! No Products were sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
min_amt = min(list(rev_data[frmt]))
print(f"\n The following product(s) generated the least profit on {month[int(frmt[:2])-1]} {int(frmt[-4:])} : \n")
for i in range(len(rev_data)):
if rev_data[frmt][i] == min_amt:
print(" * {} - Rs.{}".format(rev_data["Product_Name"][i],min_amt))
else:
print("\n\n!!!! Invalid Time Period or Non-Entried Time Period !!!!\n")
# FUNCTION TO VISUALIZE THE REVENUE GENERATED BY MONTHS THROUGH A GRAPH
def viewRevenueGraph():
rev_data = pd.read_csv('data/revenue.csv')
profits =[]
months = []
year = input("\nEnter the Year (YYYY) : ")
    flag = 0
    for col in rev_data.columns:
        if year in col:
            flag = 1
            months.append(month[int(col[:2])-1])
            profits.append(sum(list(rev_data[col])))
    if flag == 0:
        print("\n\n!!!! Invalid Year or Non-Entried Year !!!!\n")
    else:
        plt.scatter(months, profits, color='red', linewidths=3)
        plt.plot(months, profits, color="blue")
        plt.bar(months, profits, color="green", width=0.2)
        plt.xlabel("Month")
        plt.ylabel("Revenue Generated (INR)")
        plt.title("Revenue for the year {}".format(year))
        plt.show()
#INDIVIDUAL FUNCTIONS USED IN INVENTORY SUB MENU
# FUNCTION TO VIEW THE STOCK INVENTORY
def viewInventory():
    inv_data = pd.read_csv("data/inventory.csv")
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_Sumstat.ipynb (unless otherwise specified).
__all__ = ['p2z', 'Sumstat', 'read_sumstat', 'ss_2_vcf']
# Cell
import yaml
import numpy as np
import pandas as pd
from scipy.stats import norm
from .utils import *
# Cell
def p2z(pval,beta,twoside=True):
if twoside:
pval = pval/2
z=np.abs(norm.ppf(pval))
ind=beta<0
z[ind]=-z[ind]
return z
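# Hedged example for p2z(): a two-sided p-value of 0.05 corresponds to |z| ~= 1.96, with the
# sign taken from beta, e.g. p2z(np.array([0.05]), np.array([-1.2])) -> array([-1.95996...]).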
class Sumstat:
def __init__(self,sumstat_path,config_file=None,rename=True):
self.ss = self.read_sumstat(sumstat_path,config_file,rename)
def __repr__(self):
return "sumstat:% s" % (self.ss)
#functions to read sumstats
def read_sumstat(self,file, config_file,rename):
if config_file is not None:
config_file = yaml.safe_load(open(config_file, 'r'))
return read_sumstat(file,config_file,rename)
def extractbyregion(self,region):
sumstats = self.ss
idx = (sumstats.CHR == region[0]) & (sumstats.POS >= region[1]) & (sumstats.POS <= region[2])
print('this region',region,'has',sum(idx),'SNPs in Sumstat')
self.ss = sumstats[idx]
def extractbyvariants(self,variants,notin=False):
idx = self.ss.SNP.isin(variants)
if notin:
idx = idx == False
#update sumstats
self.ss = self.ss[idx]
def calculateZ(self):
self.ss['Z'] = list(p2z(self.ss.P,self.ss.BETA))
def match_ss(self,bim):
self.ss = check_ss1(self.ss,bim)
# Cell
def read_sumstat(file, config,rename=True):
try:
sumstats = pd.read_csv(file, compression='gzip', header=0, sep='\t', quotechar='"')
except:
sumstats = pd.read_csv(file, header=0, sep='\t', quotechar='"')
if config is not None:
try:
ID = config.pop('ID').split(',')
sumstats = sumstats.loc[:,list(config.values())]
sumstats.columns = list(config.keys())
sumstats.index = namebyordA0_A1(sumstats[ID],cols=ID)
except:
            raise ValueError(f"According to config_file, input summary statistics should have the following columns: {list(config.values())}")
sumstats.columns = list(config.keys())
if rename:
sumstats.SNP = 'chr'+sumstats.CHR.astype(str).str.strip("chr") + ':' + sumstats.POS.astype(str) + '_' + sumstats.A0.astype(str) + '_' + sumstats.A1.astype(str)
sumstats.CHR = sumstats.CHR.astype(str).str.strip("chr").astype(int)
sumstats.POS = sumstats.POS.astype(int)
if "GENE" in sumstats.columns.values():
sumstats.index = namebyordA0_A1(sumstats[["GENE","CHR","POS","A0","A1"]],cols=["GENE","CHR","POS","A0","A1"])
else:
sumstats.index = namebyordA0_A1(sumstats[["CHR","POS","A0","A1"]],cols=["CHR","POS","A0","A1"])
return sumstats
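# A hypothetical config_file for read_sumstat() might map standardized keys to the raw
# column names (all raw column names below are assumptions about the input file):
#
#     ID: CHR,POS,A0,A1
#     CHR: chromosome
#     POS: position
#     A0: ref_allele
#     A1: alt_allele
#     SNP: variant_id
#     BETA: beta
#     P: pvalue
#
# 'ID' is popped and split on commas to name the standardized columns used for the row
# index; the remaining values select raw columns and the keys become the renamed columns.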
# Cell
def ss_2_vcf(ss_df,name = "name"):
## Geno field
    df = pd.DataFrame()
import pandas as pd
from openpyxl import Workbook
import cx_Oracle
import sys
from sqlalchemy import create_engine
from PyQt6 import QtCore, QtGui, QtWidgets
import ctypes
import time
import threading
import qdarktheme
import cgitb
cgitb.enable(format = 'text')
dsn_tns = cx_Oracle.makedsn('ip-banco-oracle', 'porta', service_name='nomedoservico')
conn = cx_Oracle.connect(user=r'usuario', password='<PASSWORD>', dsn=dsn_tns)
c = conn.cursor()
engine = create_engine('sqlite://', echo=False)
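# Note: the in-memory SQLite engine above is used purely as scratch space -- the Excel data
# read in locArquivo() is pushed in with DataFrame.to_sql() so it can be reshaped with plain
# SQL (see sqlsefaz below), presumably before being checked against the Oracle connection
# opened above. The comparison step itself is an assumption about code outside this excerpt.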
class Ui_ConferenciadeNotas(object):
def setupUi(self, ConferenciadeNotas):
ConferenciadeNotas.setObjectName("ConferenciadeNotas")
ConferenciadeNotas.resize(868, 650)
ConferenciadeNotas.setWindowIcon(QtGui.QIcon("icone.ico"))
self.localArquivo = QtWidgets.QTextEdit(ConferenciadeNotas)
self.localArquivo.setGeometry(QtCore.QRect(100, 60, 590, 30))
self.localArquivo.setObjectName("localArquivo")
self.label = QtWidgets.QLabel(ConferenciadeNotas)
self.label.setGeometry(QtCore.QRect(0, 0, 870, 40))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(18)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(ConferenciadeNotas)
self.label_2.setGeometry(QtCore.QRect(10, 60, 90, 30))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(16)
font.setBold(False)
font.setWeight(50)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading|QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.label_2.setObjectName("label_2")
self.localizarArquivoBT = QtWidgets.QPushButton(ConferenciadeNotas)
self.localizarArquivoBT.setGeometry(QtCore.QRect(700, 60, 160, 30))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
self.localizarArquivoBT.setFont(font)
self.localizarArquivoBT.setObjectName("localizarArquivoBT")
self.localizarArquivoBT.clicked.connect(self.locArquivo)
self.conferidoFiliais = QtWidgets.QTableWidget(ConferenciadeNotas)
self.conferidoFiliais.setGeometry(QtCore.QRect(20, 130, 180, 440))
font = QtGui.QFont()
font.setFamily("Century Gothic")
self.conferidoFiliais.setFont(font)
self.conferidoFiliais.setRowCount(16)
self.conferidoFiliais.setObjectName("conferidoFiliais")
self.conferidoFiliais.setColumnCount(3)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(12, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(13, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(14, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setVerticalHeaderItem(15, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.conferidoFiliais.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setKerning(True)
item.setFont(font)
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(0, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(1, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(2, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(3, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(4, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(4, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(5, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(5, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(5, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(6, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(6, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(6, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(7, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(7, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(7, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(8, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(8, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(8, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(9, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(9, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(9, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(10, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(10, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(10, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(11, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(11, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(11, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(12, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(12, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(12, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(13, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(13, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(13, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(14, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(14, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(14, 2, item)
item = QtWidgets.QTableWidgetItem()
self.conferidoFiliais.setItem(15, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(15, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.conferidoFiliais.setItem(15, 2, item)
self.conferidoFiliais.horizontalHeader().setDefaultSectionSize(50)
self.conferidoFiliais.horizontalHeader().setMinimumSectionSize(50)
self.conferidoFiliais.verticalHeader().setDefaultSectionSize(23)
self.conferidoFiliais.verticalHeader().setMinimumSectionSize(23)
self.nfsComErro = QtWidgets.QTableWidget(ConferenciadeNotas)
self.nfsComErro.setGeometry(QtCore.QRect(200, 130, 651, 440))
font = QtGui.QFont()
font.setFamily("Century Gothic")
self.nfsComErro.setFont(font)
#self.nfsComErro.setRowCount(100)
self.nfsComErro.setObjectName("nfsComErro")
self.nfsComErro.setColumnCount(6)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.nfsComErro.setHorizontalHeaderItem(5, item)
self.nfsComErro.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.ExtendedSelection)
self.nfsComErro.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectionBehavior.SelectItems)
self.label_3 = QtWidgets.QLabel(ConferenciadeNotas)
self.label_3.setGeometry(QtCore.QRect(0, 100, 870, 20))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(16)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_3.setObjectName("label_3")
self.exportResult = QtWidgets.QPushButton(ConferenciadeNotas)
self.exportResult.setGeometry(QtCore.QRect(703, 600, 150, 30))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
self.exportResult.setFont(font)
self.exportResult.setObjectName("exportResult")
self.exportResult.setText('Exportar')
self.exportResult.clicked.connect(self.exportExcel)
self.retranslateUi(ConferenciadeNotas)
QtCore.QMetaObject.connectSlotsByName(ConferenciadeNotas)
self.rows = 0
self.conferidoFiliais.horizontalHeader().setStretchLastSection(True)
self.nfsComErro.horizontalHeader().setStretchLastSection(True)
self.conferidoFiliais.horizontalHeader().setStyleSheet(""" QHeaderView::section {padding-left: 2;
padding-right: -10;
}""")
self.nfsComErro.horizontalHeader().setStyleSheet(""" QHeaderView::section {padding-left: 2;
padding-right: -10;
}""")
def retranslateUi(self, ConferenciadeNotas):
_translate = QtCore.QCoreApplication.translate
ConferenciadeNotas.setWindowTitle(_translate("ConferenciadeNotas", "Conferência de Notas CIGAMxSEFAZ"))
self.label.setText(_translate("ConferenciadeNotas", "Conferência de Notas CIGAM x SEFAZ"))
self.label_2.setText(_translate("ConferenciadeNotas", "Arquivo:"))
self.localizarArquivoBT.setText(_translate("ConferenciadeNotas", "Localizar Arquivo"))
item = self.conferidoFiliais.verticalHeaderItem(0)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(1)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(2)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(3)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(4)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(5)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(6)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(7)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(8)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(9)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(10)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(11)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(12)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(13)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(14)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.verticalHeaderItem(15)
item.setText(_translate("ConferenciadeNotas", " "))
item = self.conferidoFiliais.horizontalHeaderItem(0)
item.setText(_translate("ConferenciadeNotas", "UN"))
item = self.conferidoFiliais.horizontalHeaderItem(1)
item.setText(_translate("ConferenciadeNotas", "NFE"))
item = self.conferidoFiliais.horizontalHeaderItem(2)
item.setText(_translate("ConferenciadeNotas", "NFCE"))
__sortingEnabled = self.conferidoFiliais.isSortingEnabled()
self.conferidoFiliais.setSortingEnabled(False)
item = self.conferidoFiliais.item(0, 0)
item.setText(_translate("ConferenciadeNotas", "001"))
item = self.conferidoFiliais.item(1, 0)
item.setText(_translate("ConferenciadeNotas", "002"))
item = self.conferidoFiliais.item(2, 0)
item.setText(_translate("ConferenciadeNotas", "003"))
item = self.conferidoFiliais.item(3, 0)
item.setText(_translate("ConferenciadeNotas", "004"))
item = self.conferidoFiliais.item(4, 0)
item.setText(_translate("ConferenciadeNotas", "005"))
item = self.conferidoFiliais.item(5, 0)
item.setText(_translate("ConferenciadeNotas", "006"))
item = self.conferidoFiliais.item(6, 0)
item.setText(_translate("ConferenciadeNotas", "007"))
item = self.conferidoFiliais.item(7, 0)
item.setText(_translate("ConferenciadeNotas", "008"))
item = self.conferidoFiliais.item(8, 0)
item.setText(_translate("ConferenciadeNotas", "009"))
item = self.conferidoFiliais.item(9, 0)
item.setText(_translate("ConferenciadeNotas", "010"))
item = self.conferidoFiliais.item(10, 0)
item.setText(_translate("ConferenciadeNotas", "011"))
item = self.conferidoFiliais.item(11, 0)
item.setText(_translate("ConferenciadeNotas", "013"))
item = self.conferidoFiliais.item(12, 0)
item.setText(_translate("ConferenciadeNotas", "014"))
item = self.conferidoFiliais.item(13, 0)
item.setText(_translate("ConferenciadeNotas", "016"))
item = self.conferidoFiliais.item(14, 0)
item.setText(_translate("ConferenciadeNotas", "100"))
item = self.conferidoFiliais.item(15, 0)
item.setText(_translate("ConferenciadeNotas", "200"))
self.conferidoFiliais.setSortingEnabled(__sortingEnabled)
item = self.nfsComErro.horizontalHeaderItem(0)
item.setText(_translate("ConferenciadeNotas", "UN"))
item = self.nfsComErro.horizontalHeaderItem(1)
item.setText(_translate("ConferenciadeNotas", "SERIE"))
item = self.nfsComErro.horizontalHeaderItem(2)
item.setText(_translate("ConferenciadeNotas", "NOTA"))
item = self.nfsComErro.horizontalHeaderItem(3)
item.setText(_translate("ConferenciadeNotas", "DATA"))
item = self.nfsComErro.horizontalHeaderItem(4)
item.setText(_translate("ConferenciadeNotas", "SITUACAO"))
item = self.nfsComErro.horizontalHeaderItem(5)
item.setText(_translate("ConferenciadeNotas", "TEM"))
self.label_3.setText(_translate("ConferenciadeNotas", "Unidade: Série: Data: até "))
def locArquivo(self):
arquivoLocal = QtWidgets.QFileDialog.getOpenFileNames(filter='*.xls')[0]
if (arquivoLocal == []):
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Erro arquivo', 'Arquivo não localizado ou invalido!', 0)
for files in arquivoLocal:
self.localArquivo.setText(' ')
self.localArquivo.setText(files)
self.file = files
df = pd.read_excel(self.file, skiprows=lambda x: x not in list(range(6, 9999)))
sqlSerie = " SELECT DISTINCT(A.SERIE) FROM (select CASE WHEN [SÉRIE] = '3' THEN 'NFE' WHEN [SÉRIE] = '7' THEN 'NFCE' WHEN [SÉRIE] = '8' THEN '2NFCE' ELSE 'NFCE' END AS SERIE \
FROM NFSEFAZ) A "
try:
df.to_sql('NFSEFAZ', engine, if_exists='replace', index=False)
except:
pass
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Erro arquivo', 'Arquivo '+ self.file + ' invalido, favor verificar!', 0)
try:
serieDf = engine.execute(sqlSerie)
except:
pass
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
Mbox('Erro arquivo', 'Arquivo '+ self.file + ' invalido, favor verificar!', 0)
serieFim = pd.DataFrame(serieDf, columns=['SERIE'])
self.serieTxt = serieFim.iloc[0]['SERIE']
try:
self.serieTxt2 = serieFim.iloc[1]['SERIE']
except:
pass
self.serieTxt2 = serieFim.iloc[0]['SERIE']
if(self.serieTxt in ['NFCE','2NFCE']):
file = self.file
dff = pd.read_excel(file, skiprows=lambda x: x not in list(range(0, 6)))
dff.to_sql('NFCESEFAZ', engine, if_exists='replace', index=False)
ie_un = engine.execute('SELECT REPLACE(SUBSTR("SECRETARIA DE ESTADO DE FAZENDA",21,10),"-","") FROM NFCESEFAZ WHERE "SECRETARIA DE ESTADO DE FAZENDA" LIKE "%INSCRIÇÃO ESTADUAL%"')
ie_un = ie_un.first()[0]
df = pd.read_excel(file, skiprows=lambda x: x not in list(range(6, 9999)))
sqlsefaz = (" select CASE WHEN {} = 130241750 THEN '001' \
WHEN {} = 131817086 THEN '002'\
WHEN {} = 131838245 THEN '003'\
WHEN {} = 131875523 THEN '004'\
WHEN {} = 131980203 THEN '005'\
WHEN {} = 132009412 THEN '006'\
WHEN {} = 132894939 THEN '007'\
WHEN {} = 132702371 THEN '008'\
WHEN {} = 133644065 THEN '009'\
WHEN {} = 131537326 THEN '010'\
WHEN {} = 133446565 THEN '011'\
WHEN {} = 132124726 THEN '013'\
WHEN {} = 133779416 THEN '014'\
WHEN {} = 133830900 THEN '016'\
WHEN {} = 133762033 THEN '100'\
WHEN {} = 131847031 THEN '200' ELSE {} END AS UN,\
CASE WHEN [SÉRIE] = '3' THEN 'NFE' WHEN [SÉRIE] = '7' THEN 'NFCE' WHEN [SÉRIE] = '8' THEN '2NFCE' ELSE 'NFCE' END AS SERIE,\
[NUMERO NOTA FISCAL] as NF, SUBSTR([DATA EMISSÃO],0,11) as DT_NF, \
CASE WHEN upper([SITUAÇÃO]) = 'CANCELADA FORA DO PRAZO' THEN 'CANCELADA' \
WHEN upper([SITUAÇÃO]) = 'AUTORIZADA FORA PRAZO' THEN 'AUTORIZADA' ELSE upper([SITUAÇÃO]) END AS SITUACAO\
FROM NFSEFAZ ").format(ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un,
ie_un, ie_un, ie_un, ie_un, ie_un, ie_un, ie_un)
df.to_sql('NFSEFAZ', engine, if_exists='replace', index=False)
results = engine.execute(sqlsefaz)
            final = pd.DataFrame(results, columns=['UN', 'SERIE', 'NF', 'DT_NF', 'SITUACAO'])
import argparse
import numpy as np
import tensorflow as tf
import time
import pickle
import pandas as pd
import os
import math
import maddpg.common.tf_util as U
from maddpg.trainer.maddpg import MADDPGAgentTrainer
import tensorflow.contrib.layers as layers
def parse_args():
parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
# Environment
parser.add_argument("--scenario", type=str, default="simple", help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=100, help="maximum episode length")
parser.add_argument("--num-episodes", type=int, default=200000, help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=2, help="number of adversaries")
parser.add_argument("--good-policy", type=str, default="ddpg", help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="maddpg", help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-2, help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.95, help="discount factor")
parser.add_argument("--batch-size", type=int, default=1024, help="number of episodes to optimize at the same time")
parser.add_argument("--num-units", type=int, default=64, help="number of units in the mlp")
# Checkpointing
parser.add_argument("--exp-name", type=str, default="PlaceHolder", help="name of the experiment")
parser.add_argument("--save-dir", type=str, default="./save_files/", help="directory in which training state and "
"model should be saved")
parser.add_argument("--save-rate", type=int, default=1000, help="save model once every time this many episodes " \
"are "
"completed")
parser.add_argument("--load-dir", type=str, default="", help="directory in which training state and model are "
"loaded")
# Evaluation
parser.add_argument("--restore", action="store_true", default=False)
parser.add_argument("--display", action="store_true", default=False)
parser.add_argument("--benchmark", action="store_true", default=False)
parser.add_argument("--benchmark-iters", type=int, default=100000, help="number of iterations run for benchmarking")
parser.add_argument("--benchmark-dir", type=str, default="./benchmark_files/", help="directory where benchmark data"
" is saved")
parser.add_argument("--plots-dir", type=str, default="./save_files/", help="directory where plot data is saved")
#Newly added arguments
parser.add_argument("--load", action="store_true", default=False) #only load if this is true. So we can display without loading
parser.add_argument("--load_episode",type = int, default=0)
parser.add_argument("--layout", type=str, default="smallClassic") #decide the layout to train
parser.add_argument("--pacman_obs_type", type=str, default="partial_obs") # pacman: full_obs or partial_obs
parser.add_argument("--ghost_obs_type", type=str, default="full_obs") # ghost: full_obs or partial_obs
parser.add_argument("--partial_obs_range", type=int, default=3) # 3x3,5x5,7x7 ...
parser.add_argument("--shared_obs", action="store_true", default= False) # pacman and ghost same observation?
parser.add_argument("--timeStepObs", action="store_true", default= False) # DEPRECATED
parser.add_argument("--astarSearch", action="store_true", default= False) # Do we want negative reward for dist
parser.add_argument("--astarAlpha", type=int, default= 1) # How much do we penalize them
return parser.parse_args()
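# Example invocation (a sketch -- the script name is an assumption; the flags are defined above):
#
#     python train.py --layout smallClassic --num-adversaries 2 --num-episodes 200000 --display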
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
n = int(input.shape[1])
m = num_outputs
first_layer = int((math.sqrt((m+2)*n)) + 2*(math.sqrt(n/(m+2))))
second_layer = int(m*(math.sqrt(n/(m+2))))
with tf.variable_scope(scope, reuse=reuse):
out = input
out = layers.fully_connected(out, num_outputs=max(num_units,first_layer), activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=max(num_units,second_layer), activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
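# Worked example of the sizing heuristic above (illustrative numbers, not from this repo):
# with n = 100 observation dims and m = 5 outputs, first_layer = int(sqrt(7*100) + 2*sqrt(100/7))
# = int(26.46 + 7.56) = 34 and second_layer = int(5*sqrt(100/7)) = 18; because max(num_units, ...)
# is applied, the default num_units=64 acts as a lower bound on both hidden layer widths.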
def make_env(scenario_name, arglist, benchmark=False):
from pacman.gym_pacman.envs.pacman_env import PacmanEnv
env = PacmanEnv(arglist.display,
arglist.num_adversaries,
arglist.max_episode_len,
arglist.layout, # for random, put string "random"
arglist.pacman_obs_type,
arglist.ghost_obs_type,
arglist.partial_obs_range,
arglist.shared_obs,
arglist.timeStepObs,
arglist.astarSearch,
arglist.astarAlpha)
env.seed(1)
return env
def get_trainers(env, num_adversaries, obs_shape_n, arglist):
trainers = []
model = mlp_model
trainer = MADDPGAgentTrainer
print("obs_shape_n", obs_shape_n)
print("action_space", env.action_space)
for i in range(1): # Pac-Man
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.good_policy=='ddpg')))
for i in range(1, env.n): # Ghosts
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.adv_policy=='ddpg')))
return trainers
def train(arglist):
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
with U.single_threaded_session():
# Create environment
env = make_env(arglist.scenario, arglist, arglist.benchmark)
obs_n = env.reset() # so that env.observation_space is initialized so trainers can be initialized
# Create agent trainers
num_adversaries = arglist.num_adversaries
obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
print("env.observation_space:", env.observation_space)
print("num adversaries: ", num_adversaries, ", env.n (num agents): ", env.n)
#need to ensure that the trainer is in correct order. pacman in front
trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist)
print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))
# Initialize
U.initialize()
# Load previous results, if necessary
if arglist.load_dir == "":
arglist.load_dir = arglist.save_dir + ("{}".format(arglist.load_episode))
if arglist.restore or arglist.benchmark:
print('Loading previous state...')
U.load_state(arglist.load_dir)
if arglist.display and arglist.load:
print('Loading previous state...')
U.load_state(arglist.load_dir)
episode_rewards = [0.0] # sum of rewards for all agents
agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward
final_ep_rewards = [] # sum of rewards for training curve
final_ep_ag_rewards = [[] for i in range(env.n)] # agent rewards for training curve
agent_info = [[[]]] # placeholder for benchmarking info
saver = tf.train.Saver(max_to_keep=None)
episode_step = 0
train_step = 0
total_win =[0]
final_win = []
total_lose = [0]
final_lose = []
t_start = time.time()
loss_list ={}
for i in range(env.n):
loss_list[i] = [[] for i in range(6)]
print('Starting iterations...')
while True:
# get action
action_n = [agent.action(obs) for agent, obs in zip(trainers,obs_n)]
# environment step
new_obs_n, rew_n, done, info_n ,win , lose = env.step(action_n)
episode_step += 1
terminal = (episode_step >= arglist.max_episode_len)
# print("obs_n", obs_n)
# print("new_obs_n", new_obs_n)
#print("action_n", action_n)
# print("rew_n",episode_step, rew_n)
# print("done", done)
# print("terminal", terminal)
# collect experience
for i, agent in enumerate(trainers):
agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done, terminal)
obs_n = new_obs_n
for i, rew in enumerate(rew_n):
episode_rewards[-1] += rew
agent_rewards[i][-1] += rew
if done or terminal:
if arglist.display:
env.render()
obs_n = env.reset()
episode_step = 0
if win:
total_win[-1] += 1
if lose:
total_lose[-1] += 1
total_win.append(0)
total_lose.append(0)
episode_rewards.append(0)
for a in agent_rewards:
a.append(0)
agent_info.append([[]])
# increment global step counter
train_step += 1
# if train_step % 1000 == 0:
# print(train_step)
# for benchmarking learned policies
if arglist.benchmark:
for i, info in enumerate(info_n):
agent_info[-1][i].append(info_n['n'])
if train_step > arglist.benchmark_iters and (done or terminal):
file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(agent_info[:-1], fp)
break
continue
# for displaying learned policies
if arglist.display:
time.sleep(0.1)
env.render()
continue
# update all trainers, if not in display or benchmark mode
loss = None
for agent in trainers:
agent.preupdate()
for ind, agent in enumerate(trainers):
loss = agent.update(trainers, train_step)
if train_step%10000 == 0 and loss != None:
for i in range(len(loss)):
loss_list[ind][i].append(loss[i])
# save model, display training output
if (terminal or done) and (len(episode_rewards) % arglist.save_rate == 0):
saving = arglist.save_dir + ("{}".format(0 + len(episode_rewards))) #TODO why append this
U.save_state(saving, saver=saver)
# print statement depends on whether or not there are adversaries
if num_adversaries == 0:
print("steps: {}, episodes: {}, mean episode reward: {}, time: {}".format(
train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))
else:
print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, number of wins {}, number of lose {}, "
"time: {}".format(
train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),
[np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards],np.sum(total_win[-arglist.save_rate:]),np.sum(total_lose[-arglist.save_rate:]), round(time.time()-t_start, 3)))
t_start = time.time()
# Keep track of final episode reward
                    final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))
                    # also record per-agent means so final_ep_ag_rewards is not left empty
                    # when it is written out below
                    for i, rew in enumerate(agent_rewards):
                        final_ep_ag_rewards[i].append(np.mean(rew[-arglist.save_rate:]))
final_win.append(np.sum(total_win[-arglist.save_rate:]))
final_lose.append(np.sum(total_lose[-arglist.save_rate:]))
ep_reward_df = pd.DataFrame(final_ep_rewards)
ep_ag_reward_df = pd.DataFrame(final_ep_ag_rewards)
win_df = pd.DataFrame(final_win)
                lose_df = pd.DataFrame(final_lose)
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
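# Usage note (informal, not part of the upstream docstrings): df.groupby("key").plot(kind="hist")
# goes through GroupByPlot.__call__ and runs DataFrame.plot once per group, while attribute
# access such as df.groupby("key").plot.line() is dispatched through __getattr__ above.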
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
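# Informal usage note: callers wrap group-wise operations as
#
#     with _group_selection_context(groupby_obj):
#         ...  # grouping columns are excluded from the selected frame in here
#
# and the cached selection is restored on exit (see the fallback path in GroupBy.apply below).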
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
        We create the grouper on instantiation; sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_whitelist
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_series(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if isinstance(result, Series) and self._selection_name is not None:
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
# TODO: implement at Cython level?
return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self):
"""
Compute group sizes.
Returns
-------
Series
Number of rows in each group.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
return self._reindex_output(result, fill_value=0)
@classmethod
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(
name: str,
alias: str,
npfunc,
numeric_only: bool = True,
min_count: int = -1,
):
_local_template = """
Compute %(f)s of group values.
Parameters
----------
numeric_only : bool, default %(no)s
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default %(mc)s
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed %(f)s of values within each group.
"""
@Substitution(name="groupby", f=name, no=numeric_only, mc=min_count)
@Appender(_common_see_also)
@Appender(_local_template)
def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
set_function_name(func, name, cls)
return func
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
x = x.array[ | notna(x.array) | pandas.core.dtypes.missing.notna |
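The completion above is pandas' notna (re-exported publicly as pd.notna), which builds a boolean mask of the non-missing positions so the surrounding helper can pick the first valid value. A minimal sketch of that masking pattern, with invented values:
import numpy as np
import pandas as pd
arr = pd.array([1.0, np.nan, 3.0])
mask = pd.notna(arr)        # array([ True, False,  True])
first_valid = arr[mask][0]  # 1.0, i.e. the first non-missing entry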
import pandas as pd
import numpy as np
import sys
import logging
import os
from pathlib import Path
class MSRawData:
"""
A class to describe raw data obtained from the MS machine
Args:
filePath (str): file path of the input MRM transition name file
logger (object): logger object created by start_logger in MSOrganiser
ingui (bool): if True, print analysis status to screen
"""
def __init__(self, filepath, logger=None, ingui=True):
self.__logger = logger
self.__ingui = ingui
self.__filecheck(Path(filepath))
def __filecheck(self,filepath):
"""Check if filepath exists and is a file"""
if not filepath.exists():
if self.__logger:
self.__logger.error('Input file path ' + '\'' + str(filepath) + '\'' +
' could not be found. ' +
'Please check the input file path.')
if self.__ingui:
print('Input file path ' + '\'' + str(filepath) + '\'' +
' could not be found. ' +
'Please check the input file path.',
flush = True)
sys.exit(-1)
elif not filepath.is_file():
if self.__logger:
self.__logger.error('Input file path ' + '\'' + str(filepath) + '\'' +
' does not lead to a system file. ' +
'Please check if the input file path is a system file and not a folder.')
if self.__ingui:
print('Input file path ' + '\'' + str(filepath) + '\'' +
' does not lead to a system file. ' +
'Please check if the input file path is a system file and not a folder.',
flush = True)
sys.exit(-1)
def remove_whiteSpaces(self,df):
"""Strip the whitespaces for each string columns of a df
Args:
df (pandas DataFrame): A panda data frame
Returns:
df (pandas DataFrame): A panda data frame with white space removed
"""
df[df.select_dtypes(['object']).columns] = df.select_dtypes(['object']).apply(lambda x: x.str.strip())
return df
class AgilentMSRawData(MSRawData):
"""
To describe raw data obtained from the Agilent MS machine
Args:
filePath (str): file path of the input MRM transition name file
logger (object): logger object created by start_logger in MSOrganiser
ingui (bool): if True, print analysis status to screen
"""
def __init__(self, filepath, logger=None, ingui=True):
MSRawData.__init__(self,filepath, ingui = ingui,logger=logger)
self.__logger = logger
self.__ingui = ingui
self.__readfile(filepath)
self.__getdataform(filepath)
self.__filename = os.path.basename(filepath)
self.VALID_COMPOUND_RESULTS = ('Area','RT','FWHM','S/N','Symmetry')
self.VALID_COMPOUND_METHODS = ('Precursor Ion','Product Ion')
def get_table(self,column_name,is_numeric=True):
"""Function to get the table from MassHunter Raw Data
Args:
column_name (str): The name of the column given in the Output_Options.
Returns:
A data frame of sample as rows and transition names as columns with values from the chosen column name
"""
if self.DataForm == "WideTableForm":
return self.__get_table_wide(column_name,is_numeric)
elif self.DataForm == "CompoundTableForm":
return self.__get_table_compound(column_name,is_numeric)
#def get_data_file_name(self):
# """Function to get the list of sample names in a form of a dataframe
#
# Returns:
# A data frame of sample as rows and transition names as columns with values from the chosen column name
#
# """
# if self.DataForm == "WideTableForm":
# return self.__get_data_file_name_wide()
# elif self.DataForm == "CompoundTableForm":
# return self.__get_data_file_name_compound()
def __get_table_wide(self,column_name,is_numeric=True):
"""Function to get the table from MassHunter Raw Data in Wide Table form"""
# Get the data file name and give error when it cannot be found
DataFileName_df = self.__get_data_file_name_wide()
# Check if Column name comes from Results or Methods group
if column_name in self.VALID_COMPOUND_RESULTS:
column_group = "Results"
elif column_name in self.VALID_COMPOUND_METHODS:
column_group = "Method"
else:
if self.__logger:
self.__logger.error('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.')
if self.__ingui:
print('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.',
flush=True)
sys.exit(-1)
# Extract the data with the given column name and group
table_index = self.RawData.iloc[0,:].str.contains(column_group) & self.RawData.iloc[1,:].str.contains(column_name)
table_df = self.RawData.loc[:,table_index].copy()
if table_df.empty:
return table_df
# Remove the column group text and whitespaces
table_df.iloc[0,:] = table_df.iloc[0,:].str.replace(column_group, "").str.strip()
# Assign column name
colnames = table_df.iloc[0,:].astype('str').str.strip()
table_df.columns = colnames
# We remove the first and second row because the column names are given
table_df = table_df.iloc[2:]
# Reset the row index
table_df = table_df.reset_index(drop=True)
# Convert text numbers into numeric
if is_numeric:
table_df = table_df.apply(pd.to_numeric, errors='coerce')
table_df = pd.concat([DataFileName_df, table_df], axis=1)
# Strip whitespace from each string column
table_df = self.remove_whiteSpaces(table_df)
return table_df
def __get_table_compound(self,column_name,is_numeric=True):
"""Function to get the table from MassHunter Raw Data in Compound Table form"""
# Get the data file name and give error when it cannot be found
DataFileName_df = self.__get_data_file_name_compound()
# Check if Column name comes from Results or Methods group
# TODO try to extract data from VALID_COMPOUND_METHODS
if column_name in self.VALID_COMPOUND_RESULTS:
column_group = "Results"
elif column_name in self.VALID_COMPOUND_METHODS:
column_group = "Method"
else:
if self.__logger:
self.__logger.error('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.')
if self.__ingui:
print('Output option ' + column_name + ' ' +
'is not a valid column in MassHunter or not ' +
'available as a valid output for this program.',
flush=True)
sys.exit(-1)
# Get the compound table df and give error when it cannot be found
# CompoundName_df = self.__get_compound_name_compound(column_name)
table_df = self.__get_compound_name_compound(column_name)
if table_df.empty:
return table_df
table_df = table_df.transpose()
# Assign column name
colnames = table_df.iloc[0,:].astype('str').str.strip()
table_df.columns = colnames
# We remove the first row because the column names are given
table_df = table_df.iloc[1:]
# If column name is a compound method, only the first row has data, we need to replicate data for all the rows
if column_name in self.VALID_COMPOUND_METHODS:
table_df = pd.concat([table_df]*DataFileName_df.shape[0], ignore_index=True)
# Reset the row index
table_df = table_df.reset_index(drop=True)
# Convert text numbers into numeric
if is_numeric:
table_df = table_df.apply(pd.to_numeric, errors='coerce')
table_df = pd.concat([DataFileName_df, table_df], axis=1)
# Strip whitespace from each string column
table_df = self.remove_whiteSpaces(table_df)
return table_df
def __get_compound_name_compound(self,column_name):
"""Function to get the df Transition Name as Rows, Sample Name as Columns with values from the chosen column_name. E.g Area """
# Get the column index of where the Transition Names are. We know for sure that it is on the third row
Compound_Col = self.RawData.iloc[0,:].str.contains("Compound Method") & self.RawData.iloc[1,:].str.contains("Name")
Compound_Col_Index = Compound_Col.index[Compound_Col == True].tolist()
# Give an error if we can't get any transition name
if len(Compound_Col_Index) == 0 :
if self.__logger:
self.__logger.error('\'' + self.__filename + '\' ' +
'has no column containing \"Name\" in Compound Method Table. ' +
'Please check the input file.')
if self.__ingui:
print('\'' + self.__filename + '\' ' +
'has no column containing \"Name\" in Compound Method Table. ' +
'Please check the input file.',
flush=True)
sys.exit(-1)
# Find cols with Transition in second row and Qualifier Method in the first row
Qualifier_Method_Col = self.RawData.iloc[0,:].str.contains("Qualifier \d Method", regex=True) & self.RawData.iloc[1,:].str.contains("Transition")
# Get the column index where the group of Qualifier Method first appeared.
Qualifier_Method_Col_Index = Qualifier_Method_Col.index[Qualifier_Method_Col == True].tolist()
# Find cols with Data File in the second row
DataFileName_Col = self.RawData.iloc[1,:].str.contains("Data File")
# Find the number of Qualifiers each Transition is entitled to have
No_of_Qual_per_Transition = int((Qualifier_Method_Col.values == True).sum() / (DataFileName_Col.values == True).sum() )
# We start a new Compound_list
Compound_list = []
# We start on row three
self.RawData.iloc[2:,sorted(Compound_Col_Index + Qualifier_Method_Col_Index[0:No_of_Qual_per_Transition] )].apply(lambda x: AgilentMSRawData._get_Compound_List(x=x,
Compound_list=Compound_list),
axis=1)
CompoundName_df = pd.DataFrame(Compound_list)
CompoundName_df = self.remove_whiteSpaces(CompoundName_df)
# All Column Name (e.g Area) and Transition index
ColName_Col = self.RawData.iloc[1,:].str.contains(column_name) | self.RawData.iloc[1,:].str.contains("Transition")
ColName_Col_Index = ColName_Col.index[ColName_Col == True].tolist()
# Transition from Compound Method (They should not be used to get the Qualifer Area)
CpdMethod_Transition_Col = self.RawData.iloc[0,:].str.contains("Compound Method") & self.RawData.iloc[1,:].str.contains("Transition")
CpdMethod_Transition_Col_Index = CpdMethod_Transition_Col.index[CpdMethod_Transition_Col == True].tolist()
# Column Name (e.g Area) found for the Qualifier
ColName_Qualifier_Col = self.RawData.iloc[0,:].str.contains("Qualifier \d Results", regex=True) & self.RawData.iloc[1,:].str.contains(column_name)
ColName_Qualifier_Col_Index = ColName_Qualifier_Col.index[ColName_Qualifier_Col == True].tolist()
# Column Name (e.g Area), found for the Transitions
ColName_Compound_Col_Index = [x for x in ColName_Col_Index if x not in sorted(CpdMethod_Transition_Col_Index + Qualifier_Method_Col_Index + ColName_Qualifier_Col_Index, key = int)]
table_list = []
# We start on row three, update the table list with the column_name
self.RawData.iloc[2:,sorted(ColName_Col_Index, key=int)].apply(lambda x: AgilentMSRawData._get_Compound_Data(x=x,
table_list=table_list,
ColName_Compound_Col_Index = ColName_Compound_Col_Index,
Qualifier_Method_Col_Index = Qualifier_Method_Col_Index,
ColName_Qualifier_Col_Index = ColName_Qualifier_Col_Index,
No_of_Qual_per_Transition = No_of_Qual_per_Transition)
,axis=1)
if pd.DataFrame(table_list).empty:
return(pd.DataFrame(table_list))
else:
# TODO
# Check if the number of rows in the table_list of values,
# matches the number of rows (Transition and Qualifier Names) in the CompoundName_df
# If not, give an error of a possible corrupted csv input.
#print(len(CompoundName_df.index))
#print(len(pd.DataFrame(table_list)))
return(pd.concat([CompoundName_df, pd.DataFrame(table_list) ], axis=1))
return(pd.DataFrame())
def _get_Compound_Data(x,table_list,ColName_Compound_Col_Index,Qualifier_Method_Col_Index,ColName_Qualifier_Col_Index,No_of_Qual_per_Transition):
"""Function to get the values from the chosen column_name. E.g(Area) from a given row from the raw MRM data from Agilent in Compound Table form. table_list will be updated"""
#Get Compound row
Compound_df = pd.DataFrame(x[x.index.isin(ColName_Compound_Col_Index)])
Compound_df = Compound_df.T.values.tolist()
#Append to table_list
table_list.extend(Compound_df)
#Get Qualifier row
for i in range(0,No_of_Qual_per_Transition):
#Check if there is a transition
#print([Qualifier_Method_Col_Index[i]])
#print(x[ x.index.isin([Qualifier_Method_Col_Index[i]])].values.tolist()[0])
#sys.exit(0)
Transition = x[ x.index.isin([Qualifier_Method_Col_Index[i]])].values.tolist()[0]
if(pd.isna(Transition)):
break
else:
#When there is a transition, we need to collect a subset of ColName_Qualifier_Col_Index that correspond to this transition
#ColName_Qualifier_Col_Index_subset = [ColName_Qualifier_Col_Index[index] for index in range(0+i,int(len(ColName_Qualifier_Col_Index)/No_of_Qual_per_Transition),No_of_Qual_per_Transition)]
ColName_Qualifier_Col_Index_subset = [ColName_Qualifier_Col_Index[index] for index in range(0+i,int(len(ColName_Qualifier_Col_Index)),No_of_Qual_per_Transition)]
Qualifier_df = pd.DataFrame(x[x.index.isin(ColName_Qualifier_Col_Index_subset)])
Qualifier_df = Qualifier_df.T.values.tolist()
#Append to table_list
table_list.extend(Qualifier_df)
#if i == 0:
# print(Transition)
# print(Qualifier_df)
# sys.exit(0)
def _get_Compound_List(x,Compound_list):
"""Function to get the Transition Names and Qualifiers from a given row from the raw MRM data from Agilent in Compound Table form. Compound_list will be updated"""
#x is a series
#Remove NA if any
s = x.dropna().copy()
#Update the Qualifer name if there is any
s[s.str.contains("->")] = "Qualifier (" + s[s.str.contains("->")].values + ")"
Compound_list.extend(s.values.tolist())
def __get_data_file_name_wide(self):
"""Function to get the list of sample names from MassHunter Raw Data in Wide Table form"""
DataFileName_Col = self.RawData.iloc[0,:].str.contains("Sample") & self.RawData.iloc[1,:].str.contains("Data File")
DataFileName_df = self.RawData.loc[:,DataFileName_Col].copy()
if DataFileName_df.empty:
if self.__logger:
self.__logger.error('\'' + self.__filename + '\' ' +
'has no column containing \"Data File\". ' +
'Please check the input file.')
if self.__ingui:
print('\'' + self.__filename + '\' ' +
'has no column containing \"Data File\". ' +
'Please check the input file.',
flush=True)
sys.exit(-1)
#We standardise the name to Sample_Name
colnames = ["Sample_Name"]
DataFileName_df.columns = colnames
#We remove the first and second row because the column names are given
DataFileName_df = DataFileName_df.iloc[2:]
#Reset the row index
DataFileName_df = DataFileName_df.reset_index(drop=True)
#Strip whitespace from each string column
DataFileName_df = self.remove_whiteSpaces(DataFileName_df)
#Remove the .d extension for Agilent Files
DataFileName_df["Sample_Name"] = DataFileName_df["Sample_Name"].replace('.d$','',regex=True)
return DataFileName_df
def __get_data_file_name_compound(self):
"""Function to get the list of sample names from MassHunter Raw Data in Compound Table form"""
DataFileName_Col = self.RawData.iloc[1,:].str.contains("Data File")
#We take the copy of the original dataframe, convert the Series output into a Dataframe
DataFileName_df = self.RawData.loc[2,DataFileName_Col].copy().to_frame()
if DataFileName_df.empty:
if self.__logger:
self.__logger.error('\'' + self.__filename + '\' ' +
'has no column containing \"Data File\". ' +
'Please check the input file.')
if self.__ingui:
print('\'' + self.__filename + '\' ' +
'has no column containing \"Data File\". ' +
'Please check the input file.',
flush=True)
sys.exit(-1)
#We standardise the name to Sample Name
colnames = ["Sample_Name"]
DataFileName_df.columns = colnames
#Reset the row index
DataFileName_df = DataFileName_df.reset_index(drop=True)
#Strip whitespace from each string column
DataFileName_df = self.remove_whiteSpaces(DataFileName_df)
#Remove the .d extension for Agilent Files
DataFileName_df["Sample_Name"] = DataFileName_df["Sample_Name"].replace('.d$','',regex=True)
return DataFileName_df
def __readfile(self,filepath):
"""Function to read the input file"""
# Check if input is blank/None
if not filepath:
if self.__logger:
self.__logger.error('%s is empty. Please give an input file.', str(filepath))
if self.__ingui:
print(str(filepath) + ' is empty. Please give an input file.',flush=True)
sys.exit(-1)
# Check if the file exists for reading
if not os.path.isfile(filepath):
if self.__logger:
self.__logger.error('%s does not exist. Please check the input file.',str(filepath))
if self.__ingui:
print(str(filepath) + ' does not exist. Please check the input file.',flush=True)
sys.exit(-1)
#self.RawData = pd.read_csv(filepath, header=None,low_memory=False,encoding = "ISO-8859-1")
all_encoders_fail = True
for encoder in ["ANSI","ISO-8859-1","utf-8"]:
try:
self.RawData = pd.read_csv(filepath, header=None,low_memory=False, encoding = encoder)
all_encoders_fail = False
except UnicodeDecodeError:
if self.__logger:
self.__logger.warning('Warning: Unable to read csv file using the %s encoder',encoder)
continue
except pd.errors.EmptyDataError:
# If the file has no content
if self.__logger:
self.__logger.error(str(filepath) + ' is an empty file. Please check the input file.')
if self.__ingui:
print(str(filepath) + ' is an empty file. Please check the input file.',flush=True)
sys.exit(-1)
if all_encoders_fail:
if self.__logger:
self.__logger.error('Unable to read csv file with the available encoders')
if self.__ingui:
print('Unable to read csv file with the available encoders',flush=True)
sys.exit(-1)
# On the first row, fill empty cells forward
self.RawData.iloc[0,:] = self.RawData.iloc[0,:].fillna(method='ffill')
def __getdataform(self,filepath):
"""Function to get the Masshunter data form"""
if | pd.isna(self.RawData.iloc[0,0]) | pandas.isna |
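For reference, pd.isna, the call completed in the row above, accepts both scalars and array-likes; a short illustration with a made-up frame:
import numpy as np
import pandas as pd
raw = pd.DataFrame([[np.nan, "Sample"], ["Results", "Area"]])
pd.isna(raw.iloc[0, 0])   # True, the top-left cell is missing
pd.isna(["Area", None])   # array([False,  True]) for array-likes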
from PIL import ImageGrab
import win32gui
from sentiment import face_sentiment
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import tkinter as tk
import tkinter.ttk as ttk
import sys
class GroupSentiment(tk.Frame):
def __init__(self, master=None):
self.toplist, self.winlist = [], []
tk.Frame.__init__(self,master)
self.createWidgets()
def enum_cb(self, hwnd, results):
self.winlist.append((hwnd, win32gui.GetWindowText(hwnd)))
def take_screenshot(self):
win32gui.EnumWindows(self.enum_cb, self.toplist)
zoom = [(hwnd, title) for hwnd, title in self.winlist if 'Zoom Meeting' in title]
# just grab the hwnd for first window matching firefox
if not zoom:
print('Zoom Meeting not found')
return None
zoom = zoom[0]
hwnd = zoom[0]
win32gui.SetForegroundWindow(hwnd)
bbox = win32gui.GetWindowRect(hwnd)
img = ImageGrab.grab(bbox)
img = np.array(img)
return img
def createWidgets(self):
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
# bar1.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.get_tk_widget().grid(row=0, column=1)
canvas.draw()
self.plotbutton = tk.Button(master=root, text="plot", command=lambda: self.plot(canvas, ax))
self.plotbutton.grid(row=0, column=0)
while True:
self.plot(canvas, ax)
def plot(self, canvas, ax):
print('Starting')
screenshot = self.take_screenshot()
if screenshot is not None:
emotions = face_sentiment(screenshot)
print(emotions)
data = {'Emotions': ['anger', 'joy', 'surprise', 'sorrow'],
'Values': emotions
}
df = | pd.DataFrame(data, columns=['Emotions', 'Values']) | pandas.DataFrame |
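The row above completes a pandas.DataFrame built from a dict of columns; a standalone version with placeholder emotion scores (not real model output):
import pandas as pd
data = {"Emotions": ["anger", "joy", "surprise", "sorrow"],
        "Values": [0.1, 0.6, 0.2, 0.1]}
df = pd.DataFrame(data, columns=["Emotions", "Values"])
ranked = df.sort_values("Values", ascending=False)  # e.g. order emotions before plotting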
from sklearn.base import TransformerMixin, BaseEstimator
import pandas as pd
class BaseTransformer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, **transform_params):
return self
class ColumnSelector(BaseTransformer):
"""Selects columns from Pandas Dataframe"""
def __init__(self, columns, c_type=None):
self.columns = columns
self.c_type = c_type
def transform(self, X, **transform_params):
cs = X[self.columns]
if self.c_type is None:
return cs
else:
return cs.astype(self.c_type)
class SpreadBinary(BaseTransformer):
def transform(self, X, **transform_params):
return X.applymap(lambda x: 1 if x == 1 else -1)
class DfTransformerAdapter(BaseTransformer):
"""Adapts a scikit-learn Transformer to return a pandas DataFrame"""
def __init__(self, transformer):
self.transformer = transformer
def fit(self, X, y=None, **fit_params):
self.transformer.fit(X, y=y, **fit_params)
return self
def transform(self, X, **transform_params):
raw_result = self.transformer.transform(X, **transform_params)
return pd.DataFrame(raw_result, columns=X.columns, index=X.index)
class DfOneHot(BaseTransformer):
"""
Wraps helper method `get_dummies` making sure all columns get one-hot encoded.
"""
def __init__(self):
self.dummy_columns = []
def fit(self, X, y=None, **fit_params):
self.dummy_columns = pd.get_dummies(
X,
prefix=[c for c in X.columns],
columns=X.columns).columns
return self
def transform(self, X, **transform_params):
return pd.get_dummies(
X,
prefix=[c for c in X.columns],
columns=X.columns).reindex(columns=self.dummy_columns, fill_value=0)
class DfFeatureUnion(BaseTransformer):
"""A dataframe friendly implementation of `FeatureUnion`"""
def __init__(self, transformers):
self.transformers = transformers
def fit(self, X, y=None, **fit_params):
for l, t in self.transformers:
t.fit(X, y=y, **fit_params)
return self
def transform(self, X, **transform_params):
transform_results = [t.transform(X, **transform_params) for l, t in self.transformers]
return | pd.concat(transform_results, axis=1) | pandas.concat |
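pd.concat with axis=1, as completed above, pastes the per-transformer results together column-wise while aligning on the shared index; a small sketch:
import pandas as pd
a = pd.DataFrame({"x": [1, 2]}, index=[10, 11])
b = pd.DataFrame({"y": [3, 4]}, index=[10, 11])
merged = pd.concat([a, b], axis=1)  # columns x and y side by side, rows matched by index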
# Copyright 2020 trueto
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import chardet
import pandas as pd
from pathlib import Path
str_type = {
"疾病和诊断": "Dis",   # disease and diagnosis
"解剖部位": "Body",    # anatomical site
"实验室检验": "Test",  # laboratory test
"影像检查": "CT",      # imaging examination
"药物": "Drug",        # medication
"手术": "Sur"          # surgical procedure
}
def C_trans_to_E(string):
E_pun = u',.!?[]()<>"\'"\':;'
C_pun = u',。!?【】()《》“‘”’:;'
table= {ord(f): ord(t) for f, t in zip(C_pun, E_pun)}
string = string.translate(table)
return re.sub("[ |\t|\r|\n|\\\]", "_", string)
def strQ2B(ustr):
"全角转半角"
rstr = ""
for uchar in ustr:
inside_code = ord(uchar)
# Full-width space is converted directly to a normal space
if inside_code == 12288:
inside_code = 32
# Other full-width characters (except space) are shifted by a fixed offset
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstr += chr(inside_code)
return rstr
def get_X_y(in_file, out_file, max_len=500):
X = []
y = []
entity_data = []
with open(in_file, 'r', encoding='utf8') as f:
for line in f:
tempObj = json.loads(line)
originalText = tempObj['originalText']
text = C_trans_to_E(strQ2B(originalText))
entities = tempObj['entities']
print("Processing text:{}".format(text))
if len(text) <= max_len:
X_ = list(text)
y_ = ["O"] * len(X_)
for entity in entities:
start_pos = entity["start_pos"]
end_pos = entity["end_pos"]
label_type = entity["label_type"]
if "clinical" in in_file:
tag = str_type[label_type]
else:
tag = label_type
# for i in range(start_pos, end_pos):
# y_[i] = tag
entity_data.append([text[start_pos : end_pos], tag])
y_[start_pos] = 'B-'+tag
for i in range(start_pos+1, end_pos):
y_[i] = 'I-' + tag
assert len(X_) == len(y_)
X.append(X_)
y.append(y_)
else:
# Split the long text into shorter chunks at punctuation
dot_index_list = []
text_ = text
flag = 0
while(len(text_) > max_len):
text_ = text_[:max_len]
index_list = []
for match in re.finditer(',', text_):
index = match.span()[0]
index_list.append(index)
# last_dot = index_list.pop()
if len(index_list) > 1:
last_dot = index_list.pop()
else:
index_list_ = []
for match in re.finditer('.', text_):
index = match.span()[0]
index_list_.append(index)
if len(index_list_) > 1:
last_dot = index_list_.pop()
else:
last_dot = len(text_)
dot_index_list.append(last_dot + flag)
text_ = text[last_dot+flag:]
flag += last_dot
print(dot_index_list)
flag = 0
dot_index_list.append(len(text))
for i, dot_index in enumerate(dot_index_list):
short_text = text[flag: dot_index+1]
X_ = list(short_text)
print("Short text:{}".format(short_text))
y_ = ["O"] * len(X_)
for entity in entities:
start_pos = entity["start_pos"]
end_pos = entity["end_pos"]
label_type = entity["label_type"]
if "clinical" in in_file:
tag = str_type[label_type]
else:
tag = label_type
#for j in range(start_pos, end_pos):
# j = j - flag
# if j >= 0 and j < len(y_):
# y_[j] = tag
en_list = []
k = start_pos - flag
if k >= 0 and k < len(y_):
y_[k] = 'B-' + tag
en_list.append(X_[k])
for j in range(start_pos+1, end_pos):
j = j - flag
if j >= 0 and j < len(y_):
y_[j] = 'I-' + tag
en_list.append(X_[j])
if len(en_list) > 0:
entity_data.append(["".join(en_list), tag])
# if start_pos - flag > 0:
# print(short_text[start_pos - flag : end_pos - flag])
assert len(X_) == len(y_)
X.append(X_)
y.append(y_)
flag = dot_index + 1
assert len(X) == len(y)
data_obj = (X, y, entity_data)
pd.to_pickle(data_obj, out_file)
def get_X(in_file, out_file, max_len=500):
X = []
cut_his = {}
originalTexts = []
texts = []
with open(in_file, 'rb') as f:
encoding = chardet.detect(f.read())['encoding']
with open(in_file, 'r', encoding="utf8") as f:
for text_id, line in enumerate(f):
tempObj = json.loads(line, encoding=encoding)
originalText = tempObj['originalText']
originalTexts.append(originalText)
text = C_trans_to_E(strQ2B(originalText))
texts.append(text)
print("Processing text:{}".format(text))
if len(text) <= max_len:
X_ = list(text)
X.append(X_)
cut_his[text_id] = len(X) - 1
else:
# Split the long text into shorter chunks at punctuation
dot_index_list = []
text_ = text
flag = 0
while(len(text_) > max_len):
text_ = text_[:max_len]
index_list = []
for match in re.finditer(',', text_):
index = match.span()[0]
index_list.append(index)
# last_dot = index_list.pop()
if len(index_list) > 1:
last_dot = index_list.pop()
else:
index_list_ = []
for match in re.finditer('.', text_):
index = match.span()[0]
index_list_.append(index)
if len(index_list_) > 1:
last_dot = index_list_.pop()
else:
last_dot = len(text_)
dot_index_list.append(last_dot + flag)
text_ = text[last_dot+flag:]
flag += last_dot
print(dot_index_list)
flag = 0
dot_index_list.append(len(text))
text_id_list = []
for i, dot_index in enumerate(dot_index_list):
short_text = text[flag: dot_index+1]
X_ = list(short_text)
X.append(X_)
text_id_list.append(len(X)-1)
flag = dot_index + 1
cut_his[text_id] = text_id_list
# assert len(X) == len(ids)
data_obj = (X, cut_his, originalTexts, texts)
pd.to_pickle(data_obj, out_file)
def get_vocab_csv(input_file, name):
_, _, entity_data = | pd.read_pickle(input_file) | pandas.read_pickle |
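get_vocab_csv above reads back the (X, y, entity_data) tuple written earlier with pd.to_pickle; a hedged round-trip sketch in which the file name, contents and column labels are all placeholders:
import pandas as pd
data_obj = ([["t", "u"]], [["O", "O"]], [["tumor", "Dis"]])  # shape assumed from get_X_y
pd.to_pickle(data_obj, "ner_train.pkl")
_, _, entity_data = pd.read_pickle("ner_train.pkl")
vocab = pd.DataFrame(entity_data, columns=["entity", "tag"]).drop_duplicates()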
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.materiallaws import WoehlerCurve
wc_data = pd.Series({
'k_1': 7.,
'TN': 1.75,
'ND': 1e6,
'SD': 300.0
})
def test_woehler_accessor():
wc = wc_data.drop('TN')
for key in wc.index:
wc_miss = wc.drop(key)
with pytest.raises(AttributeError):
wc_miss.woehler
def test_woehler_transform_probability():
wc_50 = pd.Series({
'k_1': 2,
'k_2': np.inf,
'TS': 2.,
'TN': 9.,
'ND': 3e6,
'SD': 300 * np.sqrt(2.),
'failure_probability': 0.5
}).sort_index()
transformed_90 = wc_50.woehler.transform_to_failure_probability(0.9).to_pandas()
pd.testing.assert_series_equal(transformed_90[['SD', 'ND', 'failure_probability']],
pd.Series({'SD': 600.0, 'ND': 4.5e6, 'failure_probability': 0.9}))
transformed_back = transformed_90.woehler.transform_to_failure_probability(0.5).to_pandas()
| pd.testing.assert_series_equal(transformed_back, wc_50) | pandas.testing.assert_series_equal |
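pd.testing.assert_series_equal, which closes the test above, passes silently when values, dtype, index and name all match and raises AssertionError with a diff otherwise; for example:
import pandas as pd
left = pd.Series({"SD": 600.0, "ND": 4.5e6})
right = pd.Series({"SD": 600.0, "ND": 4.5e6})
pd.testing.assert_series_equal(left, right)  # no output on success
# changing any value, the dtype or the index on one side would raise AssertionError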
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#BTC strategy model with brute force optimization, need BTC data set to run
#BTC/USD time series can be found for free on Investing.com
#Import modules
import numpy as np
import random as rand
import pandas as pd
import time as t
from pandas import read_csv
#Number of iterations for brute force optimization
iterations = range(0, 2000)
#Can access BTC/USD time series for free on Investing.com
df = read_csv('BTCUSD.csv', sep = ',')
#Variable assignments
Empty = []
Counter = 0
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
#Start timer
Start = t.time()
#Formatting
df = df.set_index('Date')
df = df.iloc[::-1]
df['Adj Close'] = df['Adj Close'].str.replace(',', '')
df['Adj Close'] = pd.to_numeric(df['Adj Close'], errors='coerce')
df['High'] = df['High'].str.replace(',', '')
df['High'] = pd.to_numeric(df['High'], errors='coerce')
df['Open'] = df['Open'].str.replace(',', '')
df['Open'] = pd.to_numeric(df['Open'], errors='coerce')
df['Low'] = df['Low'].str.replace(',', '')
df['Low'] = | pd.to_numeric(df['Low'], errors='coerce') | pandas.to_numeric |
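The cleaning pattern above (strip the thousands separators, then pd.to_numeric with errors='coerce') turns unparseable strings into NaN instead of raising; condensed:
import pandas as pd
prices = pd.Series(["1,234.5", "2,000", "n/a"])
prices = prices.str.replace(",", "", regex=False)  # drop thousands separators first
prices = pd.to_numeric(prices, errors="coerce")    # [1234.5, 2000.0, NaN]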
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
import pandas
default_args = {
'owner': 'ODDS',
}
dag = DAG('product_price_pipeline',
default_args=default_args,
start_date=timezone.datetime(2020, 8, 1),
catchup=False)
start = DummyOperator(task_id='start', dag=dag)
def get_product_upc_and_description():
df = | pandas.read_csv('products-lookup-table.csv', header=1) | pandas.read_csv |
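header=1 in the completion above makes read_csv skip the first physical line and take the second as the header row; a sketch under the assumption that the CSV carries one banner line, with invented column names:
import pandas as pd
df = pd.read_csv("products-lookup-table.csv", header=1)
subset = df[["UPC", "DESCRIPTION"]]  # column names are assumptions, not taken from the file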
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import pickle
import pandas as pd
import xml.etree.ElementTree as ET
import math
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import csv
import glob
import scikit_posthocs as sp
from scipy import stats
import os
sns.set(context='paper', style='whitegrid')
hue_order = ["traffic light", "crossing intention", "trajectory"]
eps=0.01
tl_black_list = [
"3_3_96tl",
"3_3_102tl",
"3_4_107tl",
"3_4_108tl",
"3_5_112tl",
"3_5_113tl",
"3_5_116tl",
"3_5_117tl",
"3_5_118tl",
"3_5_119tl",
"3_5_122tl",
"3_5_123tl",
"3_5_126tl",
"3_5_127tl",
"3_6_128tl",
"3_6_137tl",
"3_7_142tl",
"3_8_153tl",
"3_8_160tl",
"3_9_173tl",
"3_9_174tl",
"3_9_179tl",
"3_10_185tl",
"3_10_188tl",
"3_11_205tl",
"3_12_218tl",
"3_12_221tl",
"3_15_241tl",
"3_16_256tl",
"3_16_257tl",
]
opposite_anno_list = ["3_16_259tl", "3_16_258tl", "3_16_249tl"]
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename =file.split("/")[-1]
count = int(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
trial = filename.split("_")[-1].replace(".csv", "")
buf["subject"] = filename.replace("log_data_", "").split("_")[0]
buf["task"] = filename.replace("log_data_", "").split("_")[1]
correct_list = []
response_list = []
for idx, row in buf.iterrows():
if row.id in tl_black_list:
row.last_state = -2
if row.last_state == -1: # no intervention
correct_list.append(-1)
response_list.append(-1)
elif int(row.last_state) == int(row.state):
if row.id in opposite_anno_list:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
if row.id in opposite_anno_list:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
buf["correct"] = correct_list
buf["response"] = response_list
len(correct_list)
if log_data is None:
log_data = buf
else:
log_data = log_data.append(buf, ignore_index=True)
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
subject_data = pd.DataFrame(columns=["subject", "task", "acc", "int_length", "missing"])
for subject in log_data.subject.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.subject == subject) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
acc = len(target[target.correct == 1])/(len(target[target.correct == 0]) + len(target[target.correct == 1])+eps)
missing = len(target[target.correct == -1])/(len(target[target.correct != -2])+eps)
buf = pd.DataFrame([(subject, task_list.get(task), acc, length, missing)], columns=subject_data.columns)
subject_data = | pd.concat([subject_data, buf]) | pandas.concat |
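Appending one-row frames with pd.concat inside nested loops, as above, re-copies the accumulated frame on every pass; collecting the rows first and concatenating once is the usual alternative:
import pandas as pd
rows = []
for subject in ["sub01", "sub02"]:  # invented subject ids
    rows.append(pd.DataFrame([(subject, 0.9)], columns=["subject", "acc"]))
subject_data = pd.concat(rows, ignore_index=True)  # one concat instead of one per iteration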
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: | pd.Timestamp("2013-05-30 00:00:00") | pandas.Timestamp |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import copy
import torch
import numpy as np
import pandas as pd
from qlib.data.dataset import DatasetH
device = "cuda" if torch.cuda.is_available() else "cpu"
def _to_tensor(x):
if not isinstance(x, torch.Tensor):
return torch.tensor(x, dtype=torch.float, device=device)
return x
def _create_ts_slices(index, seq_len):
"""
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
"""
assert index.is_lexsorted(), "index should be sorted"
# number of dates for each code
sample_count_by_codes = pd.Series(0, index=index).groupby(level=0).size().values
# start_index for each code
start_index_of_codes = np.roll(np.cumsum(sample_count_by_codes), 1)
start_index_of_codes[0] = 0
# all the [start, stop) indices of features
# features btw [start, stop) are used to predict the `stop - 1` label
slices = []
for cur_loc, cur_cnt in zip(start_index_of_codes, sample_count_by_codes):
for stop in range(1, cur_cnt + 1):
end = cur_loc + stop
start = max(end - seq_len, 0)
slices.append(slice(start, end))
slices = np.array(slices)
return slices
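# Illustrative sketch: the slices produced by _create_ts_slices on a tiny, made-up
# <instrument, datetime> MultiIndex. Instrument codes and dates are placeholders.
def _demo_create_ts_slices():
    dates = pd.date_range("2020-01-01", periods=4)
    index = pd.MultiIndex.from_product(
        [["SH600000", "SZ000001"], dates], names=["instrument", "datetime"]
    )
    # With seq_len=2, each slice covers at most the two most recent rows of one
    # instrument, ending at the row whose label is being predicted.
    for s in _create_ts_slices(index, seq_len=2):
        print(s, index[s].tolist())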
def _get_date_parse_fn(target):
"""get date parse function
This method is used to parse date arguments as target type.
Example:
get_date_parse_fn('20120101')('2017-01-01') => '20170101'
get_date_parse_fn(20120101)('2017-01-01') => 20170101
"""
if isinstance(target, pd.Timestamp):
_fn = lambda x: | pd.Timestamp(x) | pandas.Timestamp |
import biom
import skbio
import numpy as np
import pandas as pd
from deicode.matrix_completion import MatrixCompletion
from deicode.preprocessing import rclr
from deicode._rpca_defaults import (DEFAULT_RANK, DEFAULT_MSC, DEFAULT_MFC,
DEFAULT_ITERATIONS)
from scipy.linalg import svd
def rpca(table: biom.Table,
n_components: int = DEFAULT_RANK,
min_sample_count: int = DEFAULT_MSC,
min_feature_count: int = DEFAULT_MFC,
max_iterations: int = DEFAULT_ITERATIONS) -> (
skbio.OrdinationResults,
skbio.DistanceMatrix):
"""Runs RPCA with an rclr preprocessing step.
This code will be run by both the standalone and QIIME 2 versions of
DEICODE.
"""
# filter sample to min depth
def sample_filter(val, id_, md): return sum(val) > min_sample_count
def observation_filter(val, id_, md): return sum(val) > min_feature_count
# filter and import table
table = table.filter(observation_filter, axis='observation')
table = table.filter(sample_filter, axis='sample')
table = table.to_dataframe().T
if len(table.index) != len(set(table.index)):
raise ValueError('Data-table contains duplicate indices')
if len(table.columns) != len(set(table.columns)):
raise ValueError('Data-table contains duplicate columns')
# rclr preprocessing and OptSpace (RPCA)
opt = MatrixCompletion(n_components=n_components,
max_iterations=max_iterations).fit(rclr(table))
rename_cols = ['PC' + str(i+1) for i in range(n_components)]
X = opt.sample_weights @ opt.s @ opt.feature_weights.T
X = X - X.mean(axis=0)
X = X - X.mean(axis=1).reshape(-1, 1)
u, s, v = svd(X)
u = u[:, :n_components]
v = v.T[:, :n_components]
p = s**2 / np.sum(s**2)
p = p[:n_components]
s = s[:n_components]
feature_loading = | pd.DataFrame(v, index=table.columns, columns=rename_cols) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
| tm.assert_frame_equal(sorted_df, expected) | pandas._testing.assert_frame_equal |
"""Functions for pulling data primarily from the EIA's Form 860."""
import logging
import pandas as pd
import sqlalchemy as sa
import pudl
from pudl.metadata.fields import apply_pudl_dtypes
logger = logging.getLogger(__name__)
def utilities_eia860(pudl_engine, start_date=None, end_date=None):
"""Pull all fields from the EIA860 Utilities table.
Args:
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
Returns:
pandas.DataFrame: A DataFrame containing all the fields of the EIA 860
Utilities table.
"""
pt = pudl.output.pudltabl.get_table_meta(pudl_engine)
# grab the entity table
utils_eia_tbl = pt["utilities_entity_eia"]
utils_eia_select = sa.sql.select(utils_eia_tbl)
utils_eia_df = pd.read_sql(utils_eia_select, pudl_engine)
# grab the annual eia entity table
utils_eia860_tbl = pt["utilities_eia860"]
utils_eia860_select = sa.sql.select(utils_eia860_tbl)
if start_date is not None:
start_date = pd.to_datetime(start_date)
utils_eia860_select = utils_eia860_select.where(
utils_eia860_tbl.c.report_date >= start_date
)
if end_date is not None:
end_date = pd.to_datetime(end_date)
utils_eia860_select = utils_eia860_select.where(
utils_eia860_tbl.c.report_date <= end_date
)
utils_eia860_df = pd.read_sql(utils_eia860_select, pudl_engine)
# grab the glue table for the utility_id_pudl
utils_g_eia_tbl = pt["utilities_eia"]
utils_g_eia_select = sa.sql.select(
utils_g_eia_tbl.c.utility_id_eia,
utils_g_eia_tbl.c.utility_id_pudl,
)
utils_g_eia_df = pd.read_sql(utils_g_eia_select, pudl_engine)
out_df = pd.merge(utils_eia_df, utils_eia860_df, how="left", on=["utility_id_eia"])
out_df = pd.merge(out_df, utils_g_eia_df, how="left", on=["utility_id_eia"])
out_df = (
out_df.assign(report_date=lambda x: pd.to_datetime(x.report_date))
.dropna(subset=["report_date", "utility_id_eia"])
.pipe(apply_pudl_dtypes, group="eia")
)
first_cols = [
"report_date",
"utility_id_eia",
"utility_id_pudl",
"utility_name_eia",
]
out_df = pudl.helpers.organize_cols(out_df, first_cols)
return out_df
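# Usage sketch: pull one year of utility records. The SQLite URL below is a
# placeholder; point it at a real PUDL database before running.
def _demo_utilities_eia860():
    pudl_engine = sa.create_engine("sqlite:////path/to/pudl.sqlite")  # hypothetical path
    utils_2019 = utilities_eia860(
        pudl_engine, start_date="2019-01-01", end_date="2019-12-31"
    )
    return utils_2019[["report_date", "utility_id_eia", "utility_name_eia"]].head()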
def plants_eia860(pudl_engine, start_date=None, end_date=None):
"""Pull all fields from the EIA Plants tables.
Args:
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
Returns:
pandas.DataFrame: A DataFrame containing all the fields of the EIA 860
Plants table.
"""
pt = pudl.output.pudltabl.get_table_meta(pudl_engine)
# grab the entity table
plants_eia_tbl = pt["plants_entity_eia"]
plants_eia_select = sa.sql.select(plants_eia_tbl)
plants_eia_df = pd.read_sql(plants_eia_select, pudl_engine)
# grab the annual table select
plants_eia860_tbl = pt["plants_eia860"]
plants_eia860_select = sa.sql.select(plants_eia860_tbl)
if start_date is not None:
start_date = pd.to_datetime(start_date)
plants_eia860_select = plants_eia860_select.where(
plants_eia860_tbl.c.report_date >= start_date
)
if end_date is not None:
end_date = pd.to_datetime(end_date)
plants_eia860_select = plants_eia860_select.where(
plants_eia860_tbl.c.report_date <= end_date
)
plants_eia860_df = pd.read_sql(plants_eia860_select, pudl_engine).assign(
report_date=lambda x: pd.to_datetime(x.report_date)
)
# plant glue table
plants_g_eia_tbl = pt["plants_eia"]
plants_g_eia_select = sa.sql.select(
plants_g_eia_tbl.c.plant_id_eia,
plants_g_eia_tbl.c.plant_id_pudl,
)
plants_g_eia_df = pd.read_sql(plants_g_eia_select, pudl_engine)
out_df = pd.merge(plants_eia_df, plants_eia860_df, how="left", on=["plant_id_eia"])
out_df = pd.merge(out_df, plants_g_eia_df, how="left", on=["plant_id_eia"])
utils_eia_tbl = pt["utilities_eia"]
utils_eia_select = sa.sql.select(utils_eia_tbl)
utils_eia_df = pd.read_sql(utils_eia_select, pudl_engine)
out_df = (
pd.merge(out_df, utils_eia_df, how="left", on=["utility_id_eia"])
.dropna(subset=["report_date", "plant_id_eia"])
.pipe(apply_pudl_dtypes, group="eia")
)
return out_df
def plants_utils_eia860(pudl_engine, start_date=None, end_date=None):
"""Create a dataframe of plant and utility IDs and names from EIA 860.
Returns a pandas dataframe with the following columns:
- report_date (in which data was reported)
- plant_name_eia (from EIA entity)
- plant_id_eia (from EIA entity)
- plant_id_pudl
- utility_id_eia (from EIA860)
- utility_name_eia (from EIA860)
- utility_id_pudl
Args:
pudl_engine (sqlalchemy.engine.Engine): SQLAlchemy connection engine
for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
Returns:
pandas.DataFrame: A DataFrame containing plant and utility IDs and
names from EIA 860.
"""
# Contains the one-to-one mapping of EIA plants to their operators
plants_eia = (
plants_eia860(pudl_engine, start_date=start_date, end_date=end_date)
.drop(
[
"utility_id_pudl",
"city",
"state", # Avoid dupes in merge
"zip_code",
"street_address",
"utility_name_eia",
],
axis="columns",
)
.dropna(subset=["utility_id_eia"]) # Drop unmergable records
)
utils_eia = utilities_eia860(pudl_engine, start_date=start_date, end_date=end_date)
# to avoid duplicate columns on the merge...
out_df = pd.merge(
plants_eia, utils_eia, how="left", on=["report_date", "utility_id_eia"]
)
out_df = (
out_df.loc[
:,
[
"report_date",
"plant_id_eia",
"plant_name_eia",
"plant_id_pudl",
"utility_id_eia",
"utility_name_eia",
"utility_id_pudl",
],
]
.dropna(subset=["report_date", "plant_id_eia", "utility_id_eia"])
.pipe(apply_pudl_dtypes, group="eia")
)
return out_df
def generators_eia860(
pudl_engine: sa.engine.Engine,
start_date=None,
end_date=None,
unit_ids: bool = False,
fill_tech_desc: bool = True,
) -> pd.DataFrame:
"""Pull all fields reported in the generators_eia860 table.
Merge in other useful fields including the latitude & longitude of the
plant that the generators are part of, canonical plant & operator names and
the PUDL IDs of the plant and operator, for merging with other PUDL data
sources.
Fill in data for adjacent years if requested, but never fill in earlier
than the earliest working year of data for EIA923, and never add more than
    one year after the reported data (since there should be at most a one
    year lag between EIA923 and EIA860 reporting).
This also fills the ``technology_description`` field according to matching
``energy_source_code_1`` values. It will only do so if the ``energy_source_code_1``
is consistent throughout years for a given plant.
Args:
pudl_engine: SQLAlchemy connection engine for the PUDL DB.
start_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
end_date (date-like): date-like object, including a string of the
form 'YYYY-MM-DD' which will be used to specify the date range of
records to be pulled. Dates are inclusive.
unit_ids: If True, use several heuristics to assign
individual generators to functional units. EXPERIMENTAL.
fill_tech_desc: If True, backfill the technology_description
field to years earlier than 2013 based on plant and
energy_source_code_1 and fill in technologies with only one matching code.
Returns:
A DataFrame containing all the fields of the EIA 860 Generators table.
"""
pt = pudl.output.pudltabl.get_table_meta(pudl_engine)
# Almost all the info we need will come from here.
gens_eia860_tbl = pt["generators_eia860"]
gens_eia860_select = sa.sql.select(gens_eia860_tbl)
# To get plant age
generators_entity_eia_tbl = pt["generators_entity_eia"]
generators_entity_eia_select = sa.sql.select(generators_entity_eia_tbl)
# To get the Lat/Lon coordinates
plants_entity_eia_tbl = pt["plants_entity_eia"]
plants_entity_eia_select = sa.sql.select(plants_entity_eia_tbl)
if start_date is not None:
start_date = pd.to_datetime(start_date)
gens_eia860_select = gens_eia860_select.where(
gens_eia860_tbl.c.report_date >= start_date
)
if end_date is not None:
end_date = | pd.to_datetime(end_date) | pandas.to_datetime |
from __future__ import absolute_import, division, print_function
import datetime
import pandas as pd
from config import *
def _drop_in_time_slice(m2m, m2b, m5cb, time_slice, to_drop):
"""Drops certain members from data structures, only in a given time slice.
This can be useful for removing people who weren't there on a specific day, or non-participants.
"""
logger.debug("Removing data: {} {}".format(time_slice, to_drop))
m2m.drop(m2m.loc[(time_slice, slice(None), to_drop), :].index, inplace=True)
m2m.drop(m2m.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m2b.drop(m2b.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m5cb.drop(m5cb.loc[(time_slice, to_drop), :].index, inplace=True)
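# Self-contained illustration of _drop_in_time_slice on tiny stand-in frames that
# mimic the real index structure; member IDs, beacon IDs and the date are made up.
def _demo_drop_in_time_slice():
    day = pd.Timestamp('2018-07-04')
    m2m = pd.DataFrame(
        {'rssi': [-60, -70, -80]},
        index=pd.MultiIndex.from_tuples(
            [(day, 'A', 'B'), (day, 'A', 'C'), (day, 'C', 'B')],
            names=['datetime', 'member1', 'member2']))
    m2b = pd.DataFrame(
        {'rssi': [-55, -65]},
        index=pd.MultiIndex.from_tuples(
            [(day, 'A', 1), (day, 'C', 2)],
            names=['datetime', 'member', 'beacon']))
    m5cb = pd.DataFrame(
        {'beacons': ['1|2', '2|3']},
        index=pd.MultiIndex.from_tuples(
            [(day, 'A'), (day, 'C')],
            names=['datetime', 'member']))
    _drop_in_time_slice(m2m, m2b, m5cb, day, ['C'])
    print(len(m2m), len(m2b), len(m5cb))  # -> 1 1 1: every row involving 'C' is gone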
def _clean_m2m(where, participation_dates, battery_sundays):
logger.info('loading m2m')
m2m = pd.read_hdf(dirty_store_path, 'proximity/member_to_member', where=where)
logger.info("original m2m len: {}".format(len(m2m)))
if len(m2m) == 0:
return
logger.info('cleaning m2m')
m2m.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m2m['keep'] = False
# For m2m, we need to look on both sides. Therefore, for each participating member, we will
# turn on a "keep" flag if the member is valid on either sides of the connection. Then, we will only keep
# records in which both sides are valid
logger.info('Keeping only dates relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m2m.member1 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side1_cond, 'keep_1'] = True
side2_cond = ((m2m.member2 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side2_cond, 'keep_2'] = True
m2m.loc[(m2m.keep_1 == True) & (m2m.keep_2 == True), 'keep'] = True
del m2m['keep_1']
del m2m['keep_2']
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m2m.datetime >= s.battery_period_start) & (m2m.datetime <= s.battery_period_end))
m2m.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
m2m = m2m[m2m.keep == True]
logger.info("after cleaning: {}".format(len(m2m)))
del m2m['keep']
m2m.set_index(['datetime','member1','member2'], inplace=True)
logger.info("appending cleaned m2m to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_to_member', m2m)
del m2m
def _clean_m2b(where, participation_dates, battery_sundays):
logger.info('loading m2b')
m2b = pd.read_hdf(dirty_store_path, 'proximity/member_to_beacon', where=where)
logger.info("original m2b len: {}".format(len(m2b)))
if len(m2b) == 0:
return
logger.info("cleaning m2b")
m2b.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m2b['keep'] = False
# Only keep data within participation dates
    logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m2b.member == p.member) & (m2b.datetime >= p.start_date_ts) & (m2b.datetime < p.end_date_ts))
m2b.loc[side1_cond, 'keep'] = True
logger.info('So far, keeping {} rows'.format(len(m2b[m2b['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m2b.datetime >= s.battery_period_start) & (m2b.datetime <= s.battery_period_end))
m2b.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m2b[m2b['keep'] == True])))
m2b = m2b[m2b.keep == True]
logger.info("after cleaning: {}".format(len(m2b)))
del m2b['keep']
m2b.set_index(['datetime','member','beacon'], inplace=True)
logger.info("appending cleaned m2b to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_to_beacon', m2b)
del m2b
def _clean_m5cb(where, participation_dates, battery_sundays):
    logger.info('loading m5cb')
    m5cb = pd.read_hdf(dirty_store_path, 'proximity/member_5_closest_beacons', where=where)
    logger.info("original m5cb len: {}".format(len(m5cb)))
    if len(m5cb) == 0:
        return
    logger.info("cleaning m5cb")
m5cb.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m5cb['keep'] = False
# Only keep data within participation dates
    logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m5cb.member == p.member) & (m5cb.datetime >= p.start_date_ts) & (m5cb.datetime < p.end_date_ts))
m5cb.loc[side1_cond, 'keep'] = True
logger.info('So far, keeping {} rows'.format(len(m5cb[m5cb['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m5cb.datetime >= s.battery_period_start) & (m5cb.datetime <= s.battery_period_end))
m5cb.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m5cb[m5cb['keep'] == True])))
m5cb = m5cb[m5cb.keep == True]
logger.info("after cleaning: {}".format(len(m5cb)))
del m5cb['keep']
m5cb.set_index(['datetime', 'member'], inplace=True)
logger.info("appending cleaned m5cb to {}".format(clean_store_path))
with pd.HDFStore(clean_store_path) as store:
store.append('proximity/member_5_closest_beacons', m5cb)
del m5cb
def _clean_date_range(start_ts, end_ts, members_metadata):
"""
Clean a given date range for all relevant dataframes
"""
##################################################
# figure out what to drop and what to keep
##################################################
where = "datetime >= '" + str(start_ts) + "' & datetime < '" + str(end_ts) + "'"
# Convert text into timestamps with timezone
period1_start_ts = pd.Timestamp(period1_start, tz=time_zone)
period2_end_ts = pd.Timestamp(period2_end, tz=time_zone)
# Start and end dates for participants
participation_dates = members_metadata[members_metadata['participates'] == 1][['member', 'start_date', 'end_date']]
participation_dates['start_date_ts'] = pd.to_datetime(participation_dates['start_date']).dt.tz_localize(time_zone)
participation_dates['end_date_ts'] = | pd.to_datetime(participation_dates['end_date']) | pandas.to_datetime |
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import date
import time
def add():
try:
df = pd.read_csv('data pool/raw_ios.csv', index_col=0)
except:
df = pd.DataFrame()
to_add = dict()
month = input('Month: ')
with open('names_urls.txt', 'r', encoding='UTF-8') as f:
names_urls = f.read().splitlines()
names_urls = [ele for ele in names_urls if ele != '' and ele != "\ufeff"]
if month in df.columns:
command = input(f'{month} is already included in the file, do you want to replace it? (y/other keys to leave) ')
if command != 'y':
return None
df.drop(month, axis=1, inplace=True)
for i, name_url in enumerate(names_urls):
if (i % 10 == 0 and i > 0):
print('Taking a 10-second break to pretend as a human...')
time.sleep(10)
app_name = name_url.split(',')[0]
url = name_url.split(',')[-1]
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
id = int(url.split('/')[-2])
print(i+1, response, id, app_name)
target = [element for element in list(soup.body) if 'downloads_and_revenue' in element][0]
target = '{' + target.replace(' ', '') + '}'
kocomponent = 'kocomponent'
name = 'name'
params = 'params'
null = float('NaN')
true = True
false = False
target = eval(target)
downloads = target['kocomponent']['params']['downloads_and_revenue']['downloads']
try:
if '<' in downloads:
downloads = downloads.replace('<', '')
if 'k' in downloads:
downloads = int(downloads[:-1])*1000
elif 'm' in downloads:
downloads = int(downloads[:-1])*1000000
except:
pass
revenue = target['kocomponent']['params']['downloads_and_revenue']['revenue']
try:
if '<' in revenue:
revenue = revenue.replace('<', '')
if 'k' in revenue:
revenue = int(revenue[1:-1])*1000
elif 'm' in revenue:
revenue = int(revenue[1:-1])*1000000
except:
pass
rating = target['kocomponent']['params']['current_rating']
rating_count = target['kocomponent']['params']['current_rating_count']
headers = {
'authority': 'sensortower.com',
'accept': 'application/json, text/javascript, */*; q=0.01',
'x-csrf-token': '<KEY>',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'sec-gpc': '1',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://sensortower.com/',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
'cookie': 'locale=en; session=c5e603284f395fb6b917669a20432f14; OptanonConsent=isIABGlobal=false&datestamp=Mon+Jul+05+2021+11%3A02%3A31+GMT%2B0800+(Hong+Kong+Standard+Time)&version=6.16.0&hosts=&landingPath=NotLandingPage&groups=C0004%3A1%2CC0003%3A1%2CC0002%3A1%2CC0001%3A1&AwaitingReconsent=false&geolocation=HK%3BHCW; OptanonAlertBoxClosed=2021-07-05T03:02:31.664Z; amplitude_id_6edb64137a31fa337b6f553dbccf2d8bsensortower.com=<KEY>',
}
params = (
('app_id', id),
('country', 'US'),
('limit', '1'),
)
response = requests.get('https://sensortower.com/api/ios/visibility_scores', headers=headers, params=params)
score = eval(response.content.decode('utf8'))[0]['total_score']
to_add[app_name] = str([downloads, revenue, rating, rating_count, score])
time.sleep(3)
d = {month:to_add}
to_add_df = pd.DataFrame(d)
df = pd.concat([df, to_add_df], axis=1)
df.to_csv('data pool/raw_ios.csv')
def e0(self):
nan = float('NaN')
try:
return eval(self)[0]
except:
return float('NaN')
def e1(self):
nan = float('NaN')
try:
return eval(self)[1]
except:
return float('NaN')
def e2(self):
nan = float('NaN')
try:
return eval(self)[2]
except:
return float('NaN')
def e3(self):
nan = float('NaN')
try:
return eval(self)[3]
except:
return float('NaN')
def e4(self):
nan = float('NaN')
try:
return eval(self)[4]
except:
return float('NaN')
def extract():
df = pd.read_csv('data pool/raw_ios.csv', index_col=0)
df1 = pd.DataFrame()
for col in df:
df1[col] = df[col].apply(e0)
df1.to_excel('lab/downloads.xlsx')
df = pd.read_csv('data pool/raw_ios.csv', index_col=0)
df1 = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import argparse
from astropy.io import fits
from fiery_llama.matched_filters import PointFilter, cubeify
parser = argparse.ArgumentParser()
help_data = "Must be .h5 or .fits file type."
parser.add_argument("data", help=help_data)
table_help = "if .h5 file provide table name"
parser.add_argument("--data-table", help=table_help)
help_signal = "Must be .h5 or .fits file type."
parser.add_argument("signal", help=help_signal)
parser.add_argument("--signal-table", help=table_help)
parser.add_argument("--nra", default=100)
parser.add_argument("--ndec", default=100)
_help = "the columns to filter on, if not given defaults to all filter columns"
parser.add_argument("--signal-columns", nargs="*", help=_help)
parser.add_argument("--create-image")
if __name__ == "__main__":
args = parser.parse_args()
if args.data_table is not None:
data = | pd.read_hdf(args.data, args.data_table) | pandas.read_hdf |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
def resample_timeseries_dataframe(df,
dt_col,
interval,
start_time,
end_time,
merge_mode="mean"):
'''
resample and return a dataframe with a new time interval.
:param df: input dataframe.
:param dt_col: name of datetime column.
:param interval: pandas offset aliases, indicating time interval of the output dataframe
:param start_time: start time of the output dataframe
:param end_time: end time of the output dataframe
:param merge_mode: if current interval is smaller than output interval,
we need to merge the values in a mode. "max", "min", "mean"
or "sum" are supported for now.
'''
assert dt_col in df.columns, f"dt_col {dt_col} can not be found in df."
assert pd.isna(df[dt_col]).sum() == 0, "There is N/A in datetime col"
assert | pd.Timestamp(start_time) | pandas.Timestamp |
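# A minimal, generic illustration of the resampling described in the docstring above
# (not the original implementation): a mean-merged down-sample in plain pandas.
def _demo_resample(df, dt_col="datetime", interval="30T", merge_mode="mean"):
    return (df.set_index(dt_col)
              .resample(interval)
              .agg(merge_mode)
              .reset_index())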
import pandas as pd
file_unitedstates = open("./data/unitedstates.csv", "r")
file_russia = open("./data/russia.csv", "r")
file_japan = open("./data/japan.csv", "r")
unitedstates = file_unitedstates.read()
russia = file_russia.read()
japan = file_japan.read()
the_set = []
df_us = | pd.read_csv("./data/unitedstates.csv", sep=",") | pandas.read_csv |
import logging
from typing import Any, Dict, Union
from starlette.exceptions import HTTPException
from .runner import ModelServingRunner
from .serving import ModelServing
try:
import modin.pandas as pandas
import numpy
except ImportError:
try:
import pandas
import numpy
except ImportError:
raise ImportError(
"Cannot import pandas. Please install foxcross using foxcross[pandas] or"
" foxcross[modin]"
)
logger = logging.getLogger(__name__)
class DataFrameModelServing(ModelServing):
# TODO: probably should limit to orient choices
pandas_orient = "index"
def predict(
self, data: Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]
) -> Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]:
"""
Method to define how the model performs a prediction.
Must return a pandas DataFrame or a dictionary of pandas DataFrames
"""
raise NotImplementedError(
"You must implement your model serving's predict method"
)
def pre_process_input(
self, data: Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]
) -> Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]:
"""Hook to enable pre-processing of input data"""
return super().pre_process_input(data)
def post_process_results(
self, data: Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]
) -> Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]:
"""Hook to enable post-processing of output data"""
return super().post_process_results(data)
def _format_input(
self, data: Dict
) -> Union[pandas.DataFrame, Dict[str, pandas.DataFrame]]:
try:
if data.pop("multi_dataframe", None) is True:
logger.debug("Formatting pandas multi_dataframe input")
return {key: | pandas.DataFrame(value) | pandas.DataFrame |
from pytorch_lightning.callbacks import ModelCheckpoint
from test_tube import Experiment
from pytorch_lightning import Trainer
import argparse
import logging
import torch
import scipy
import os
import pickle as pkl
import pandas as pd
from torch.utils.data import DataLoader
from src.configs.configs import TGCN as TGCNConfig
from src.configs.configs import Data as DataConfig
from src.data_loader.reader import read_cluster_mapping
from src.data_loader.tensor_dataset import GraphTensorDataset
from src.logs import get_logger_settings, setup_logging
from src.models.tgcn.temporal_spatial_model import TGCN
from src.utils.sparse import dense_to_sparse, sparse_scipy2torch
from src.module import DATACONFIG_GETTER
cfg = DATACONFIG_GETTER()
def get_datasets():
mapping = read_cluster_mapping()
cluster_idx_ids = dict()
datasets = list()
adjs = list()
edgelists = list()
cluster_idx = 0
for cluster_id in mapping:
# cache them in h5
if not os.path.exists(os.path.join(cfg['save_dir_data'], f"cluster_id={cluster_id}.hdf5")):
# some clusters do not exist in the cache folder, ignore them.
continue
adj = scipy.sparse.load_npz(os.path.join(cfg['save_dir_adj'], f"cluster_id={cluster_id}.npz"))
adjs.append(adj)
edgelist = pkl.load(open(os.path.join(cfg['save_dir_adj'], f"cluster_id={cluster_id}.edgelist"), 'rb'))
edgelists.append(edgelist)
datasets.append(os.path.join(cfg['save_dir_data'], f"cluster_id={cluster_id}/"))
cluster_idx_ids[cluster_idx] = cluster_id
cluster_idx += 1
return datasets, adjs, cluster_idx_ids, edgelists
def train():
datasets, adjs, cluster_idx_ids, _ = get_datasets()
# PyTorch summarywriter with a few bells and whistles
exp = Experiment(save_dir=cfg['save_dir_model'])
checkpoint_callback = ModelCheckpoint(
filepath=cfg['save_dir_checkpoints'],
save_best_only=True,
verbose=True,
monitor='avg_val_mae',
mode='min'
)
# pass in experiment for automatic tensorboard logging.
trainer = Trainer(experiment=exp,
max_nb_epochs=TGCNConfig.max_nb_epochs,
train_percent_check=TGCNConfig.train_percent_check,
checkpoint_callback=checkpoint_callback,
gpus=1) if torch.cuda.is_available() else \
Trainer(experiment=exp,
max_nb_epochs=TGCNConfig.max_nb_epochs,
train_percent_check=TGCNConfig.train_percent_check,
checkpoint_callback=checkpoint_callback)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = TGCN(input_dim=TGCNConfig.input_dim,
hidden_dim=TGCNConfig.hidden_dim,
layer_dim=TGCNConfig.layer_dim,
output_dim=TGCNConfig.output_dim,
adjs=adjs,
datasets=datasets,
cluster_idx_ids=cluster_idx_ids,
device=device)
model = model.to(device)
trainer.fit(model)
def load_model(weights_path, adjs, datasets, cluster_idx_ids, device=None):
checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)
model = TGCN(input_dim=TGCNConfig.input_dim,
hidden_dim=TGCNConfig.hidden_dim,
layer_dim=TGCNConfig.layer_dim,
output_dim=TGCNConfig.output_dim,
adjs=adjs,
datasets=datasets,
cluster_idx_ids=cluster_idx_ids,
device=device)
model.load_state_dict(checkpoint['state_dict'])
model.on_load_checkpoint(checkpoint)
model.freeze()
return model
def get_data_loader(datasets, adjs, cluster_idx_ids, mode):
if mode == "train":
time_steps = DataConfig.train_num_steps
elif mode == "valid":
time_steps = DataConfig.valid_num_steps
else:
time_steps = DataConfig.test_num_steps
ds = GraphTensorDataset(datasets, adj_list=adjs,
mode=mode,
cluster_idx_ids=cluster_idx_ids,
time_steps=time_steps)
return DataLoader(ds, batch_size=1, shuffle=False, pin_memory=True, num_workers=0)
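# Quick sketch: peek at one validation batch from the loader above. Assumes the cached
# cluster HDF5 and adjacency files referenced in cfg are present locally; nothing is
# assumed about the batch layout beyond it being iterable.
def _demo_peek_validation_batch():
    datasets, adjs, cluster_idx_ids, _ = get_datasets()
    adjs = [sparse_scipy2torch(adj) for adj in adjs]
    loader = get_data_loader(datasets, adjs, cluster_idx_ids, mode="valid")
    batch = next(iter(loader))
    print([getattr(item, "shape", type(item)) for item in batch])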
def test():
datasets, adjs, cluster_idx_ids, edgelists = get_datasets()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = load_model(cfg['save_dir_checkpoints'],
adjs=adjs, cluster_idx_ids=cluster_idx_ids, datasets=datasets, device=device)
model = model.to(device)
adjs = [sparse_scipy2torch(adj) for adj in adjs]
train_dataloader = get_data_loader(datasets, adjs, cluster_idx_ids, mode="train")
valid_dataloader = get_data_loader(datasets, adjs, cluster_idx_ids, mode="valid")
test_dataloader = get_data_loader(datasets, adjs, cluster_idx_ids, mode="test")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dl_idx = 0
for dl in (train_dataloader, valid_dataloader, test_dataloader):
if dl_idx == 0:
time_steps = DataConfig.train_num_steps
base_steps = 0
mode = "train"
elif dl_idx == 1:
time_steps = DataConfig.valid_num_steps
base_steps = DataConfig.train_num_steps
mode = "valid"
else:
time_steps = DataConfig.test_num_steps
            base_steps = DataConfig.train_num_steps + DataConfig.valid_num_steps
mode = "test"
speed_tile = {}
prev_gidx = 0
with torch.no_grad():
for batch_nb, batch in enumerate(dl):
time_step = batch_nb % time_steps + base_steps
graph_idx = int(batch_nb / time_steps)
if graph_idx > prev_gidx:
df = | pd.DataFrame.from_dict(speed_tile) | pandas.DataFrame.from_dict |
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
    tau_days : Timedelta
        time delay before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
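# Minimal example: ramp a scalar "contact level" from 1.0 down to 0.3 over l=5 days
# starting at t_start (the dates are placeholders). The same call works element-wise
# for contact matrices. The ramp is not clamped, so it is only meaningful inside the
# transition window [t_start, t_start + l days].
def _demo_ramp_fun():
    t_start = pd.Timestamp('2020-03-15')
    for day in range(6):
        t = t_start + pd.Timedelta(days=day)
        print(t.date(), round(ramp_fun(1.0, 0.3, t, t_start, l=5), 2))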
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
            place = place / place.sum(axis=1)[:, np.newaxis]
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
        Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
                place = default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
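# Wiring sketch: how the loader and the update class above are typically combined.
# Requires the interim Proximus mobility files on disk, so treat it as illustrative.
def _demo_mobility_function():
    all_mob, avg_mob = load_all_mobility_data('prov', dtype='fractional')
    mobility_update = make_mobility_update_function(all_mob, avg_mob)
    P = mobility_update.mobility_wrapper_func(pd.Timestamp('2021-01-15'), None, None)
    print(P.shape, P.sum(axis=1))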
###################
## VOC functions ##
###################
class make_VOC_function():
"""
    Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
    The current implementation covers the alpha through delta strains.
    If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variants is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
    A logistic model fitted to prevalence data of the delta variant is always used.
    Input
    -----
    *df_abc: pd.DataFrame (optional)
        Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
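# Quick check: evaluate the default (logistic-only) VOC function at a few dates; each
# call prints the three variant fractions used by the model at that date.
def _demo_VOC_function():
    VOC_function = make_VOC_function()
    for d in ('2020-06-01', '2021-03-01', '2021-08-01'):
        print(d, VOC_function(pd.Timestamp(d), states=None, param=None))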
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
    Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first-dose data by Sciensano are used. For dates beyond the available data, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of consired spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
        First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" doses per day,
        administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
        This vaccination strategy does not distinguish between vaccination doses; individuals are transferred to the vaccination circuit after some time delay following the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
        daily_doses : int
            Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
        stop_idx : int
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
        Returns
        -------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
        # Only for the non-spatial, multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
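# Illustrative, self-contained sketch (not part of the model code above): the prioritized
# dose-allocation loop used in `unidose_2021_vaccination_campaign` can be exercised in
# isolation as below. The names `allocate_daily_doses`, `eligible` and `pop` are hypothetical
# and chosen for this example only; the numbers in the usage comment are made up.
import numpy as np

def allocate_daily_doses(eligible, pop, daily_doses, vacc_order, stop_idx, refusal):
    """Distribute `daily_doses` over age groups following `vacc_order`, skipping refusers."""
    N_vacc = np.zeros(len(eligible))
    idx = 0
    while daily_doses > 0:
        if idx == stop_idx:
            # no more age groups left to vaccinate
            daily_doses = 0
        elif eligible[vacc_order[idx]] - pop[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
            # the current age group absorbs all remaining doses for this day
            N_vacc[vacc_order[idx]] = daily_doses
            daily_doses = 0
        else:
            # vaccinate everyone still willing in this age group and move to the next one
            N_vacc[vacc_order[idx]] = eligible[vacc_order[idx]] - pop[vacc_order[idx]]*refusal[vacc_order[idx]]
            daily_doses = daily_doses - (eligible[vacc_order[idx]] - pop[vacc_order[idx]]*refusal[vacc_order[idx]])
            idx = idx + 1
    return N_vacc

# Example with three age groups, oldest (index 2) first:
# allocate_daily_doses(eligible=np.array([1000., 800., 500.]), pop=np.array([1000., 800., 500.]),
#                      daily_doses=900, vacc_order=[2, 1, 0], stop_idx=3, refusal=[0.1, 0.1, 0.1])
# -> array([  0., 450., 450.]): the oldest group takes its 450 willing individuals,
#    the remaining 450 doses go to the next group in the prioritization.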
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
    Class that returns a contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
    Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
                else:
                    # a scalar openness value was provided: broadcast it to all spatial patches
                    try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
            # Schools: broadcast a scalar school openness value to all spatial patches if needed
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
        tau_days : Timedelta
            time delay before the measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
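    # Worked example (comments only): with l = 5 days, ramp_fun interpolates linearly
    # between the old and new policy, e.g. 2 days after t_start it returns
    #   Nc_old + (Nc_new - Nc_old)/5 * 2,
    # i.e. 40% of the way towards Nc_new. delayed_ramp_fun is identical, except that the
    # interpolation clock only starts running tau_days after t_start.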
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
        t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
            Effectivity of contacts at {location}
        date_measures : Timestamp
            Date at which the fourth-wave measures are assumed to start (the function currently hard-codes 2021-11-22)
        scenario : int
            Index selecting one of the predefined work/schools/leisure openness scenarios
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
        t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
        t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for general population
        t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
t33 = pd.Timestamp('2022-04-17') # End of Easter Break
t34 = pd.Timestamp('2022-07-01') # Start of summer holidays
t35 = pd.Timestamp('2022-09-01') # End of summer holidays
t36 = pd.Timestamp('2022-09-21') # Opening of universities
t37 = pd.Timestamp('2022-10-31') # Start of autumn break
t38 = pd.Timestamp('2022-11-06') # End of autumn break
        # Scenario-dependent openness of work, schools and leisure, indexed by `scenario`
        scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
# End of autumn break --> Date of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t25 < t <= t25 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t25, 5)
elif t25 + pd.Timedelta(5, unit='D') < t <= t26:
# End easing in leisure restrictions --> Early schools closure before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t26 < t <= t27:
# Early schools closure before Christmas holiday --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t27 < t <= t28:
# Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario]-0.2, leisure=scenarios_leisure[scenario], transport=scenarios_work[scenario]-0.2, school=0)
elif t28 < t <= t29:
            # Christmas holiday --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t29 < t <= t30:
# End of Measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1, work=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t31 < t <= t32:
# Spring Break --> Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t32 < t <= t33:
# Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t33 < t <= t34:
# Easter --> Summer
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t37 < t <= t38:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
###################
## Spatial model ##
###################
def policies_all_spatial(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
        CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
        t24 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t25 = pd.Timestamp('2022-01-06') # End of Christmas break
t26 = pd.Timestamp('2022-02-28') # Start of Spring Break
t27 = pd.Timestamp('2022-03-06') # End of Spring Break
t28 = pd.Timestamp('2022-04-04') # Start of Easter Break
t29 = pd.Timestamp('2022-04-17') # End of Easter Break
t30 = pd.Timestamp('2022-07-01') # Start of summer holidays
t31 = pd.Timestamp('2022-09-01') # End of summer holidays
t32 = pd.Timestamp('2022-09-21') # Opening of universities
t33 = pd.Timestamp('2022-10-31') # Start of autumn break
t34 = pd.Timestamp('2022-11-06') # End of autumn break
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_spatial_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
        Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
            Effectivity of contacts at {location}
        date_measures : Timestamp
            Date at which the fourth-wave measures are assumed to start (the function currently hard-codes 2021-11-22)
        scenario : int
            Index selecting one of the predefined work/schools/leisure openness scenarios
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t24 = pd.Timestamp('2021-11-22') # Start mandatory telework
t25 = pd.Timestamp('2021-12-18') # Early closing of schools
        t26 = pd.Timestamp('2021-12-26') # Start of Christmas break
        t27 = pd.Timestamp('2022-01-06') # End of Christmas break
t28 = pd.Timestamp('2022-01-28') # End of measures
t29 = pd.Timestamp('2022-02-28') # Start of Spring Break
t30 = pd.Timestamp('2022-03-06') # End of Spring Break
t31 = pd.Timestamp('2022-04-04') # Start of Easter Break
t32 = pd.Timestamp('2022-04-17') # End of Easter Break
t33 = pd.Timestamp('2022-07-01') # Start of summer holidays
t34 = pd.Timestamp('2022-09-01') # End of summer holidays
t35 = pd.Timestamp('2022-09-21') # Opening of universities
t36 = pd.Timestamp('2022-10-31') # Start of autumn break
t37 = pd.Timestamp('2022-11-06') # End of autumn break
        # Scenario-dependent openness of work, schools and leisure, indexed by `scenario`
        scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
            policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t24 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t24, 5)
        elif t24 + pd.Timedelta(5, unit='D') < t <= t25:
            # End easing in leisure restrictions --> Early schools closure before Christmas holiday
            return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
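# Quick sanity check on the loaded data (illustrative cell, not from the original notebook):
# shapes and a preview confirm the files were read correctly before any modelling.
print(train.shape, test.shape)
train.head()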
# Make a datafile with the wastewater data from Biobot and COVID-19 disease outcomes for
# those areas and dates. Put into a format that is good for creating a Flourish chart.
import pandas as pd
from urllib import request
BIOBOT_DOWNLOAD = "https://github.com/biobotanalytics/covid19-wastewater-data/raw/master/wastewater_by_county.csv"
COVID_ACT_NOW_DOWNLOAD = "https://api.covidactnow.org/v2/country/US.timeseries.csv?apiKey=<KEY>"
#COUNTY_POP_LOCAL = "/Users/chuck/Desktop/COVID Programming/US Census/Population_Density_County.csv"
BIOBOT_LOCAL = "/Users/chuck/Desktop/COVID Programming/Biobot/wastewater_by_county.csv"
COVID_ACT_NOW_LOCAL = "/Users/chuck/Desktop/COVID Programming/Covid Act Now/counties.timeseries.csv"
BIOBOT_USA_CHART_DATA = "biobot_vs_outcomes_usa.tsv"
BIOBOT_REGIONS_CHART_DATA = "biobot_vs_outcomes_regions.tsv"
# Get the population density of US counties, and tweak as needed.
# This is June 2020 data that I downloaded once from https://covid19.census.gov/datasets/21843f238cbb46b08615fc53e19e0daf/explore
#CountyPopDF = pd.read_csv(COUNTY_POP_LOCAL, sep=',', header='infer', dtype=str)
#CountyPopDF = CountyPopDF[["GEOID", "B01001_calc_PopDensity"]]
#CountyPopDF = CountyPopDF.rename(columns={"GEOID":"FIPS", "B01001_calc_PopDensity":"DensitySqKm"})
# Get the latest data from Biobot, and tweak as we need it.
request.urlretrieve(BIOBOT_DOWNLOAD, BIOBOT_LOCAL)
BiobotDF = pd.read_csv(BIOBOT_LOCAL, sep=',', header='infer', dtype=str)
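# Illustrative next step (an assumption, not from the original script): inspect the downloaded
# wastewater table before selecting/renaming columns, mirroring the census-data handling above.
# No specific Biobot column names are assumed here.
print(BiobotDF.columns.tolist())
print(BiobotDF.head())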
"""
Collection of functions used for the stitching.
IMPORTANT:
The identification of the organization of the fovs in the composite image
can be simplified if the (0,0) coords of the stage/camera are
set to the same position for all machines used in the analysis.
In our case we started running experiments with the coords not adjusted,
so the position of (0,0) is different for each machine
used to generate the data.
"""
from typing import *
import logging
import shutil
import copy
import itertools
import math
import pickle
import zarr
import sys
import operator
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from itertools import groupby
from pathlib import Path
from sklearn.neighbors import NearestNeighbors
import sklearn.linear_model as linmod
from skimage.feature import register_translation
from skimage import measure
from scipy.optimize import minimize
from pynndescent import NNDescent
from pysmFISH.logger_utils import selected_logger
from pysmFISH.fovs_registration import create_fake_image
from pysmFISH.data_models import Dataset
from pysmFISH import io
class organize_square_tiles():
"""Class designed to determine the tile organization and identify the coords of the
overlapping regions between the tiles.
IMPORTANT: The normalize_coords method should be adjusted according to the
setup of the microscope.
"""
def __init__(self, experiment_fpath:str,dataset: pd.DataFrame,
metadata:Dict,round_num:int):
"""Class initialization
Args:
experiment_fpath (str): Path to the experiment to process
dataset (pd.DataFrame): Properties of the images of the experiment
metadata (Dict): Metadata describing the experiment
round_num (int): Reference acquisition round number
"""
self.logger = selected_logger()
self.experiment_fpath = Path(experiment_fpath)
self.dataset = dataset
self.metadata = metadata
self.round_num = round_num
self.experiment_name = self.metadata['experiment_name']
self.stitching_channel = self.metadata['stitching_channel']
self.overlapping_percentage = int(self.metadata['overlapping_percentage']) / 100
self.pixel_size = self.metadata['pixel_microns']
self.img_width = self.metadata['img_width']
self.img_height = self.metadata['img_height']
logging.getLogger('matplotlib.font_manager').disabled = True
if self.img_width == self.img_height:
self.img_size = self.img_width
else:
self.logger.error(f'the images to stitch are not square')
sys.exit(f'the images to stitch are not square')
def extract_microscope_coords(self):
"""Method to extract images coords in the stage reference
system"""
selected = self.dataset.loc[self.dataset.round_num == self.round_num,
['round_num','fov_num','fov_acquisition_coords_x','fov_acquisition_coords_y']]
selected.drop_duplicates(subset=['fov_num'],inplace=True)
selected.sort_values(by='fov_num', ascending=True, inplace=True)
self.x_coords = selected.loc[:,'fov_acquisition_coords_x'].to_numpy()
self.y_coords = selected.loc[:,'fov_acquisition_coords_y'].to_numpy()
def normalize_coords(self):
"""
Normalize the coords according to how the stage/camera are set.
This function must be modified according to the stage/camera setup.
ROBOFISH1 has stage with x increasing left-> right and y top->bottom
------> (x)
|
|
V (y)
ROBOFISH2 has stage with x increasing right-> left and y top->bottom
(x) <------
|
|
V (y)
ROBOFISH3 has stage with x increasing left-> right and y bottom->top
^ (y)
|
|
------> (x)
Axis modifications steps:
(1) The reference system will be first converted to image style:
------> (x)
|
|
V (y)
This step will cause a change in the position of the reference corner
for each fov. After image acquisition the reference corner is top-left
however after converting the axis direction to image-style the reference corner
will change postion:
ROBOFISH1: top-left --> top-left
ROBOFISH2: top-left --> top-right
ROBOFISH3: top-left --> bottom-left
(2) The coords will be translated to (0,0)
(3) then to matrix (python) notation
------> (columns)
|
|
V (rows)
"""
# port the coords to image type coords
if self.metadata['machine'] == 'ROBOFISH2':
self.x_coords = - self.x_coords
self.reference_corner_fov_position = 'top-right'
elif self.metadata['machine'] == 'ROBOFISH3':
self.x_coords = - self.x_coords
self.y_coords = - self.y_coords
self.reference_corner_fov_position = 'bottom-left'
elif self.metadata['machine'] == 'ROBOFISH1':
self.reference_corner_fov_position = 'top-left'
elif self.metadata['machine'] == 'NOT_DEFINED':
self.logger.error(f'Need to define the specs for stitching NOT_DEFINED machine')
sys.exit(f'Need to define the specs for stitching NOT_DEFINED machine')
else:
self.logger.error(f'define the right machine used to collected the data')
sys.exit(f'define the right machine used to collected the data')
# shift the coords to reference point (0,0)
# consider that we get the top-right corner of the image as well
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
x_max = np.amax(self.x_coords)
y_max = np.amax(self.y_coords)
# Put the coords to zero
if x_min >=0 :
self.x_coords = self.x_coords - x_min
else:
self.x_coords = self.x_coords + np.abs(x_min)
if y_min>0:
self.y_coords = self.y_coords - y_min
else:
self.y_coords = self.y_coords + np.abs(y_min)
# if x_max >=0 :
# self.x_coords = self.x_coords - x_min
# else:
# self.x_coords = self.x_coords + np.abs(x_min)
# if y_max>0:
# self.y_coords = self.y_coords - y_min
# else:
# self.y_coords = self.y_coords + np.abs(y_min)
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
# move coords to pxl space
self.tile_corners_coords_pxl = adjusted_coords / self.pixel_size
# def save_graph_original_coords(self):
# to correct because I already converted the coords to image
# # Turn interactive plotting off
# saving_fpath = self.experiment_fpath / 'output_figures' / 'microscope_space_tiles_organization.png'
# plt.ioff()
# # Create image type axes
# labels = [str(nr) for nr in np.arange(self.x_coords.shape[0])]
# fig = plt.figure(figsize=(20,10))
# plt.plot(self.x_coords,self.y_coords,'or')
# for label, x, y in zip(labels, self.x_coords,self.y_coords):
# plt.annotate(
# label,
# xy=(x,y), xytext=(-2, 2),
# textcoords='offset points', ha='center', va='bottom',fontsize=12)
# plt.tight_layout()
# plt.savefig(saving_fpath)
def save_graph_image_space_coords(self):
"""Method used to save the organization of the tiles
"""
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'image_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.tile_corners_coords_pxl.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.gca().invert_yaxis()
plt.plot(self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0],'or')
for label, x, y in zip(labels, self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0]):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def identify_adjacent_tiles(self):
"""Method that use Nearest neighbors to identify the beighbouring tiles
"""
shift_percent_tolerance = 0.05
searching_radius = self.img_size - (self.img_size*self.overlapping_percentage) + (self.img_size*shift_percent_tolerance)
nn = NearestNeighbors(n_neighbors=5,radius=searching_radius, metric='euclidean')
nn.fit(self.tile_corners_coords_pxl)
self.dists, self.indices = nn.kneighbors(self.tile_corners_coords_pxl, return_distance=True)
def determine_overlapping_regions(self):
"""Method used to calculate the coords of the overlapping regions between the tiles.
"""
# remember that overlapping region can be an empty dictionary
self.overlapping_regions = {}
self.overlapping_order ={}
for idx in np.arange(self.indices.shape[0]):
self.overlapping_regions[idx] = {}
self.overlapping_order[idx] = {}
for idx in np.arange(self.indices.shape[0]):
            # Determine the indices that identify the correct adjacent tiles
processing_indices = self.indices[idx,:]
processing_dists = self.dists[idx,:]
ref_tile = processing_indices[0]
self.overlapping_regions[ref_tile] = {}
self.overlapping_order[ref_tile] = {}
trimmed_indices = processing_indices[1:]
trimmed_dists = processing_dists[1:]
idx_adj = np.where(trimmed_dists < self.img_size)
adj_tiles_id = trimmed_indices[idx_adj]
adj_cpls = [(ref_tile, adj_tile) for adj_tile in adj_tiles_id]
# remove pairs that are already selected
only_new_cpls = [cpl for cpl in adj_cpls if (cpl[1],cpl[0]) not in self.overlapping_regions[cpl[1]].keys()]
# only_new_cpls = [cpl for cpl in adj_cpls]
if self.reference_corner_fov_position == 'top-left':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_height
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_height
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_width
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_width
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
elif self.reference_corner_fov_position == 'top-right':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_height
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_height
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords - self.img_width
c_br = tile2_c_coords
col_order = ('right','left')
else:
c_tl = tile2_c_coords - self.img_width
c_br = tile1_c_coords
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
elif self.reference_corner_fov_position == 'bottom-left':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords - self.img_height
r_br = tile2_r_coords
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords - self.img_height
r_br = tile1_r_coords
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_width
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_width
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
def run_tiles_organization(self):
"""Method used to run all the methods
"""
self.extract_microscope_coords()
# self.save_graph_original_coords()
self.normalize_coords()
self.save_graph_image_space_coords()
self.identify_adjacent_tiles()
self.determine_overlapping_regions()
fname = self.experiment_fpath / 'results' / 'microscope_tile_corners_coords_pxl.npy'
np.save(fname,self.tile_corners_coords_pxl)
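# --- Usage sketch (illustrative only, never called by the pipeline) ---
# Shows how the tile-organization class above is typically driven, assuming the
# enclosing class is organize_square_tiles (the name used later in this module).
# The experiment path, dataset and metadata arguments are placeholders.
def _example_run_tiles_organization(experiment_fpath, dataset, metadata):
    tiles_org = organize_square_tiles(experiment_fpath, dataset, metadata, round_num=1)
    tiles_org.run_tiles_organization()
    # Pixel coords of the tile reference corners (one row per fov)
    coords = tiles_org.tile_corners_coords_pxl
    # {ref_tile: {(ref_tile, adj_tile): [r_tl, r_br, c_tl, c_br]}}
    overlaps = tiles_org.overlapping_regions
    return coords, overlaps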
class organize_square_tiles_old_room():
"""
    Class used to identify the organization of the tiles before the
reorganization of the Robofish room of April 2021 when Robofish3
was assembled.
"""
def __init__(self, experiment_fpath:str,dataset, metadata:Dict,round_num:int):
"""
        round_num (int): reference round used to extract the stage coords
"""
self.logger = selected_logger()
self.experiment_fpath = Path(experiment_fpath)
self.dataset = dataset
self.metadata = metadata
self.round_num = round_num
self.experiment_name = self.metadata['experiment_name']
self.stitching_channel = self.metadata['stitching_channel']
self.overlapping_percentage = int(self.metadata['overlapping_percentage']) / 100
self.pixel_size = self.metadata['pixel_microns']
self.img_width = self.metadata['img_width']
self.img_height = self.metadata['img_height']
logging.getLogger('matplotlib.font_manager').disabled = True
if self.img_width == self.img_height:
self.img_size = self.img_width
else:
self.logger.error(f'the images to stitch are not square')
sys.exit(f'the images to stitch are not square')
def extract_microscope_coords(self):
selected = self.dataset.loc[self.dataset.round_num == self.round_num,
['round_num','fov_num','fov_acquisition_coords_x','fov_acquisition_coords_y']]
selected.drop_duplicates(subset=['fov_num'],inplace=True)
selected.sort_values(by='fov_num', ascending=True, inplace=True)
self.x_coords = selected.loc[:,'fov_acquisition_coords_x'].to_numpy()
self.y_coords = selected.loc[:,'fov_acquisition_coords_y'].to_numpy()
def normalize_coords(self):
if self.metadata['machine'] == 'ROBOFISH2':
# RobofishII has stage with reference point
# in the center (0,0)
# consider that we get the top-right corner of the image as well
self.reference_corner_fov_position = 'old-room-robofish2' # Not sure (i don't remember)
# consider that we get the top-right corner of the image as well
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
x_max = np.amax(self.x_coords)
y_max = np.amax(self.y_coords)
# Put the coords to zero
if x_max >=0 :
self.x_coords = self.x_coords - x_min
else:
self.x_coords = self.x_coords + np.abs(x_min)
if y_max>0:
self.y_coords = self.y_coords - y_min
else:
self.y_coords = self.y_coords + np.abs(y_min)
# flip y_axis
self.y_coords = self.y_coords - self.y_coords.max()
self.y_coords = - self.y_coords
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
elif self.metadata['machine'] == 'ROBOFISH1':
# The current system has stage ref coords top-left
self.reference_corner_fov_position = 'top-left'
# Normalize to (0,0) still BOTTOM-RIGHT
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
self.x_coords = self.x_coords - x_min
self.y_coords = self.y_coords - y_min
# flip axis to move (0,0) on TOP-LEF
self.x_coords = self.x_coords - self.x_coords.max()
self.x_coords = - self.x_coords
self.y_coords = self.y_coords - self.y_coords.max()
self.y_coords = - self.y_coords
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
elif self.metadata['machine'] == 'NOT_DEFINED':
self.logger.error(f'Need to define the specs for stitching NOT_DEFINED machine')
sys.exit(f'Need to define the specs for stitching NOT_DEFINED machine')
else:
self.logger.error(f'define the right machine used to collected the data')
sys.exit(f'define the right machine used to collected the data')
self.tile_corners_coords_pxl = adjusted_coords / self.pixel_size
def save_graph_original_coords(self):
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'microscope_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.x_coords.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.plot(self.x_coords,self.y_coords,'or')
for label, x, y in zip(labels, self.x_coords,self.y_coords):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def save_graph_image_space_coords(self):
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'image_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.tile_corners_coords_pxl.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.gca().invert_yaxis()
plt.plot(self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0],'or')
for label, x, y in zip(labels, self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0]):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def identify_adjacent_tiles(self):
shift_percent_tolerance = 0.05
searching_radius = self.img_size - (self.img_size*self.overlapping_percentage) + (self.img_size*shift_percent_tolerance)
nn = NearestNeighbors(n_neighbors=5,radius=searching_radius, metric='euclidean')
nn.fit(self.tile_corners_coords_pxl)
self.dists, self.indices = nn.kneighbors(self.tile_corners_coords_pxl, return_distance=True)
def determine_overlapping_regions(self):
# remember that overlapping region can be an empty dictionary
self.overlapping_regions = {}
self.overlapping_order ={}
for idx in np.arange(self.indices.shape[0]):
self.overlapping_regions[idx] = {}
self.overlapping_order[idx] = {}
for idx in np.arange(self.indices.shape[0]):
# Determine the indices that identify the correct adjacent
processing_indices = self.indices[idx,:]
processing_dists = self.dists[idx,:]
ref_tile = processing_indices[0]
self.overlapping_regions[ref_tile] = {}
self.overlapping_order[ref_tile] = {}
trimmed_indices = processing_indices[1:]
trimmed_dists = processing_dists[1:]
idx_adj = np.where(trimmed_dists < self.img_size)
adj_tiles_id = trimmed_indices[idx_adj]
adj_cpls = [(ref_tile, adj_tile) for adj_tile in adj_tiles_id]
# remove pairs that are already selected
only_new_cpls = [cpl for cpl in adj_cpls if (cpl[1],cpl[0]) not in self.overlapping_regions[cpl[1]].keys()]
# only_new_cpls = [cpl for cpl in adj_cpls]
if self.metadata['machine'] == 'ROBOFISH2':
# If tile coords are top left
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_size
r_bl = tile2_c_coords + self.img_size
r_tr = tile1_c_coords
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_size
r_bl = tile1_r_coords + self.img_size
r_tr = tile2_r_coords
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_size
c_tr = tile2_c_coords + self.img_size
c_bl = tile1_c_coords
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_size
c_bl = tile2_c_coords
c_tr = tile1_c_coords + self.img_size
col_order = ('left','right')
elif self.metadata['machine'] == 'ROBOFISH1':
# If tile coords are bottom right
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords - self.img_size
r_br = tile2_r_coords
r_bl = tile2_c_coords
r_tr = tile1_c_coords - self.img_size
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords - self.img_size
r_br = tile1_r_coords
r_bl = tile1_r_coords
r_tr = tile2_r_coords - self.img_size
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords - self.img_size
c_br = tile2_c_coords
c_tr = tile2_c_coords
c_bl = tile1_c_coords - self.img_size
col_order = ('right','left')
else:
c_tl = tile2_c_coords - self.img_size
c_br = tile1_c_coords
c_bl = tile2_c_coords - self.img_size
c_tr = tile1_c_coords
col_order = ('left','right')
else:
pass
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
def run_tiles_organization(self):
self.extract_microscope_coords()
self.save_graph_original_coords()
self.normalize_coords()
self.save_graph_image_space_coords()
self.identify_adjacent_tiles()
self.determine_overlapping_regions()
fname = self.experiment_fpath / 'results' / 'microscope_tile_corners_coords_pxl.npy'
np.save(fname,self.tile_corners_coords_pxl)
def stitch_using_coords_general(decoded_df: pd.DataFrame, tile_corners_coords_pxl: np.ndarray,
reference_corner_fov_position: str, metadata: Dict, tag: str):
"""Function to create a stitched image using the fov coords
of the stage.
Args:
decoded_df (pd.DataFrame): Counts after decoding
tile_corners_coords_pxl (np.ndarray): Coords of the fovs according to the stage
reference_corner_fov_position (str): Position of the reference corner determine by
the organization stage/camera. In our setup can be:
- top-left
- top-right
- bottom_left
metadata (Dict): [description]
tag (str): [description]
Returns:
[type]: Decoded counts with coords of the dots adjusted to the stage
reference point
"""
logger = selected_logger()
was_file = 0
if not isinstance(decoded_df, pd.DataFrame):
was_file = 1
decoded_df_fpath = copy.deepcopy(decoded_df)
decoded_df = pd.read_parquet(decoded_df)
if decoded_df['r_px_registered'].empty:
decoded_df['r_px_'+tag] = np.nan
decoded_df['c_px_'+tag] = np.nan
else:
#fov = decoded_df.iloc[0]['fov_num']
fov = int(decoded_df.fov_num.unique()[0])
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if reference_corner_fov_position == 'top-left':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'top-right':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'bottom-left':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'bottom-right':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'old-room-robofish2':
decoded_df['r_px_'+tag] = r_microscope_coords - decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - decoded_df['c_px_registered']
else:
logger.error(f"the referernce corner fov position name is wrong")
sys.exit(f"the referernce corner fov position name is wrong")
# decoded_df['r_px_'+tag] = r_microscope_coords - decoded_df['r_px_registered']
# decoded_df['c_px_'+tag] = c_microscope_coords - decoded_df['c_px_registered']
if was_file:
decoded_df.to_parquet(decoded_df_fpath,index=False)
else:
return decoded_df
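# --- Minimal sketch of stitch_using_coords_general on an in-memory DataFrame ---
# Illustrative only: a tiny decoded-counts table for fov 3 is shifted into the
# stitched space with a 'top-left' reference corner. All numbers are made up.
def _example_stitch_using_coords_general():
    tile_corners_coords_pxl = np.zeros((4, 2))
    tile_corners_coords_pxl[3] = [2048.0, 1024.0]  # (row, col) corner of fov 3 in pixels
    decoded_df = pd.DataFrame({'fov_num': [3, 3],
                               'r_px_registered': [10.0, 20.0],
                               'c_px_registered': [5.0, 15.0]})
    metadata = {'img_width': 2048, 'img_height': 2048}
    # Adds the r_px_microscope_stitched / c_px_microscope_stitched columns
    return stitch_using_coords_general(decoded_df, tile_corners_coords_pxl,
                                       'top-left', metadata, 'microscope_stitched')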
def stitch_using_coords_general_segmented_objects(fov,obj_dict,tile_corners_coords_pxl,reference_corner_fov_position, metadata):
"""
    Function used to translate the coords of the segmented objects (cells) into the stitched reference space.
"""
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if obj_dict:
if reference_corner_fov_position == 'top-left':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + coords_dict['original_coords'][:,0],
c_microscope_coords + coords_dict['original_coords'][:,1]]).T
elif reference_corner_fov_position == 'top-right':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + coords_dict['original_coords'][:,0],
c_microscope_coords - (metadata['img_width'] -coords_dict['original_coords'][:,1])]).T
        elif reference_corner_fov_position == 'bottom-left':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + (metadata['img_height'] -coords_dict['original_coords'][:,0]),
c_microscope_coords + coords_dict['original_coords'][:,1]]).T
return obj_dict
def register_coords_obj(fov,segmentation_output_path,
stitching_parameters,
reference_corner_fov_position,
metadata):
"""Function used to register the coords of the segmented object to th
Args:
fov ([type]): [description]
segmentation_output_path ([type]): [description]
"""
segmented_output = pickle.load(open(segmentation_output_path / ('preprocessed_data_fov_' + str(fov) + '_mask.pkl'), 'rb'))
segmented_regions = measure.regionprops(segmented_output)
segmented_regions_dict = {}
for prop in segmented_regions:
segmented_regions_dict[str(fov)+'-'+str(prop.label)] = {}
segmented_regions_dict[str(fov)+'-'+str(prop.label)]['original_coords']=prop.coords
segmented_regions_dict[str(fov)+'-'+str(prop.label)]['stitched_coords']= np.nan
segmented_regions_dict = stitch_using_coords_general_segmented_objects(fov,segmented_regions_dict,
stitching_parameters,reference_corner_fov_position, metadata)
pickle.dump(segmented_regions_dict,open(segmentation_output_path / ('registered_objs_dict_fov_' + str(fov) + '.pkl'), 'wb'))
def get_all_dots_in_overlapping_regions(counts_df, chunk_coords, stitching_selected='microscope_stitched'):
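    """Select all the dots that fall inside the overlapping region defined by
    chunk_coords ([r_tl, r_br, c_tl, c_br]), using the pixel coords produced by
    the selected stitching step."""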
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
r_tl = chunk_coords[0]
r_br = chunk_coords[1]
c_tl = chunk_coords[2]
c_br = chunk_coords[3]
overlapping_ref_df = counts_df.loc[(counts_df[r_tag] > r_tl) & (counts_df[r_tag] < r_br)
& (counts_df[c_tag] > c_tl) & (counts_df[c_tag] < c_br),:]
return overlapping_ref_df
# TODO adjust the registration with dots (triangulation)
def register_cpl(cpl, chunk_coords, experiment_fpath,
stitching_channel,
reference_round):
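    """Register a couple of adjacent fovs. The stitching-channel dots falling in
    the overlapping region are turned into fake images (create_fake_image) and
    aligned by phase correlation (register_translation). Returns {cpl: [shift, error]};
    a [1000, 1000] shift marks an overlap without usable dots."""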
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df = pd.read_parquet(counts1_fpath)
counts2_df = pd.read_parquet(counts2_fpath)
count1_grp = counts1_df.loc[(counts1_df.channel == stitching_channel) &
(counts1_df.round_num == reference_round),:]
count2_grp = counts2_df.loc[(counts2_df.channel == stitching_channel) &
(counts2_df.round_num == reference_round),:]
count1_grp = counts1_df.loc[counts1_df.channel == stitching_channel,:]
count2_grp = counts2_df.loc[counts2_df.channel == stitching_channel,:]
overlap_count1 = get_all_dots_in_overlapping_regions(count1_grp, chunk_coords,stitching_selected='microscope_stitched')
overlap_count2 = get_all_dots_in_overlapping_regions(count2_grp, chunk_coords,stitching_selected='microscope_stitched')
if overlap_count1.empty or overlap_count2.empty:
shift = np.array([1000,1000])
registration[cpl] = [shift, np.nan]
else:
# TODO
# Maybe add a selction step where if the number of beads is below X the beads based registration will be run or fft based
# registration if the number of beads is high enough
r_tl = chunk_coords[0]
c_tl = chunk_coords[2]
img_shape = np.array([np.abs(chunk_coords[1]-chunk_coords[0]),np.abs(chunk_coords[3]-chunk_coords[2])]).astype('int') + 1
norm_ref_coords = overlap_count1.loc[:,['r_px_microscope_stitched','c_px_microscope_stitched']].to_numpy() -[r_tl, c_tl]
norm_comp_coords = overlap_count2.loc[:,['r_px_microscope_stitched','c_px_microscope_stitched']].to_numpy() -[r_tl, c_tl]
img_ref = create_fake_image(img_shape, norm_ref_coords)
img_tran = create_fake_image(img_shape, norm_comp_coords)
shift, error, diffphase = register_translation(img_ref, img_tran)
registration[cpl] = [shift, error]
return registration
def register_cpl_fresh_nuclei(cpl: Tuple, chunk_coords: np.ndarray, order: dict,
metadata:dict, experiment_fpath:str):
"""Function to register orverlapping regions of nuclear staining for stitching
Args:
cpl (Tuple): overlapping tiles
chunk_coords (np.ndarray): coords of the overlapping region [r_tl,r_br,c_tl,c_br]
order (dict): description of the position of the tiles
metadata (dict): dictionary with the general experiment data
experiment_fpath (str): path to the experiment to process
Returns:
dict: registration output [shift, error]
"""
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
img_width = metadata['img_width']
img_height = metadata['img_height']
experiment_name = metadata['experiment_name']
error = 0
filtered_nuclei_fpath = experiment_fpath / 'fresh_tissue' / 'fresh_tissue_nuclei_preprocessed_img_data.zarr'
try:
st = zarr.DirectoryStore(filtered_nuclei_fpath)
root = zarr.group(store=st, overwrite=False)
except:
logger.error(f'cannot load the zarr files with filtered nuclei')
else:
try:
img1 = root[experiment_name + '_fresh_tissue_nuclei_fov_' + str(cpl[0])]['preprocessed_data_fov_'+str(cpl[0])][...]
except:
logger.error(f'image file cannot be loaded for nuclei of fov {cpl[0]}')
else:
try:
img2 = root[experiment_name + '_fresh_tissue_nuclei_fov_' + str(cpl[1])]['preprocessed_data_fov_'+str(cpl[1])][...]
except:
logger.error(f'image file cannot be loaded for nuclei of fov {cpl[1]}')
else:
img_shape = np.array([np.abs(chunk_coords[1]-chunk_coords[0]),np.abs(chunk_coords[3]-chunk_coords[2])]).astype('int')
if order == {'row_order': ('top', 'bottom'), 'column_order': ('right', 'left')}:
img1_slice = img1[(img_height-img_shape[0]):img_height,0:img_shape[1]]
img2_slice = img2[0:img_shape[0],(img_width-img_shape[1]):img_width]
elif order == {'row_order': ('top', 'bottom'), 'column_order': ('left', 'right')}:
img1_slice = img1[img_height-img_shape[0]:img_height,img_width-img_shape[1]:img_width]
img2_slice = img2[0:img_shape[0],0:img_shape[1]]
elif order == {'row_order': ('bottom', 'top'), 'column_order': ('left', 'right')}:
img1_slice = img1[0:img_shape[0],img_width-img_shape[1]:img_width]
img2_slice = img2[img_height-img_shape[0]:img_height,0:img_shape[1]]
elif order == {'row_order': ('bottom', 'top'), 'column_order': ('right', 'left')}:
img1_slice = img1[0:img_shape[0],0:img_shape[1]]
img2_slice = img2[img_height-img_shape[0]:img_height,img_width-img_shape[1]:img_width]
else:
logger.error(f'unknown fovs order')
error = 1
if error:
shift = np.array([1000,1000])
registration[cpl] = [shift, np.nan]
else:
shift, error, diffphase = register_translation(img1_slice, img2_slice)
registration[cpl] = [shift, error]
return registration
def stitching_graph(experiment_fpath, stitching_channel,tiles_org, metadata,
reference_round, client, nr_dim = 2):
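    """Globally adjust the microscope tile coords. Pairwise shifts between
    overlapping fovs are estimated in parallel (register_cpl), shifts larger than
    20 px are discarded, and the remaining ones are combined in a least-squares
    system solved by linear regression. The resulting per-tile translations are
    applied to the decoded counts and saved with the 'global_stitched' tag."""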
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl,cpl, chunk_coords, experiment_fpath,stitching_channel,
reference_round)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
    # Run the global registration only if there are not too many overlapping regions without
    # dots
# counts_cpls_missing_overlapping_dots = 0
# cpls_missing_overlapping_dots = []
# for cpl, registration_output in all_registrations_dict.items():
# if np.isnan(registration_output[1]):
# cpls_missing_overlapping_dots.append(cpl)
# counts_cpls_missing_overlapping_dots += 1
# global_stitching_done = 0
# if len(cpls_missing_overlapping_dots) > 10:
# logger.error(f"Too many cpl of fovs without overlapping reference dots")
# pickle.dump([cpls_missing_overlapping_dots,counts_cpls_missing_overlapping_dots ],
# open(experiment_fpath / 'results' / 'fovs_without_overlapping_reference_dots_no_global_stitching.pkl','rb'))
# global_stitching_done = 0
# return tiles_org.tile_corners_coords_pxl, global_stitching_done
# else:
# global_stitching_done = 1
# logger.error(f"The number of cpls of fovs without overlapping reference dots is low, test global stitching")
# pickle.dump([cpls_missing_overlapping_dots,counts_cpls_missing_overlapping_dots ],
# open(experiment_fpath / 'results' / 'fovs_without_overlapping_reference_dots_yes_global_stitching.pkl','wb'))
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
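# --- Sketch of the least-squares system used for the global stitching (illustrative) ---
# Each measured pairwise shift between tiles a and b is modelled as t_b - t_a, where
# t_i is the unknown 2D translation of tile i. ZQ holds a -1/+1 pair per measurement
# and dimension, P holds the measured shifts, and LinearRegression(fit_intercept=False)
# (linmod is this module's alias for sklearn.linear_model) returns translations that
# are consistent up to a global offset, hence the re-referencing to tile 0.
def _example_global_shift_system():
    nr_dim = 2
    nr_tiles = 3
    cpls = [(0, 1), (1, 2)]
    measured = {(0, 1): np.array([2.0, -1.0]), (1, 2): np.array([3.0, 0.5])}
    P = np.zeros(len(cpls) * nr_dim)
    ZQ = np.zeros((len(cpls) * nr_dim, nr_tiles * nr_dim))
    for i, (a, b) in enumerate(cpls):
        P[i * nr_dim:i * nr_dim + nr_dim] = measured[(a, b)]
        ZQ[i * nr_dim, nr_dim * a] = -1
        ZQ[i * nr_dim, nr_dim * b] = 1
        ZQ[i * nr_dim + 1, nr_dim * a + 1] = -1
        ZQ[i * nr_dim + 1, nr_dim * b + 1] = 1
    lrg = linmod.LinearRegression(fit_intercept=False)
    lrg.fit(ZQ, P)
    t = lrg.coef_.reshape(nr_tiles, nr_dim)
    # Re-reference to tile 0: tile 1 -> [2., -1.], tile 2 -> [5., -0.5]
    return t - t[0:1, :]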
def stitching_graph_fresh_nuclei(experiment_fpath,tiles_org, metadata,
client, nr_dim = 2):
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
unfolded_overlapping_order_dict = {key:value for (k,v) in tiles_org.overlapping_order.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl_fresh_nuclei,cpl, chunk_coords,
unfolded_overlapping_order_dict[cpl],
metadata,
experiment_fpath)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'fresh_tissue'/ 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched_nuclei')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'fresh_tissue' / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'fresh_tissue' / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
def stitching_graph_serial_nuclei(experiment_fpath,tiles_org, metadata,
registration_reference_hybridization,
client, nr_dim = 2):
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
unfolded_overlapping_order_dict = {key:value for (k,v) in tiles_org.overlapping_order.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl_fresh_nuclei,cpl, chunk_coords,
unfolded_overlapping_order_dict[cpl],
metadata,
experiment_fpath)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched_nuclei')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
def stitched_beads_on_nuclei_fresh_tissue(experiment_fpath:str,
client,
nuclei_tag:str='_ChannelCy3_Nuclei_',
beads_tag:str='_ChannelEuropium_Cy3_',
round_num:int = 1,
overlapping_percentage:int=5,
machine:str='ROBOFISH2'
):
"""Function tun run the stitching of the dots in the fresh images using
the nuclei images as reference
Args:
experiment_fpath (str): path of the experiment to process
client ([type]): dask client for parallel processing
nuclei_tag (str, optional): Tag to identify the nuclei dataset. Defaults to '_ChannelCy3_Nuclei_'.
beads_tag (str, optional): Tag to identify the beads dataset. Defaults to '_ChannelEuropium_Cy3_'.
        round_num (int, optional): Reference round; for the fresh tissue there is only one. Defaults to 1.
overlapping_percentage (int, optional): Overlapping between the different tiles. Defaults to 5.
machine (str, optional): machine running the experiment. Defaults to 'ROBOFISH2'.
"""
experiment_fpath = Path(experiment_fpath)
fresh_tissue_path = experiment_fpath / 'fresh_tissue'
beads_dataset_fpath = list(fresh_tissue_path.glob('*'+ beads_tag +'*.parquet'))[0]
nuclei_dataset_fpath = list(fresh_tissue_path.glob('*'+ nuclei_tag +'*.parquet'))[0]
# Collect and adjust beads dataset with missing values
beads_data = Dataset()
beads_data.load_dataset(beads_dataset_fpath)
beads_data.dataset['processing_type'] = 'undefined'
beads_data.dataset['overlapping_percentage'] = overlapping_percentage / 100
beads_data.dataset['machine'] = machine
metadata_beads = beads_data.collect_metadata(beads_data.dataset)
beads_org_tiles = organize_square_tiles(experiment_fpath,beads_data.dataset,metadata_beads,round_num)
beads_org_tiles.run_tiles_organization()
flist = list((fresh_tissue_path / 'results').glob('*decoded_fov*.parquet'))
# duplicate registered
for fpath in flist:
data = pd.read_parquet(fpath)
data['r_px_registered'] = data['r_px_original']
data['c_px_registered'] = data['c_px_original']
data['hamming_distance'] = 0
data['decoded_genes'] = 'beads'
data.to_parquet(fpath)
all_futures = []
for fpath in flist:
future = client.submit(stitch_using_coords_general, fpath,
beads_org_tiles.tile_corners_coords_pxl,
beads_org_tiles.reference_corner_fov_position,
metadata_beads,tag='microscope_stitched')
all_futures.append(future)
_ = client.gather(all_futures)
io.simple_output_plotting(fresh_tissue_path,
stitching_selected= 'microscope_stitched',
selected_Hdistance=0,
client = client,
input_file_tag = 'decoded_fov',
file_tag = 'stitched_microscope')
# Collect and adjust nuclei dataset with missing values
nuclei_data = Dataset()
nuclei_data.load_dataset(nuclei_dataset_fpath)
nuclei_data.dataset['processing_type'] = 'undefined'
nuclei_data.dataset['overlapping_percentage'] = overlapping_percentage / 100
nuclei_data.dataset['machine'] = machine
metadata_nuclei = nuclei_data.collect_metadata(nuclei_data.dataset)
nuclei_org_tiles = organize_square_tiles(experiment_fpath,nuclei_data.dataset,metadata_nuclei,round_num)
nuclei_org_tiles.run_tiles_organization()
adjusted_coords =stitching_graph_fresh_nuclei(experiment_fpath,nuclei_org_tiles, metadata_nuclei,
client, nr_dim = 2)
io.simple_output_plotting(fresh_tissue_path,
stitching_selected= 'global_stitched_nuclei',
selected_Hdistance=0,
client = client,
input_file_tag = 'decoded_fov',
file_tag = 'global_stitched_nuclei')
# REMOVE OVERLAPPING DOTS ACCORDING TO FOV (MUCH FASTER THAN BY GENE),
# ESPECIALLY FOR LARGE AREAS WITH A LOT OF COUNTS
def identify_duplicated_dots_NNDescend(ref_tiles_df: pd.DataFrame,comp_tiles_df: pd.DataFrame,
stitching_selected: str,same_dot_radius: int)-> list:
"""Function used to identify duplicated dots for a gene in the overlapping regions. This
version of the function uses the fast nearest neighbor coded in NNDescend
Args:
ref_tiles_df (pd.DataFrame): Counts of the reference tiles
comp_tiles_df (pd.DataFrame): Counts in the comparing tiles
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
list: dot ids to remove
"""
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
overlapping_ref_coords = ref_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
overlapping_comp_coords = comp_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
dots_ids = comp_tiles_df.loc[:, ['dot_id']].to_numpy()
index = NNDescent(overlapping_ref_coords,metric='euclidean',n_neighbors=1)
indices, dists = index.query(overlapping_comp_coords,k=1)
idx_dists = np.where(dists < same_dot_radius)[0]
dots_id_to_remove = dots_ids[idx_dists]
dots_id_to_remove = list(dots_id_to_remove.reshape(dots_id_to_remove.shape[0],))
return dots_id_to_remove
def identify_duplicated_dots_sklearn(ref_tiles_df: pd.DataFrame,comp_tiles_df: pd.DataFrame,
stitching_selected: str,same_dot_radius: int)-> list:
"""Function used to identify duplicated dots for a gene in the overlapping regions. This
version of the function uses the fast nearest neighbor coded in NNDescend
Args:
ref_tiles_df (pd.DataFrame): Counts of the reference tiles
comp_tiles_df (pd.DataFrame): Counts in the comparing tiles
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
list: dot ids to remove
"""
nn = NearestNeighbors(n_neighbors=1,radius=same_dot_radius, metric='euclidean',algorithm='kd_tree')
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
overlapping_ref_coords = ref_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
overlapping_comp_coords = comp_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
dots_ids = comp_tiles_df.loc[:, ['dot_id']].to_numpy()
nn.fit(overlapping_ref_coords)
dists, indices = nn.kneighbors(overlapping_comp_coords, return_distance=True)
idx_dists = np.where(dists <= same_dot_radius)[0]
dots_id_to_remove = dots_ids[idx_dists]
dots_id_to_remove = list(dots_id_to_remove.reshape(dots_id_to_remove.shape[0],))
return dots_id_to_remove
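# --- Minimal sketch of the sklearn-based duplicate detection (illustrative values) ---
# Two tiny count tables sharing one dot within the search radius: the dot_id of the
# duplicate found in the comparing tile is returned so it can be dropped downstream.
def _example_identify_duplicated_dots_sklearn():
    ref_tiles_df = pd.DataFrame({'r_px_microscope_stitched': [100.0, 200.0],
                                 'c_px_microscope_stitched': [100.0, 200.0],
                                 'dot_id': ['ref-1', 'ref-2']})
    comp_tiles_df = pd.DataFrame({'r_px_microscope_stitched': [101.0, 500.0],
                                  'c_px_microscope_stitched': [99.0, 500.0],
                                  'dot_id': ['comp-1', 'comp-2']})
    # Returns ['comp-1']: the only comparing dot closer than same_dot_radius to a reference dot
    return identify_duplicated_dots_sklearn(ref_tiles_df, comp_tiles_df,
                                            stitching_selected='microscope_stitched',
                                            same_dot_radius=5)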
def remove_overlapping_dots_fov(cpl: Tuple[int,int], chunk_coords: np.ndarray,
experiment_fpath: str, stitching_selected:str,
hamming_distance: float, same_dot_radius: int)-> Dict[Tuple[int,int],List[str]]:
"""Function that identify the overlapping dots between two different tiles. The duplicated dots
for all genes are identified
Args:
cpl (Tuple[int,int]): Adjacent tiles to compare
chunk_coords (np.ndarray): Coords of the overlapping regions between the two tiles to compare
experiment_fpath (str): Path to the experiment to process
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
hamming_distance (float): Selected distance from the code
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
Dict[Tuple[int,int],List[str]]: {cpl:all_dots_id_to_remove}
"""
logger = selected_logger()
all_dots_id_to_remove = []
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded*_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded*_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df = pd.read_parquet(counts1_fpath)
counts2_df = pd.read_parquet(counts2_fpath)
# count1_grp = counts1_df.loc[counts1_df.hamming_distance < hamming_distance,:]
# count2_grp = counts2_df.loc[counts2_df.hamming_distance < hamming_distance,:]
count1_df = counts1_df.loc[counts1_df.hamming_distance < hamming_distance,:]
count2_df = counts2_df.loc[counts2_df.hamming_distance < hamming_distance,:]
            # Use the hamming-distance-filtered counts to search for duplicates
            overlap_count1 = get_all_dots_in_overlapping_regions(count1_df, chunk_coords,
                                                    stitching_selected)
            overlap_count2 = get_all_dots_in_overlapping_regions(count2_df, chunk_coords,
                                                    stitching_selected)
count1_grp = overlap_count1.groupby('decoded_genes')
count2_grp = overlap_count2.groupby('decoded_genes')
for gene, over_c1_df in count1_grp:
try:
over_c2_df = count2_grp.get_group(gene)
except:
pass
else:
dots_id_to_remove = identify_duplicated_dots_sklearn(over_c1_df,over_c2_df,
stitching_selected,same_dot_radius)
if len(dots_id_to_remove):
all_dots_id_to_remove.append(dots_id_to_remove)
all_dots_id_to_remove = [el for tg in all_dots_id_to_remove for el in tg]
return {cpl:all_dots_id_to_remove}
def remove_overlapping_dots_serial_fov(cpl: Tuple[int,int], chunk_coords: np.ndarray,
experiment_fpath: str, stitching_selected:str,
same_dot_radius: int)-> Dict[Tuple[int,int],List[str]]:
"""Function that identify the overlapping dots between two different tiles. The duplicated dots
for all genes are identified
Args:
cpl (Tuple[int,int]): Adjacent tiles to compare
chunk_coords (np.ndarray): Coords of the overlapping regions between the two tiles to compare
experiment_fpath (str): Path to the experiment to process
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
Dict[Tuple[int,int],List[str]]: {cpl:all_dots_id_to_remove}
"""
logger = selected_logger()
all_dots_id_to_remove = []
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded*_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded*_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df = pd.read_parquet(counts1_fpath)
counts2_df = pd.read_parquet(counts2_fpath)
overlap_count1 = get_all_dots_in_overlapping_regions(counts1_df, chunk_coords,
stitching_selected)
overlap_count2 = get_all_dots_in_overlapping_regions(counts2_df, chunk_coords,
stitching_selected)
count1_grp = overlap_count1.groupby('target_name')
count2_grp = overlap_count2.groupby('target_name')
for gene, over_c1_df in count1_grp:
try:
over_c2_df = count2_grp.get_group(gene)
except:
pass
else:
dots_id_to_remove = identify_duplicated_dots_sklearn(over_c1_df,over_c2_df,
stitching_selected,same_dot_radius)
if len(dots_id_to_remove):
all_dots_id_to_remove.append(dots_id_to_remove)
all_dots_id_to_remove = [el for tg in all_dots_id_to_remove for el in tg]
return {cpl:all_dots_id_to_remove}
def clean_from_duplicated_dots(fov: int, dots_id_to_remove: list, experiment_fpath: str,
tag_cleaned_file:str):
"""Function to remove the dulicated dots.
Args:
fov (int): Field of view to process
dots_id_to_remove (str): ids of the duplicated dots
experiment_fpath (str): Path to the experiment to process
tag_cleaned_file (str): tag name of the file with cleaned counts
"""
logger = selected_logger()
experiment_fpath = Path(experiment_fpath)
try:
fname = list((experiment_fpath / 'results').glob('*_decoded_fov_' + str(fov) + '.parquet'))[0]
except:
logger.error(f'missing decoded file for fov {fov}')
else:
save_name = fname.stem.split('_decoded_fov_')[0] + '_'+ tag_cleaned_file +'_cleaned_df_fov_' + str(fov) + '.parquet'
save_name = experiment_fpath / 'results' / save_name
if len(dots_id_to_remove):
try:
counts_df = pd.read_parquet(fname)
logger.error(f'loaded {fname}')
except:
logger.error(f'missing {fname}')
else:
cleaned_df = counts_df.loc[~counts_df.dot_id.isin(dots_id_to_remove), :]
cleaned_df.to_parquet(save_name,index=False)
logger.error(f'saved {fname}')
save_name = fname.stem.split('_decoded_fov_')[0] + '_' + tag_cleaned_file + '_removed_df_fov_' + str(fov) + '.parquet'
save_name = experiment_fpath / 'results' / save_name
removed_df = counts_df.loc[counts_df.dot_id.isin(dots_id_to_remove), :]
removed_df.to_parquet(save_name,index=False)
else:
try:
_ = shutil.copy2(fname.as_posix(),save_name.as_posix())
logger.error(f'copied {fname}')
except:
logger.error(f'cannot copy {fname} to {save_name}')
"""
The overlapping dots are not removed right after being identified
to avoid race conditions
"""
def remove_duplicated_dots_graph(experiment_fpath: str,dataset: pd.DataFrame,
tiles_org, hamming_distance: float,
same_dot_radius: int,
stitching_selected: str, client):
"""Dask task graph builder/runner function to parallel remove duplicated dots
The overlapping dots are not removed right after being identified
because the same fov can be part of two different overlapping couples.
Args:
experiment_fpath (str): Path to the experiment to process
dataset (pd.DataFrame): Properties of the images of the experiment
tiles_org ([type]): Organization of the tiles
hamming_distance (float): Selected distance from the code
same_dot_radius (int): searching radius used to define if two dots are
the same
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
client (dask.distributed.Client): Dask client in charge of controlling
the processing of the task graph.
        Note: stitching_selected is also reused as the tag of the cleaned output files.
"""
logger = selected_logger()
fovs = dataset.loc[:,'fov_num'].unique()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
# Prepare the dataframe
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
all_futures = []
for cpl,chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(remove_overlapping_dots_fov,
cpl = cpl,
chunk_coords=chunk_coords,
experiment_fpath=experiment_fpath,
stitching_selected=stitching_selected,
hamming_distance=hamming_distance,
same_dot_radius = same_dot_radius)
all_futures.append(future)
to_remove = client.gather(all_futures)
to_remove_comb = {k: v for d in to_remove for k, v in d.items()}
removed_dot_dict = {}
for key, items in to_remove_comb.items():
if key[1] not in removed_dot_dict.keys():
removed_dot_dict[key[1]] = []
removed_dot_dict[key[1]].append(items)
for key, items in removed_dot_dict.items():
removed_dot_dict[key] = [el for tg in items for el in tg]
for fov,dots_id_to_remove in removed_dot_dict.items():
future = client.submit(clean_from_duplicated_dots,
fov = fov,
dots_id_to_remove=dots_id_to_remove,
experiment_fpath=experiment_fpath,
tag_cleaned_file=stitching_selected)
all_futures.append(future)
_ = client.gather(all_futures)
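# --- Workflow sketch (hypothetical wiring, never called by the pipeline) ---
# Shows how the parallel duplicate removal is expected to be chained after the tile
# organization; the hamming-distance and radius thresholds are placeholder values.
def _example_remove_duplicates(experiment_fpath, dataset, tiles_org, client):
    # tiles_org: an already organized tile object (run_tiles_organization has been called)
    remove_duplicated_dots_graph(experiment_fpath, dataset, tiles_org,
                                 hamming_distance=3 / 16,
                                 same_dot_radius=10,
                                 stitching_selected='microscope_stitched',
                                 client=client)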
"""
The overlapping dots are not removed right after being identified
to avoid race conditions
"""
def remove_duplicated_dots_serial_graph(experiment_fpath: str,dataset: pd.DataFrame,
tiles_org,
same_dot_radius: int,
stitching_selected: str, client):
"""Dask task graph builder/runner function to parallel remove duplicated dots
The overlapping dots are not removed right after being identified
because the same fov can be part of two different overlapping couples.
Args:
experiment_fpath (str): Path to the experiment to process
dataset (pd.DataFrame): Properties of the images of the experiment
tiles_org ([type]): Organization of the tiles
same_dot_radius (int): searching radius used to define if two dots are
the same
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
client (dask.distributed.Client): Dask client in charge of controlling
the processing of the task graph.
        Note: stitching_selected is also reused as the tag of the cleaned output files.
"""
logger = selected_logger()
fovs = dataset.loc[:,'fov_num'].unique()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
# Prepare the dataframe
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
all_futures = []
for cpl,chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(remove_overlapping_dots_serial_fov,
cpl = cpl,
chunk_coords=chunk_coords,
experiment_fpath=experiment_fpath,
stitching_selected=stitching_selected,
same_dot_radius = same_dot_radius)
all_futures.append(future)
to_remove = client.gather(all_futures)
to_remove_comb = {k: v for d in to_remove for k, v in d.items()}
removed_dot_dict = {}
for key, items in to_remove_comb.items():
if key[1] not in removed_dot_dict.keys():
removed_dot_dict[key[1]] = []
removed_dot_dict[key[1]].append(items)
for key, items in removed_dot_dict.items():
removed_dot_dict[key] = [el for tg in items for el in tg]
for fov,dots_id_to_remove in removed_dot_dict.items():
future = client.submit(clean_from_duplicated_dots,
fov = fov,
dots_id_to_remove=dots_id_to_remove,
experiment_fpath=experiment_fpath,
tag_cleaned_file=stitching_selected)
all_futures.append(future)
_ = client.gather(all_futures)
# TODO Remove functions
def stitch_using_coords_general_df(decoded_df,tile_corners_coords_pxl,reference_corner_fov_position, metadata,tag):
"""
Tiles are placed directly on the position indicated by the microscope
coords
"""
if decoded_df['r_px_registered'].empty:
decoded_df['r_px_'+tag] = np.nan
decoded_df['c_px_'+tag] = np.nan
else:
fov = decoded_df.iloc[0]['fov_num']
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if reference_corner_fov_position == 'top-left':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'top-right':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'bottom_left':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
# if decoded_df['r_px_registered'].empty:
# decoded_df['r_px_microscope_stitched'] = np.nan
# decoded_df['c_px_microscope_stitched'] = np.nan
# else:
# fov = decoded_df.iloc[0]['fov_num']
# r_microscope_coords = tile_corners_coords_pxl[fov,0]
# c_microscope_coords = tile_corners_coords_pxl[fov,1]
# decoded_df['r_px_microscope_stitched'] = r_microscope_coords - decoded_df['r_px_registered']
# decoded_df['c_px_microscope_stitched'] = c_microscope_coords - decoded_df['c_px_registered']
# new room
# decoded_df['r_px_microscope_stitched'] = r_microscope_coords + decoded_df['r_px_registered']
# decoded_df['c_px_microscope_stitched'] = c_microscope_coords + decoded_df['c_px_registered']
return decoded_df
# REMOVE OVERLAPPING DOTS ACCORDING TO GENE
# preprocessing and removal part to put in the flow file
# all_files = (Path(experiment_fpath) / 'tmp' / 'registered_counts').glob('*decoded*.parquet')
# counts_dd_list = [dd.read_parquet(counts_file) for counts_file in all_files]
# counts_dd = dd.concat(counts_dd_list, axis=0)
# counts_dd = counts_dd.loc[counts_dd.dot_id == counts_dd.barcode_reference_dot_id,['barcode_reference_dot_id',
# r_tag, c_tag, select_genes,
# 'fov_num']]
# counts_df = counts_dd.dropna(subset=[select_genes]).compute()
# grpd = counts_df.groupby(select_genes)
# all_futures = []
# for gene, count_df in grpd:
# future = client.submit(remove_overlapping_dots_from_gene,
# experiment_fpath = experiment_fpath,
# counts_df=counts_df,
# unfolded_overlapping_regions_dict=corrected_overlapping_regions_dict,
# stitching_selected=stitching_selected,
# gene = gene,
# same_dot_radius = same_dot_radius)
# all_futures.append(future)
def get_dots_in_overlapping_regions(counts_df, unfolded_overlapping_regions_dict,
stitching_selected, gene):
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
ref_tiles_df = pd.DataFrame(columns=counts_df.columns)
comp_tiles_df = pd.DataFrame(columns=counts_df.columns)
grpd_df = counts_df.groupby('fov_num')
list_fov = list(grpd_df.groups.keys())
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
if (cpl[0] in list_fov) and (cpl[1] in list_fov):
r_tl = chunk_coords[0]
r_br = chunk_coords[1]
c_tl = chunk_coords[2]
c_br = chunk_coords[3]
barcoded_ref_df = grpd_df.get_group(cpl[0])
barcoded_comp_df = grpd_df.get_group(cpl[1])
overlapping_ref_df = barcoded_ref_df.loc[(barcoded_ref_df[r_tag] > r_tl) & (barcoded_ref_df[r_tag] < r_br)
& (barcoded_ref_df[c_tag] > c_tl) & (barcoded_ref_df[c_tag] < c_br),:]
overlapping_comp_df = barcoded_comp_df.loc[(barcoded_comp_df[r_tag] > r_tl) & (barcoded_comp_df[r_tag] < r_br)
& (barcoded_comp_df[c_tag] > c_tl) & (barcoded_comp_df[c_tag] < c_br),:]
ref_tiles_df = ref_tiles_df.append(overlapping_ref_df)
comp_tiles_df = comp_tiles_df.append(overlapping_comp_df)
return ref_tiles_df, comp_tiles_df
def identify_duplicated_dots(ref_tiles_df,comp_tiles_df,stitching_selected,same_dot_radius):
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
overlapping_ref_coords = ref_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
overlapping_comp_coords = comp_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
dots_ids = comp_tiles_df.loc[:, ['dot_id']].to_numpy()
index = NNDescent(overlapping_ref_coords,metric='euclidean',n_neighbors=1)
indices, dists = index.query(overlapping_comp_coords,k=1)
idx_dists = np.where(dists < same_dot_radius)[0]
dots_id_to_remove = dots_ids[idx_dists]
dots_id_to_remove = list(dots_id_to_remove.reshape(dots_id_to_remove.shape[0],))
return dots_id_to_remove
def remove_overlapping_dots_from_gene(experiment_fpath,counts_df,unfolded_overlapping_regions_dict,
stitching_selected,gene,same_dot_radius):
experiment_fpath = Path(experiment_fpath)
ref_tiles_df, comp_tiles_df = get_dots_in_overlapping_regions(counts_df,unfolded_overlapping_regions_dict,
stitching_selected, gene)
dots_id_to_remove = identify_duplicated_dots(ref_tiles_df,comp_tiles_df,stitching_selected,same_dot_radius)
cleaned_df = counts_df.loc[~counts_df.barcode_reference_dot_id.isin(dots_id_to_remove), :]
fpath = experiment_fpath / 'results' / (experiment_fpath.stem + '_' + gene +'_counts.parquet')
cleaned_df.to_parquet(fpath,index=False)
class r_c_chunking():
"""
    Utility class used to chunk an arbitrary region and obtain the coords of the chunks.
    The chunking can be different between rows and
    columns.
Parameters:
-----------
region_dimensions: np.ndarray
number of rows and columns of the region to chunk
r_chunk_size: float
size of the chunks along the rows
c_chunk_size: float
size of the chunks along the columns
tl_coords: np.ndarray
coordinate of the top left corner of the region to chunk
to use to calculate the coords of the chunks
"""
def __init__(self, region_dimensions, r_chunk_size, c_chunk_size, tl_coords):
self.region_dimensions = region_dimensions
self.r_chunk_size = r_chunk_size
self.c_chunk_size = c_chunk_size
self.tl_coords = tl_coords
@staticmethod
def block_chunks_calculator(dimension,chunk_size):
"""
        Helper function to calculate the size of the chunks created according
        to the length of the vector and the chunk size.
        Parameters:
        -----------
        dimension: int
            Length of the vector to chunk
        chunk_size: int
            Dimension of the chunks
        Returns:
        -----------
        chunks_sizes: tuple
            Sizes of the created chunks. It deals with the case in which the
            expected chunk size does not fit an even number of times into the
            dimension
"""
number_even_chunks=int(dimension//chunk_size)
total_size_even_chunks=number_even_chunks*chunk_size
odd_tile_size=dimension-total_size_even_chunks
        chunks_sizes = list(np.repeat(chunk_size, number_even_chunks - 1))
if odd_tile_size < chunk_size:
chunks_sizes.append(chunk_size+odd_tile_size)
else:
chunks_sizes.append(odd_tile_size)
return tuple(chunks_sizes)
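    # Worked example (sketch): for dimension=1050 and chunk_size=200,
    # number_even_chunks=5, so four full chunks of 200 are kept and the leftover
    # 50 is merged into the last one -> (200, 200, 200, 200, 250).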
def block_chunking(self):
"""
Function used to generate the coords of the images according to the
chunking
Notes:
------
For both lists each np.array contains the coords in the following order:
[row_tl,row_br,col_tl,col_br]
"""
num_r,num_c = self.region_dimensions
self.starting_position = self.tl_coords
self.end_position = self.tl_coords + self.region_dimensions
# Calculate the size of the chunks
r_chunks_size = self.block_chunks_calculator(num_r,self.r_chunk_size)
c_chunks_size = self.block_chunks_calculator(num_c,self.c_chunk_size)
# Calculate the total numbers of chunks
nr_chunks = len(r_chunks_size)
nc_chunks = len(c_chunks_size)
# Coords top left corner (tl)
if nr_chunks == 1:
r_coords_tl = self.starting_position[0]
else:
r_coords_tl = [self.starting_position[0]]
for i in np.arange(1,nr_chunks):
r_coords_tl.append(r_coords_tl[i-1] + self.r_chunk_size )
r_coords_tl = np.array(r_coords_tl)
# r_coords_tl = np.arange(self.starting_position[0],(self.starting_position[0]+self.r_chunk_size*(nr_chunks)),self.r_chunk_size)
if nc_chunks == 1:
c_coords_tl = self.starting_position[1]
else:
c_coords_tl = [self.starting_position[1]]
for i in np.arange(1,nc_chunks):
c_coords_tl.append(c_coords_tl[i-1] + self.c_chunk_size )
c_coords_tl = np.array(c_coords_tl)
# c_coords_tl = np.arange(self.starting_position[1],(self.starting_position[1]+self.c_chunk_size*(nc_chunks)),self.c_chunk_size)
# Coords of all the tl in the image
r_coords_tl_all,c_coords_tl_all = np.meshgrid(r_coords_tl,c_coords_tl,indexing='ij')
self.coords_all_to_test = [r_coords_tl_all,c_coords_tl_all]
# Calculate all the br coords
r_coords_br_all = r_coords_tl_all.copy()
c_coords_br_all = c_coords_tl_all.copy()
for c in np.arange(0,r_coords_tl_all.shape[1]):
r_coords_br_all[:,c] = r_coords_br_all[:,c]+r_chunks_size
for r in np.arange(0,r_coords_tl_all.shape[0]):
c_coords_br_all[r,:] = c_coords_br_all[r,:]+c_chunks_size
# The coords list are generated as:
# row_tl,row_br,col_tl,col_br
# Create a list for the padded coords
self.coords_chunks_list = list()
for r in np.arange(0,r_coords_tl_all.shape[0]):
for c in np.arange(0,r_coords_tl_all.shape[1]):
self.coords_chunks_list.append(np.array([r_coords_tl_all[r][c],\
r_coords_br_all[r][c],\
c_coords_tl_all[r][c],\
c_coords_br_all[r][c]]))
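# Minimal usage sketch for r_c_chunking (all values are illustrative assumptions):
# chunks = r_c_chunking(np.array([1050, 600]), 200, 200, np.array([0, 0]))
# chunks.block_chunking()
# chunks.coords_chunks_list  # one np.array([row_tl, row_br, col_tl, col_br]) per chunk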
class triangles_based_dots_stitching():
"""
    Class used to register the different rounds by searching and
    matching all possible triangles formed by the dots in the reference
    and translated image. This class only runs a registration against the
    reference round.
    The calculation of the triangles is based on list processing and may
    be improved if ported to numpy.
https://stackoverflow.com/questions/43126580/match-set-of-x-y-points-to-another-set-that-is-scaled-rotated-translated-and
"""
def __init__(self, ref_overlapping_counts, comp_overlapping_counts, chunk_coords):
self.ref_overlapping_counts = ref_overlapping_counts
self.comp_overlapping_counts = comp_overlapping_counts
self.chunk_coords = chunk_coords
self.r_tl = self.chunk_coords[0]
self.r_br = self.chunk_coords[1]
self.c_tl = self.chunk_coords[2]
self.c_br = self.chunk_coords[3]
num_r = np.abs(np.abs(self.r_tl) - np.abs(self.r_br))
num_c = np.abs(np.abs(self.c_tl) - np.abs(self.c_br))
self.overlapping_region_dimensions = np.array([num_r,num_c])
if num_r > num_c:
self.chunk_search_ax = 'r'
self.r_chunk_size = num_c
self.c_chunk_size = num_c
self.max_chunk_size = num_r
else:
self.chunk_search_ax = 'c'
self.r_chunk_size = num_r
self.c_chunk_size = num_r
self.max_chunk_size = num_c
self.min_dots_chunk = 6
self.min_error_triangles = 1
self.max_dots = 12
self.logger = logging.getLogger(__name__)
self.tl_coords = np.array([self.r_tl, self.c_tl])
@staticmethod
def obj_fun(pars,x,src):
tx, ty = pars
H = np.array([[1, 0, tx],\
[0, 1, ty]])
src1 = np.c_[src,np.ones(src.shape[0])]
return np.sum( (x - src1.dot(H.T)[:,:2])**2 )
@staticmethod
def apply_transform(pars, src):
tx, ty = pars
H = np.array([[1, 0, tx],\
[0, 1, ty]])
src1 = np.c_[src,np.ones(src.shape[0])]
return src1.dot(H.T)[:,:2]
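    # Note: obj_fun/apply_transform model a pure translation. H is the 2x3 matrix
    # [[1, 0, tx], [0, 1, ty]], so apply_transform simply adds (tx, ty) to every point.
    # Illustrative check (assumed values):
    # apply_transform([2.0, -3.0], np.array([[0.0, 0.0], [1.0, 1.0]]))
    # -> array([[ 2., -3.], [ 3., -2.]])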
@staticmethod
def distance(x1,y1,x2,y2):
return math.sqrt((x2 - x1)**2 + (y2 - y1)**2 )
@staticmethod
def list_subtract(list1,list2):
return np.absolute(np.array(list1)-np.array(list2))
def tri_sides(self,set_x, set_x_tri):
triangles = []
for i in range(len(set_x_tri)):
point1 = set_x_tri[i][0]
point2 = set_x_tri[i][1]
point3 = set_x_tri[i][2]
point1x, point1y = set_x[point1][0], set_x[point1][1]
point2x, point2y = set_x[point2][0], set_x[point2][1]
point3x, point3y = set_x[point3][0], set_x[point3][1]
len1 = self.distance(point1x,point1y,point2x,point2y)
len2 = self.distance(point1x,point1y,point3x,point3y)
len3 = self.distance(point2x,point2y,point3x,point3y)
# you need to normalize in case the ref and the tran
# are warped
#min_side = min(len1,len2,len3)
#len1/=min_side
#len2/=min_side
#len3/=min_side
t=[len1,len2,len3]
t.sort()
triangles.append(t)
return triangles
def identify_matching_coords(self,set_A, set_B, threshold):
match_A_pts = []
match_B_pts = []
set_A_tri = list(itertools.combinations(range(len(set_A)), 3))
set_B_tri = list(itertools.combinations(range(len(set_B)), 3))
A_triangles = self.tri_sides(set_A, set_A_tri)
B_triangles = self.tri_sides(set_B, set_B_tri)
sums = []
for i in range(len(A_triangles)):
for j in range(len(B_triangles)):
k = sum(self.list_subtract(A_triangles[i], B_triangles[j]))
if k < threshold:
sums.append([i,j,k])
# sort by smallest sum
sums = sorted(sums, key=operator.itemgetter(2))
if len(sums):
match_A = set_A_tri[sums[0][0]]
match_B = set_B_tri[sums[0][1]]
for i in range(3):
match_A_pts.append(set_A[match_A[i]])
match_B_pts.append(set_B[match_B[i]])
return (match_A_pts,match_B_pts)
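    # Usage sketch (illustrative point sets, not project data): for two sets that
    # differ only by a shift, the side lengths of corresponding triangles are equal,
    # so the best-matching triangle should recover the same three physical points.
    # ref = np.array([[0, 0], [10, 0], [0, 10], [50, 50]])
    # tran = ref[:3] + np.array([5, 5])
    # match_ref, match_tran = self.identify_matching_coords(ref, tran, threshold=1)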
def calculate_chunks(self):
self.chunks = r_c_chunking(self.overlapping_region_dimensions,self.r_chunk_size,
self.c_chunk_size,self.tl_coords)
self.chunks.block_chunking()
self.coords_chunks_list = self.chunks.coords_chunks_list
def calculate_dots_chunks(self,coords,chunk_coords):
r_tl = chunk_coords[0]
r_br = chunk_coords[1]
c_tl = chunk_coords[2]
c_br = chunk_coords[3]
# Select only the coords in the trimmed region
coords_in_chunk = coords[((r_tl < coords[:,0]) & (coords[:,0]<r_br)\
& (c_tl <coords[:,1]) &(coords[:,1]<c_br)),: ]
return coords_in_chunk
def optimize_chunking(self,ref_coords, tran_coords):
self.enough_dots = False
if self.chunk_search_ax == 'c':
chunk_size = self.c_chunk_size
else:
chunk_size = self.r_chunk_size
while chunk_size < self.max_chunk_size:
chunks = r_c_chunking(self.overlapping_region_dimensions,self.r_chunk_size,
self.c_chunk_size,self.tl_coords)
chunks.block_chunking()
coords_chunks_list = chunks.coords_chunks_list
ref_max_number_dots = []
tran_max_number_dots = []
ref_total = []
tran_total = []
for chunk_coords in coords_chunks_list:
ref_coords_in_chunk = self.calculate_dots_chunks(ref_coords,chunk_coords)
tran_coords_in_chunk = self.calculate_dots_chunks(tran_coords,chunk_coords)
if ref_coords_in_chunk.shape[0] > self.min_dots_chunk and tran_coords_in_chunk.shape[0] > self.min_dots_chunk:
self.enough_dots = True
break
if self.enough_dots:
break
else:
self.enough_dots = False
chunk_size += 200
if self.chunk_search_ax == 'c':
self.c_chunk_size += 200
else:
self.r_chunk_size += 200
if self.enough_dots:
# Collect the ref and tran coords from the chunks with enough dots
self.ref_tran_screening_list = []
for chunk_coords in coords_chunks_list:
ref_coords_in_chunk = self.calculate_dots_chunks(ref_coords,chunk_coords)
tran_coords_in_chunk = self.calculate_dots_chunks(tran_coords,chunk_coords)
if ref_coords_in_chunk.shape[0] > self.min_dots_chunk and tran_coords_in_chunk.shape[0] > self.min_dots_chunk:
self.ref_tran_screening_list.append((ref_coords_in_chunk,tran_coords_in_chunk,chunk_coords))
def register(self,ref_coords,tran_coords):
self.optimize_chunking(ref_coords, tran_coords)
self.completed_registration = False
if self.enough_dots:
match_ref_pts_all = []
match_tran_pts_all = []
# Collect all matching dots in all chunked regions with number of dots above threshold
for ref_coords_in_chunk,tran_coords_in_chunk, chunk_coords in self.ref_tran_screening_list:
match_ref_pts, match_tran_pts = self.identify_matching_coords(ref_coords_in_chunk,tran_coords_in_chunk,self.min_error_triangles)
if len(match_ref_pts) and len(match_tran_pts):
match_ref_pts_all.append(match_ref_pts)
match_tran_pts_all.append(match_tran_pts)
if len(match_ref_pts_all) > self.max_dots:
break
match_ref_pts_all = [pts for grp in match_ref_pts_all for pts in grp]
match_tran_pts_all = [pts for grp in match_tran_pts_all for pts in grp]
if len(match_ref_pts_all):
match_ref_pts_all = np.vstack(match_ref_pts_all)
match_tran_pts_all = np.vstack(match_tran_pts_all)
minimization_output = minimize(self.obj_fun,[0,0],args=(match_ref_pts_all,match_tran_pts_all), method='Nelder-Mead')
if minimization_output.success:
self.tran_registered_coords = self.apply_transform(minimization_output.x, tran_coords)
self.transformation_matrix = minimization_output.x
self.completed_registration = True
else:
self.logger.info(f'chunk {chunk_coords} failed minimization of distances')
else:
self.logger.info(f'chunk {chunk_coords} did not find matching triangles')
else:
            self.logger.info('cannot register the rounds: not enough dots')
self.tran_registered_coords = tran_coords
self.transformation_matrix = np.empty([1,2])
self.transformation_matrix[:] = np.nan
if not self.completed_registration:
            self.logger.info('registration was not possible')
self.tran_registered_coords = tran_coords
self.transformation_matrix = np.empty([1,2])
self.transformation_matrix[:] = np.nan
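# Minimal end-to-end sketch (all inputs are assumed placeholders):
# stitcher = triangles_based_dots_stitching(ref_overlap_df, comp_overlap_df, chunk_coords)
# stitcher.register(ref_coords, tran_coords)
# if stitcher.completed_registration:
#     shifted_coords = stitcher.tran_registered_coords
#     shift = stitcher.transformation_matrix  # estimated [tx, ty]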
def register_adj_tiles(experiment_fpath, roi_num, stitching_channel, idx_reference_tile,overlapping_regions,tile_corners_coords_pxl):
stitching_shift = {}
# tmp = []
experiment_fpath = Path(experiment_fpath)
counts_fpath = experiment_fpath / 'counts'/ ('roi_' + str(roi_num)) / stitching_channel
search_key = '*_fov_' + str(idx_reference_tile) + '.parquet'
    # TODO: add a cross-check for errors
ref_counts_fpath = list(counts_fpath.glob(search_key))[0]
ref_counts_df = | pd.read_parquet(ref_counts_fpath) | pandas.read_parquet |
# The analyser
import pandas as pd
import matplotlib.pyplot as plt
import dill
import os
import numpy as np
from funcs import store_namespace
from funcs import load_namespace
import datetime
from matplotlib.font_manager import FontProperties
from matplotlib import rc
community = 'ResidentialCommunity'
sim_ids = ['MinEne_0-2']
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('path to models', 'teaser_bldgs_residential'))
#
bldg_list = [bldg_list[0], bldg_list[1]]
print(bldg_list)
folder = 'results'
step = 300
nodynprice=0
mon = 'jan'
constr_folder = 'decentr_enemin_constr_'+mon
#bldg_list = bldg_list[0:1]
if mon == 'jan':
start = '1/7/2017 16:30:00'
end = '1/7/2017 19:00:00'
controlseq_time = '01/07/2017 16:55:00'
elif mon == 'mar':
start = '3/1/2017 16:30:00'
end = '3/1/2017 19:00:00'
controlseq_time = '03/01/2017 16:55:00'
elif mon=='nov':
start = '11/20/2017 16:30:00'
end = '11/20/2017 19:00:00'
controlseq_time = '11/20/2017 16:55:00'
sim_range = pd.date_range(start, end, freq = str(step)+'S')
simu_path = "path to simulation folder"
other_input = {}
price = {}
flex_cost = {}
ref_profile = {}
controlseq = {}
opt_control = {}
emutemps = {}
mpctemps = {}
opt_stats = {}
flex_down = {}
flex_up = {}
power = {}
for bldg in bldg_list:
building = bldg+'_'+model_id
for sim_id in sim_ids:
opt_stats[sim_id] = {}
controlseq[sim_id] = {}
mpctemps[sim_id] = {}
emutemps[sim_id] = {}
power[sim_id] = {}
for time_idx in sim_range:
time_idx = time_idx.strftime('%m/%d/%Y %H:%M:%S')
t = time_idx.replace('/','-').replace(':','-').replace(' ','-')
opt_stats[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'opt_stats_'+sim_id+'_'+t))
emutemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'emutemps_'+sim_id+'_'+t))
mpctemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'mpctemps_'+sim_id+'_'+t))
controlseq[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'controlseq_'+sim_id)+'_'+t)
power[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'power_'+sim_id)+'_'+t)
#flex_down[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_down'+sim_id))
#flex_up[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_up'+sim_id))
i=0
for sim_id in sim_ids:
if i == 0:
emutemps_df = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df.index = pd.to_datetime(emutemps_df.index)
emutemps_df.index = emutemps_df.index.shift(1, freq=str(step)+'S')
power_df = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df.index = pd.to_datetime(power_df.index)
opt_stats_df = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df.index = pd.to_datetime(opt_stats_df.index)
power_df.index = power_df.index.shift(1, freq=str(step)+'S')
else:
emutemps_df1 = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df1.index = pd.to_datetime(emutemps_df1.index)
emutemps_df1.index = emutemps_df1.index.shift(1, freq=str(step) + 'S')
emutemps_df = pd.concat([emutemps_df, emutemps_df1])
power_df1 = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df1.index = pd.to_datetime(power_df1.index)
power_df1.index = power_df1.index.shift(1, freq=str(step)+'S')
power_df = pd.concat([power_df, power_df1])
opt_stats_df1 = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df1.index = pd.to_datetime(opt_stats_df1.index)
        opt_stats_df = pd.concat([opt_stats_df, opt_stats_df1])
i = i+1
store_namespace(os.path.join(simu_path, folder,'emutemps'),emutemps_df)
store_namespace(os.path.join(simu_path, folder,'mpctemps'),mpctemps)
store_namespace(os.path.join(simu_path, folder,'opt_stats'),opt_stats_df)
constraints = {}
for bldg in bldg_list:
setpoint_dict = load_namespace(os.path.join(simu_path, constr_folder, 'constraints_'+bldg+'_'+model_id)).data['TAir']
constraints[bldg] = {}
for param in setpoint_dict.keys():
constraints[bldg]['hi'] = setpoint_dict['Slack_LTE'].display_data().resample(str(step)+'S').ffill()
constraints[bldg]['lo'] = setpoint_dict['Slack_GTE'].display_data().resample(str(step)+'S').ffill()
constraints_df = pd.DataFrame.from_dict(constraints, orient = 'index')
#print(constraints_df['hi'].values)
weather = load_namespace(os.path.join(simu_path, folder, 'weather'))
price = load_namespace(os.path.join(simu_path, folder, 'sim_price'))
price = price.display_data()
if nodynprice==1:
price = pd.Series(50, price.index,name='pi_e')
#print(weather)
# """""""""""" Comfort violations """""""""""""""""""
#print(constraints_df)
#print(emutemps_df)
violation = {}
#print(constraints_df.loc['Detached_0']['lo'])
for bldg in bldg_list:
violation[bldg] = {}
for time in emutemps_df[bldg+'_'+model_id].index:
#print(emutemps_df[bldg+'_'+model_id][time])
emutemp = emutemps_df[bldg+'_'+model_id][time]
#emutemp = emutemp[time]
#emutemp = emutemp.values()
#print(emutemp)
constraint_hi = constraints_df.loc[bldg]['hi'][time]-273.15
        constraint_lo = constraints_df.loc[bldg]['lo'][time]-273.15
        #print(time)
#print(constraint_hi)
#print(constraint_lo)
if emutemp > constraint_hi:
violation[bldg][time] = (emutemp - constraint_hi)*step/3600
elif emutemp < constraint_lo:
violation[bldg][time] = (constraint_lo-emutemp)*step/3600
else:
violation[bldg][time] = 0
violation_df = pd.DataFrame.from_dict(violation, orient = 'columns')
print(violation_df)
store_namespace(os.path.join(simu_path, folder,'violation_df'),violation_df)
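# Note: each violation entry is (temperature excess in degC) * (step length in hours),
# i.e. degree-hours of comfort violation accumulated over one control step.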
aggr = {}
dt = []
#print(controlseq.keys())
for time in controlseq[sim_ids[0]].keys():
control_start = datetime.datetime.strptime(time, '%m/%d/%Y %H:%M:%S')
control_end = datetime.datetime.strptime(time, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = 10*int(step))
dt.append(control_start)
aggr[time] = pd.DataFrame.from_dict(controlseq[sim_ids[0]][time],orient='columns')
dt = pd.DataFrame(dt,columns = ['Dates'])
dt = dt.set_index(pd.DatetimeIndex(dt['Dates']))
index = dt.index
index = index.tz_localize('UTC')
index = index.sort_values()
mast_index = index
last_str = index[-1].strftime('%m/%d/%Y %H:%M:%S')
#real_cont = pd.DataFrame.from_dict(controlseq[sim_ids[0]][last_str],orient='columns')[index[0]:index[-1]]
real_cont = power_df
real_cont_aggr = real_cont.sum(axis=1)
aggrcom = {}
for time in aggr.keys():
aggrcom[time] = aggr[time].sum(axis=1)
store_namespace(os.path.join(simu_path,folder,'real_cont'), real_cont)
store_namespace(os.path.join(simu_path,folder,'aggr'), aggr)
store_namespace(os.path.join(simu_path,folder,'aggrcom'), aggrcom)
# --------------------- Flexibility factor and peak power ---------------
if mon == 'jan':
ff_date = '01/07/2017'
if mon == 'mar':
ff_date = '03/01/2017'
if mon == 'nov':
ff_date = '11/20/2017'
hc_start = datetime.datetime.strptime(ff_date + ' 18:00:00', '%m/%d/%Y %H:%M:%S')
hc_end = index[-1]
lc_start = index[0]
lc_end = datetime.datetime.strptime(ff_date + ' 17:59:00', '%m/%d/%Y %H:%M:%S')
peak_comm = real_cont_aggr.max()
peak_time_comm = real_cont_aggr.idxmax()
peak_time_comm_hh = real_cont_aggr.resample(str(step)+'S').mean().idxmax()
peak_comm_hh = real_cont_aggr.resample(str(step)+'S').mean().max()
peak_comm =(peak_comm, peak_time_comm)
peak_comm_hh =(peak_comm_hh, peak_time_comm_hh)
print(peak_comm)
print(peak_comm_hh)
peak = {}
peak_hh = {}
cons_hc = real_cont[hc_start:hc_end]
cons_lc = real_cont[lc_start:lc_end]
print(cons_hc)
real_cont_hh = real_cont.resample(str(step)+'S').mean()
for bldg in bldg_list:
bldg = bldg+'_'+model_id
peak_val = real_cont.loc[:][bldg].max()
peak_idx = real_cont.loc[:][bldg].idxmax()
peak_hh_val = real_cont_hh.loc[:][bldg].max()
peak_hh_idx = real_cont_hh.loc[:][bldg].idxmax()
peak[bldg] = (peak_val, peak_idx)
peak_hh[bldg] = (peak_hh_val, peak_hh_idx)
peak = pd.DataFrame.from_dict(peak, orient='index')
peak_hh = pd.DataFrame.from_dict(peak_hh, orient='index')
print(peak_hh)
print(peak)
# -----------------------------------------------
print('%%%%%%%%%---- Plots ----%%%%%%%')
fig_folder = os.path.join(simu_path, folder, 'figs')
#print(controlseq)
#print(flex_cost.display_data())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Prices
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
ax1 = ax.twinx()
i = 0
plot_times=[0,4,8,12,18]
i=0
for bldg in [bldg_list[0]]:
ax.plot(real_cont.index, real_cont[bldg+'_'+model_id].values/1000,'-', label='ref_profile')
#resamp_index = index.asfreq('1800S')
ax.set_ylabel('Heat demand [kW]', fontsize=18)
ax1.plot(price.index, price.values, '--o', label="Price")
#ax1.plot(flex_cost[bldg_list[0]+'_'+model_id].index, flex_cost[bldg_list[0]+'_'+model_id].values, '--o', label="Flex Cost")
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax1.set_ylabel(r'Price [pounds / kWh]', fontsize=18)
#ax.legend(fontsize=14, loc = 0)
#plt.legend(handles,labels, bbox_to_anchor = (1.04,0.5), loc ='center left')
plt.xticks(rotation=35)
plt.xlabel("Time",fontsize=18)
plt.title("Decentralised Algorithm:\n Heat demand under dynamic pricing and loadshaping",fontsize=22)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.savefig(os.path.join(simu_path, folder, "mincost_price.png"))
plt.clf()
#plt.close()
#plt.close('all')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Outside temperature and optimised control sequence
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
ax1 = ax.twinx()
#aggr = {}
i = 0
#price = price.display_data()
plot_times=[0,4,8,12,18]
#print(controlseq.keys())
i=0
ax.plot(real_cont_aggr.index, real_cont_aggr.values/1000,'-x', label='realised')
ax.set_ylabel('Heat demand [kW]', fontsize=18)
ax1.plot(price.index, price.values, '--o', label="Price")
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax1.set_ylabel(r'Price [pounds / kWh]', fontsize=18)
#ax.legend(fontsize=14, loc = 0)
#plt.legend(handles,labels, bbox_to_anchor = (1.04,0.5), loc ='center left')
plt.xticks(rotation=35)
plt.xlabel("Time",fontsize=18)
plt.title("Decentralised Algorithm:\n Power demand",fontsize=22)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.savefig(os.path.join(simu_path, folder, "mincost_price_aggr.png"))
plt.clf()
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Temperatures
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
#ax1 = ax.twinx()
plot_bldgs = [0]
plot_times=[0,1,2,3,4]
i= 0
#print(emutemps)
for sim_id in sim_ids:
i = 0
for time in mpctemps[sim_id].keys():
j = 0
for bldg in mpctemps[sim_id][time].keys():
if j in plot_bldgs:
ax.plot(mpctemps[sim_id][time][bldg].index, mpctemps[sim_id][time][bldg].values, '-' , label='mpc_'+bldg)
j = j+1
i = i+1
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
#ax.legend(fontsize=14)
plt.xlabel("Time",fontsize=18)
plt.ylabel(r"Temperature [$^\circ$C]",fontsize=18)
plt.title("Predicted Temperatures with Cost Minimisation",fontsize=22)
plt.xticks(rotation=35)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.legend(handles,labels, bbox_to_anchor = (1.04,0.5), loc ='center left')
plt.savefig(os.path.join(simu_path, folder, "temps_mpc.pdf"),bbox_inches="tight")
plt.savefig(os.path.join(simu_path, folder, "temps_mpc.png"),bbox_inches="tight")
plt.clf()
#print(ref_heatinput)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Temperatures
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
#ax1 = ax.twinx()
plot_bldgs = [0]
i= 0
#print(emutemps)
for sim_id in sim_ids:
if i == 0:
emutemps_df = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df.index = pd.to_datetime(emutemps_df.index)
emutemps_df.index = emutemps_df.index.shift(1, freq=str(step)+'S')
else:
emutemps_df1 = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df1.index = | pd.to_datetime(emutemps_df1.index) | pandas.to_datetime |
# -*- coding: utf-8 -*-
'''
Load and inspect the data
'''
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Set the random seed
import random
np.random.seed(1234)
random.seed(1234)
# Load the data
train = pd.read_csv('./data/train.tsv', sep='\t')
test = pd.read_csv('./data/test.tsv', sep='\t')
# Inspect the data
print(train.head())
print(train.dtypes)
'''
Add features: sepal/petal areas and length-to-width ratios
'''
# Import libraries
from sklearn.preprocessing import StandardScaler
# Sepal and petal areas
train['sepal_area'] = train['sepal length in cm'] * train['sepal width in cm']
train['petal_area'] = train['petal length in cm'] * train['petal width in cm']
# Length-to-width ratios
train['sepal_length/width'] = train['sepal length in cm'] / train['sepal width in cm']
train['petal_length/width'] = train['petal length in cm'] / train['petal width in cm']
# Select the explanatory variables
X_train = train.drop(['class',
'id',
'sepal length in cm',
'sepal width in cm',
'petal length in cm',
'petal width in cm'], axis=1)
Y_train = train['class']
# Standardize the explanatory variables
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
# Convert the standardized explanatory variables back to a DataFrame
X_train_std = | pd.DataFrame(X_train_std) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
to_clean = ["old_mturk_first_person", "remaining_first_person"]
dir = "data_to_curate"
delta = 0.21
dfs = []
all_scenarios = []
all_labels = []
for c in to_clean:
file = os.path.join(dir, c + ".tsv")
df = | pd.read_csv(file, sep="\t", header=None) | pandas.read_csv |
from datetime import datetime
from enum import Enum
from typing import List, Union, Dict
import numpy as np
import pandas as pd
from dataclasses import dataclass
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
from ira.analysis.commissions import TransactionCostsCalculator, ZeroTCC
from ira.simulator.SignalTester import Tracker, SimulationResult
from ira.utils.nb_functions import z_backtest
from ira.utils.ui_utils import red, green, yellow, blue
from ira.utils.utils import mstruct, runtime_env
from qlearn import MarketDataComposer
from qlearn.simulation.multiproc import Task, RunningInfoManager
class _Types(Enum):
UKNOWN = 'unknown'
LIST = 'list'
TRACKER = 'tracker'
SIGNAL = 'signal'
ESTIMATOR = 'estimator'
def _type(obj) -> _Types:
if obj is None:
t = _Types.UKNOWN
elif isinstance(obj, (list, tuple)):
t = _Types.LIST
elif isinstance(obj, Tracker):
t = _Types.TRACKER
elif isinstance(obj, (pd.DataFrame, pd.Series)):
t = _Types.SIGNAL
elif isinstance(obj, (Pipeline, BaseEstimator)):
t = _Types.ESTIMATOR
elif isinstance(obj, dict):
        # when the tracker has a setup for each instrument {str -> Tracker}
if all([isinstance(k, str) & isinstance(v, Tracker) for k, v in obj.items()]):
t = _Types.TRACKER
else:
t = _Types.UKNOWN
else:
t = _Types.UKNOWN
return t
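# Rough dispatch sketch (illustrative calls; the objects are assumed to exist):
# _type(pd.Series(dtype=float))   -> _Types.SIGNAL
# _type(my_sklearn_pipeline)      -> _Types.ESTIMATOR
# _type({'EURUSD': my_tracker})   -> _Types.TRACKER for a per-instrument mapping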
def start_stop_sigs(data: Dict[str, pd.DataFrame], start=None, stop=None):
"""
Generate stub signals (NaNs mainly for backtester progress)
"""
r = None
if stop is not None:
try:
stop = str(pd.Timestamp(start) + pd.Timedelta(stop))
except:
pass
for i, d in data.items():
start = d.index[0] if start is None else start
stop = d.index[-1] if stop is None else stop
d_sel = d[start:stop]
if d_sel.empty:
raise ValueError(f">>> There is no '{i}' historical data for period {start} : {stop} !")
dx = max(len(d_sel) // 99, 1)
ix = d_sel.index[::dx]
last_idx = d_sel.index[-1]
if last_idx not in ix:
ix = ix.append( | pd.DatetimeIndex([last_idx]) | pandas.DatetimeIndex |
#!/usr/bin/env python
import os,sys
import pandas as pd
import argparse
daismdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,daismdir)
import daism.modules.simulation as simulation
import daism.modules.training as training
import daism.modules.prediction as prediction
#--------------------------------------
#--------------------------------------
# main()
parser = argparse.ArgumentParser(description='DAISM-XMBD deconvolution.')
subparsers = parser.add_subparsers(dest='subcommand', help='Select one of the following sub-commands')
# create the parser for the "one-stop DAISM-DNN" command
parser_a = subparsers.add_parser('DAISM', help='one-stop DAISM-XMBD',description="one-stop DAISM-XMBD")
parser_a.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_a.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_a.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_a.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_a.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_a.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_a.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_a.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "DAISM simulation" command
parser_b = subparsers.add_parser('DAISM_simulation', help='training set simulation using DAISM strategy',description='training set simulation using DAISM strategy.')
parser_b.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_b.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_b.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_b.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_b.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_b.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_b.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "Generic simulation" command
parser_c = subparsers.add_parser('Generic_simulation', help='training set simulation using purified cells only',description='training set simulation using purified cells only.')
parser_c.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_c.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_c.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_c.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_c.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "training" command
parser_d = subparsers.add_parser('training', help='train DNN model',description='train DNN model.')
parser_d.add_argument("-trainexp", type=str, help="Simulated samples expression file", default=None)
parser_d.add_argument("-trainfra", type=str, help="Simulated samples ground truth file", default=None)
parser_d.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_d.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "prediction" command
parser_e = subparsers.add_parser('prediction', help='predict using a trained model',description='predict using a trained model.')
parser_e.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_e.add_argument("-model", type=str, help="Deep-learing model file trained by DAISM", default="../output/DAISM_model.pkl")
parser_e.add_argument("-celltype", type=str, help="Model celltypes", default="../output/DAISM_model_celltypes.txt")
parser_e.add_argument("-feature", type=str, help="Model feature", default="../output/DAISM_model_feature.txt")
parser_e.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_e.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
class Options:
random_seed = 777
min_f = 0.01
max_f = 0.99
lr = 1e-4
batchsize = 64
num_epoches = 500
ncuda = 0
def main():
# parse some argument lists
inputArgs = parser.parse_args()
if os.path.exists(inputArgs.outdir)==False:
os.mkdir(inputArgs.outdir)
#### DAISM modules ####
if (inputArgs.subcommand=='DAISM'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = pd.read_csv(inputArgs.califra, sep="\t", index_col=0)
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode = "daism"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\t')
print('Writing training data...')
# Save training data
mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\t')
mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\t')
# Training model
model = training.dnn_training(mixsam,mixfra,Options.random_seed,inputArgs.outdir+"/output/",Options.num_epoches,Options.lr,Options.batchsize,Options.ncuda,inputArgs.net)
# Save signature genes and celltype labels
pd.DataFrame(list(mixfra.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_celltypes.txt',sep='\t')
pd.DataFrame(list(mixsam.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_feature.txt',sep='\t')
# Prediction
result = prediction.dnn_prediction(model, test_sample, list(mixfra.index), list(mixsam.index),Options.ncuda)
# Save predicted result
result.to_csv(inputArgs.outdir+'/output/DAISM_result.txt',sep='\t')
############################
#### simulation modules ####
############################
#### DAISM simulation modules ####
if (inputArgs.subcommand=='DAISM_simulation'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = pd.read_csv(inputArgs.califra, sep="\t", index_col=0)
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode ="daism"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\t')
print('Writing training data...')
# Save training data
mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\t')
mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\t')
#### Generic simulation modules ####
if (inputArgs.subcommand=='Generic_simulation'):
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode = "generic"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.generic_simulation(C_all,Options.random_seed,inputArgs.N,inputArgs.platform,commongenes)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/Generic_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/Generic_celltypes.txt',sep='\t')
print('Writing training data...')
# Save training data
mixsam.to_csv(inputArgs.outdir+'/output/Generic_mixsam.txt',sep='\t')
mixfra.to_csv(inputArgs.outdir+'/output/Generic_mixfra.txt',sep='\t')
##########################
#### training modules ####
##########################
if (inputArgs.subcommand=='training'):
# Load training data
mixsam = pd.read_csv(inputArgs.trainexp, sep="\t", index_col=0)
mixfra = pd.read_csv(inputArgs.trainfra, sep="\t", index_col=0)
# Training model
model = training.dnn_training(mixsam,mixfra,Options.random_seed,inputArgs.outdir+"/output/",Options.num_epoches,Options.lr,Options.batchsize,Options.ncuda,inputArgs.net)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(list(mixfra.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_celltypes.txt',sep='\t')
pd.DataFrame(list(mixsam.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_feature.txt',sep='\t')
############################
#### prediction modules ####
############################
if (inputArgs.subcommand=='prediction'):
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Load signature genes and celltype labels
feature = pd.read_csv(inputArgs.feature,sep='\t')['0']
celltypes = | pd.read_csv(inputArgs.celltype,sep='\t') | pandas.read_csv |
#!/usr/bin/python
"""
Copyright 2013 <NAME> (aka Daniel)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, division
import unittest
import slicedpy.utils as utils
import pandas as pd
class TestUtils(unittest.TestCase):
def test_find_nearest(self):
N = 24
idx = pd.date_range('2013-01-01', periods=N, freq='H')
series = pd.Series(None, index=idx)
for i in range(N):
nearest = utils.find_nearest(series, idx[i])
self.assertEqual(nearest, i)
idx_ten_mins = pd.date_range('2013-01-01 00:10', periods=N, freq='H')
for i in range(N):
nearest = utils.find_nearest(series, idx_ten_mins[i])
self.assertEqual(nearest, i)
idx_fifty_mins = | pd.date_range('2013-01-01 00:50', periods=N, freq='H') | pandas.date_range |
from datetime import datetime
import pytest
from pytz import utc
import pandas._testing as tm
from pandas.tseries.holiday import (
MO,
SA,
AbstractHolidayCalendar,
DateOffset,
EasterMonday,
GoodFriday,
Holiday,
HolidayCalendarFactory,
Timestamp,
USColumbusDay,
USLaborDay,
USMartinLutherKingJr,
USMemorialDay,
USPresidentsDay,
USThanksgivingDay,
get_calendar,
next_monday,
)
def _check_holiday_results(holiday, start, end, expected):
"""
Check that the dates for a given holiday match in date and timezone.
Parameters
----------
holiday : Holiday
The holiday to check.
start : datetime-like
The start date of range in which to collect dates for a given holiday.
end : datetime-like
The end date of range in which to collect dates for a given holiday.
expected : list
The list of dates we expect to get.
"""
assert list(holiday.dates(start, end)) == expected
# Verify that timezone info is preserved.
assert list(
holiday.dates(utc.localize(Timestamp(start)), utc.localize(Timestamp(end)))
) == [utc.localize(dt) for dt in expected]
@pytest.mark.parametrize(
"holiday,start_date,end_date,expected",
[
(
USMemorialDay,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
],
),
(
Holiday("July 4th Eve", month=7, day=3),
"2001-01-01",
"2003-03-03",
[Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00")],
),
(
Holiday("July 4th Eve", month=7, day=3, days_of_week=(0, 1, 2, 3)),
"2001-01-01",
"2008-03-03",
[
Timestamp("2001-07-03 00:00:00"),
Timestamp("2002-07-03 00:00:00"),
Timestamp("2003-07-03 00:00:00"),
Timestamp("2006-07-03 00:00:00"),
Timestamp("2007-07-03 00:00:00"),
],
),
(
EasterMonday,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
Timestamp("2011-04-25 00:00:00"),
Timestamp("2012-04-09 00:00:00"),
Timestamp("2013-04-01 00:00:00"),
Timestamp("2014-04-21 00:00:00"),
Timestamp("2015-04-06 00:00:00"),
Timestamp("2016-03-28 00:00:00"),
Timestamp("2017-04-17 00:00:00"),
Timestamp("2018-04-02 00:00:00"),
Timestamp("2019-04-22 00:00:00"),
Timestamp("2020-04-13 00:00:00"),
],
),
(
GoodFriday,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
Timestamp("2011-04-22 00:00:00"),
Timestamp("2012-04-06 00:00:00"),
Timestamp("2013-03-29 00:00:00"),
Timestamp("2014-04-18 00:00:00"),
Timestamp("2015-04-03 00:00:00"),
Timestamp("2016-03-25 00:00:00"),
Timestamp("2017-04-14 00:00:00"),
Timestamp("2018-03-30 00:00:00"),
Timestamp("2019-04-19 00:00:00"),
Timestamp("2020-04-10 00:00:00"),
],
),
(
USThanksgivingDay,
datetime(2011, 1, 1),
datetime(2020, 12, 31),
[
datetime(2011, 11, 24),
datetime(2012, 11, 22),
datetime(2013, 11, 28),
datetime(2014, 11, 27),
datetime(2015, 11, 26),
datetime(2016, 11, 24),
datetime(2017, 11, 23),
datetime(2018, 11, 22),
datetime(2019, 11, 28),
datetime(2020, 11, 26),
],
),
],
)
def test_holiday_dates(holiday, start_date, end_date, expected):
_check_holiday_results(holiday, start_date, end_date, expected)
@pytest.mark.parametrize(
"holiday,start,expected",
[
(USMemorialDay, datetime(2015, 7, 1), []),
(USMemorialDay, "2015-05-25", "2015-05-25"),
(USLaborDay, datetime(2015, 7, 1), []),
(USLaborDay, "2015-09-07", "2015-09-07"),
(USColumbusDay, datetime(2015, 7, 1), []),
(USColumbusDay, "2015-10-12", "2015-10-12"),
(USThanksgivingDay, datetime(2015, 7, 1), []),
(USThanksgivingDay, "2015-11-26", "2015-11-26"),
(USMartinLutherKingJr, datetime(2015, 7, 1), []),
(USMartinLutherKingJr, "2015-01-19", "2015-01-19"),
(USPresidentsDay, datetime(2015, 7, 1), []),
(USPresidentsDay, "2015-02-16", "2015-02-16"),
(GoodFriday, datetime(2015, 7, 1), []),
(GoodFriday, "2015-04-03", "2015-04-03"),
(EasterMonday, "2015-04-06", "2015-04-06"),
(EasterMonday, datetime(2015, 7, 1), []),
(EasterMonday, "2015-04-05", []),
("New Years Day", "2015-01-01", "2015-01-01"),
("New Years Day", "2010-12-31", "2010-12-31"),
("New Years Day", datetime(2015, 7, 1), []),
("New Years Day", "2011-01-01", []),
("July 4th", "2015-07-03", "2015-07-03"),
("July 4th", datetime(2015, 7, 1), []),
("July 4th", "2015-07-04", []),
("Veterans Day", "2012-11-12", "2012-11-12"),
("Veterans Day", datetime(2015, 7, 1), []),
("Veterans Day", "2012-11-11", []),
("Christmas", "2011-12-26", "2011-12-26"),
("Christmas", datetime(2015, 7, 1), []),
("Christmas", "2011-12-25", []),
],
)
def test_holidays_within_dates(holiday, start, expected):
# see gh-11477
#
# Fix holiday behavior where holiday.dates returned dates outside
# start/end date, or observed rules could not be applied because the
# holiday was not in the original date range (e.g., 7/4/2015 -> 7/3/2015).
if isinstance(holiday, str):
calendar = get_calendar("USFederalHolidayCalendar")
holiday = calendar.rule_from_name(holiday)
if isinstance(expected, str):
expected = [Timestamp(expected)]
_check_holiday_results(holiday, start, start, expected)
@pytest.mark.parametrize(
"transform", [lambda x: x.strftime("%Y-%m-%d"), lambda x: | Timestamp(x) | pandas.tseries.holiday.Timestamp |
# Imports: standard library
import os
import re
import logging
from abc import ABC
from typing import Any, Set, Dict, List, Tuple, Optional
from datetime import datetime
# Imports: third party
import h5py
import numpy as np
import pandas as pd
import unidecode
# Imports: first party
from ml4c3.utils import get_unix_timestamps
from definitions.edw import EDW_FILES, MED_ACTIONS
from definitions.icu import ALARMS_FILES, ICU_SCALE_UNITS
from definitions.globals import TIMEZONE
from ingest.icu.data_objects import (
Event,
Procedure,
Medication,
StaticData,
Measurement,
BedmasterAlarm,
BedmasterSignal,
)
from tensorize.bedmaster.bedmaster_stats import BedmasterStats
from tensorize.bedmaster.match_patient_bedmaster import PatientBedmasterMatcher
# pylint: disable=too-many-branches, dangerous-default-value
class Reader(ABC):
"""
Parent class for our Readers class.
As an abstract class, it can't be directly instanced. Its children
should be used instead.
"""
@staticmethod
def _ensure_contiguous(data: np.ndarray) -> np.ndarray:
if len(data) > 0:
dtype = Any
try:
data = data.astype(float)
if all(x.is_integer() for x in data):
dtype = int
else:
dtype = float
except ValueError:
dtype = "S"
try:
data = np.ascontiguousarray(data, dtype=dtype)
except (UnicodeEncodeError, SystemError):
logging.info("Unknown character. Not ensuring contiguous array.")
new_data = []
for element in data:
new_data.append(unidecode.unidecode(str(element)))
data = np.ascontiguousarray(new_data, dtype="S")
except ValueError:
logging.exception(
f"Unknown method to convert np.ndarray of "
f"{dtype} objects to numpy contiguous type.",
)
raise
return data
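# Behaviour sketch for Reader._ensure_contiguous (illustrative inputs):
#   np.array(['1', '2'])   -> contiguous int array [1, 2]
#   np.array(['1.5', '2']) -> contiguous float array [1.5, 2.0]
#   np.array(['a', 'b'])   -> contiguous byte-string array [b'a', b'b']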
class EDWReader(Reader):
"""
Implementation of the Reader for EDW data.
Usage:
>>> reader = EDWReader('MRN')
>>> hr = reader.get_measurement('HR')
"""
def __init__(
self,
path: str,
mrn: str,
csn: str,
med_file: str = EDW_FILES["med_file"]["name"],
move_file: str = EDW_FILES["move_file"]["name"],
adm_file: str = EDW_FILES["adm_file"]["name"],
demo_file: str = EDW_FILES["demo_file"]["name"],
vitals_file: str = EDW_FILES["vitals_file"]["name"],
lab_file: str = EDW_FILES["lab_file"]["name"],
surgery_file: str = EDW_FILES["surgery_file"]["name"],
other_procedures_file: str = EDW_FILES["other_procedures_file"]["name"],
transfusions_file: str = EDW_FILES["transfusions_file"]["name"],
events_file: str = EDW_FILES["events_file"]["name"],
medhist_file: str = EDW_FILES["medhist_file"]["name"],
surghist_file: str = EDW_FILES["surghist_file"]["name"],
socialhist_file: str = EDW_FILES["socialhist_file"]["name"],
):
"""
Init EDW Reader.
:param path: absolute path of files.
:param mrn: MRN of the patient.
:param csn: CSN of the patient visit.
:param med_file: file containing the medicines data from the patient.
Can be inferred if None.
:param move_file: file containing the movements of the patient
(admission, transfer and discharge) from the patient.
Can be inferred if None.
:param demo_file: file containing the demographic data from
the patient. Can be inferred if None.
:param vitals_file: file containing the vital signals from
the patient. Can be inferred if None.
:param lab_file: file containing the laboratory signals from
the patient. Can be inferred if None.
:param adm_file: file containing the admission data from
the patient. Can be inferred if None.
:param surgery_file: file containing the surgeries performed to
the patient. Can be inferred if None.
:param other_procedures_file: file containing procedures performed to
the patient. Can be inferred if None.
:param transfusions_file: file containing the transfusions performed to
the patient. Can be inferred if None.
        :param events_file: file containing the events during
the patient stay. Can be inferred if None.
:param medhist_file: file containing the medical history information of the
patient. Can be inferred if None.
:param surghist_file: file containing the surgical history information of the
patient. Can be inferred if None.
:param socialhist_file: file containing the social history information of the
patient. Can be inferred if None.
"""
self.path = path
self.mrn = mrn
self.csn = csn
self.move_file = self.infer_full_path(move_file)
self.demo_file = self.infer_full_path(demo_file)
self.vitals_file = self.infer_full_path(vitals_file)
self.lab_file = self.infer_full_path(lab_file)
self.med_file = self.infer_full_path(med_file)
self.adm_file = self.infer_full_path(adm_file)
self.surgery_file = self.infer_full_path(surgery_file)
self.other_procedures_file = self.infer_full_path(other_procedures_file)
self.transfusions_file = self.infer_full_path(transfusions_file)
self.events_file = self.infer_full_path(events_file)
self.medhist_file = self.infer_full_path(medhist_file)
self.surghist_file = self.infer_full_path(surghist_file)
self.socialhist_file = self.infer_full_path(socialhist_file)
self.timezone = TIMEZONE
def infer_full_path(self, file_name: str) -> str:
"""
Infer a file name from MRN and type of data.
Used if a file is not specified on the input.
:param file_name: <str> 8 possible options:
'medications.csv', 'demographics.csv', 'labs.csv',
'flowsheet.scv', 'admission-vitals.csv',
'surgery.csv','procedures.csv', 'transfusions.csv'
:return: <str> the inferred path
"""
if not file_name.endswith(".csv"):
file_name = f"{file_name}.csv"
full_path = os.path.join(self.path, self.mrn, self.csn, file_name)
return full_path
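    # Example (sketch, values assumed): with path='/data', mrn='12345678', csn='987654',
    # infer_full_path('labs') returns '/data/12345678/987654/labs.csv'.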
def list_vitals(self) -> List[str]:
"""
List all the vital signs taken from the patient.
:return: <List[str]> List with all the available vital signals
from the patient
"""
signal_column = EDW_FILES["vitals_file"]["columns"][0]
vitals_df = pd.read_csv(self.vitals_file)
# Remove measurements out of dates
time_column = EDW_FILES["vitals_file"]["columns"][3]
admit_column = EDW_FILES["adm_file"]["columns"][3]
discharge_column = EDW_FILES["adm_file"]["columns"][4]
admission_df = pd.read_csv(self.adm_file)
init_date = admission_df[admit_column].values[0]
end_date = admission_df[discharge_column].values[0]
vitals_df = vitals_df[vitals_df[time_column] >= init_date]
if str(end_date) != "nan":
vitals_df = vitals_df[vitals_df[time_column] <= end_date]
return list(vitals_df[signal_column].astype("str").str.upper().unique())
def list_labs(self) -> List[str]:
"""
List all the lab measurements taken from the patient.
:return: <List[str]> List with all the available lab measurements
from the patient.
"""
signal_column = EDW_FILES["lab_file"]["columns"][0]
labs_df = pd.read_csv(self.lab_file)
return list(labs_df[signal_column].astype("str").str.upper().unique())
def list_medications(self) -> List[str]:
"""
List all the medications given to the patient.
:return: <List[str]> List with all the medications on
the patients record
"""
signal_column = EDW_FILES["med_file"]["columns"][0]
status_column = EDW_FILES["med_file"]["columns"][1]
med_df = pd.read_csv(self.med_file)
med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
return list(med_df[signal_column].astype("str").str.upper().unique())
def list_surgery(self) -> List[str]:
"""
List all the types of surgery performed to the patient.
:return: <List[str]> List with all the event types associated
with the patient
"""
return self._list_procedures(self.surgery_file, "surgery_file")
def list_other_procedures(self) -> List[str]:
"""
List all the types of procedures performed to the patient.
:return: <List[str]> List with all the event types associated
with the patient
"""
return self._list_procedures(
self.other_procedures_file,
"other_procedures_file",
)
def list_transfusions(self) -> List[str]:
"""
List all the transfusions types that have been done on the patient.
:return: <List[str]> List with all the transfusions type of
the patient
"""
return self._list_procedures(self.transfusions_file, "transfusions_file")
@staticmethod
def _list_procedures(file_name, file_key) -> List[str]:
"""
Filter and list all the procedures in the given file.
"""
signal_column, status_column, start_column, end_column = EDW_FILES[file_key][
"columns"
]
data = pd.read_csv(file_name)
data = data[data[status_column].isin(["Complete", "Completed"])]
data = data.dropna(subset=[start_column, end_column])
return list(data[signal_column].astype("str").str.upper().unique())
def list_events(self) -> List[str]:
"""
List all the event types during the patient stay.
:return: <List[str]> List with all the events type.
"""
signal_column, _ = EDW_FILES["events_file"]["columns"]
data = pd.read_csv(self.events_file)
return list(data[signal_column].astype("str").str.upper().unique())
def get_static_data(self) -> StaticData:
"""
Get the static data from the EDW csv file (admission + demographics).
:return: <StaticData> wrapped information
"""
movement_df = pd.read_csv(self.move_file)
admission_df = pd.read_csv(self.adm_file)
demographics_df = pd.read_csv(self.demo_file)
# Obtain patient's movement (location and when they move)
department_id = np.array(movement_df["DepartmentID"], dtype=int)
department_nm = np.array(movement_df["DepartmentDSC"], dtype="S")
room_bed = np.array(movement_df["BedLabelNM"], dtype="S")
move_time = np.array(movement_df["TransferInDTS"], dtype="S")
# Convert weight from ounces to pounds
weight = float(admission_df["WeightPoundNBR"].values[0]) / 16
# Convert height from feet & inches to meters
height = self._convert_height(admission_df["HeightTXT"].values[0])
admin_type = admission_df["HospitalAdmitTypeDSC"].values[0]
# Find possible diagnosis at admission
diag_info = admission_df["AdmitDiagnosisTXT"].dropna().drop_duplicates()
if list(diag_info):
diag_info = diag_info.astype("str")
admin_diag = diag_info.str.cat(sep="; ")
else:
admin_diag = "UNKNOWN"
admin_date = admission_df["HospitalAdmitDTS"].values[0]
birth_date = demographics_df["BirthDTS"].values[0]
race = demographics_df["PatientRaceDSC"].values[0]
sex = demographics_df["SexDSC"].values[0]
end_date = admission_df["HospitalDischargeDTS"].values[0]
# Check whether it exists a deceased date or not
end_stay_type = (
"Alive"
if str(demographics_df["DeathDTS"].values[0]) == "nan"
else "Deceased"
)
# Find local time, if patient is still in hospital, take today's date
if str(end_date) != "nan":
offsets = self._get_local_time(admin_date[:-1], end_date[:-1])
else:
today_date = datetime.today().strftime("%Y-%m-%d %H:%M:%S.%f")
offsets = self._get_local_time(admin_date[:-1], today_date)
offsets = list(set(offsets)) # Take unique local times
local_time = np.empty(0)
for offset in offsets:
local_time = np.append(local_time, f"UTC{int(offset/3600)}:00")
local_time = local_time.astype("S")
# Find medical, surgical and social history of patient
medical_hist = self._get_med_surg_hist("medhist_file")
surgical_hist = self._get_med_surg_hist("surghist_file")
tobacco_hist, alcohol_hist = self._get_social_hist()
return StaticData(
department_id,
department_nm,
room_bed,
move_time,
weight,
height,
admin_type,
admin_diag,
admin_date,
birth_date,
race,
sex,
end_date,
end_stay_type,
local_time,
medical_hist,
surgical_hist,
tobacco_hist,
alcohol_hist,
)
def get_med_doses(self, med_name: str) -> Medication:
"""
Get all the doses of the input medication given to the patient.
:param medication_name: <string> name of the medicine
:return: <Medication> wrapped list of medications doses
"""
(
signal_column,
status_column,
time_column,
route_column,
weight_column,
dose_column,
dose_unit_column,
infusion_column,
infusion_unit_column,
duration_column,
duration_unit_column,
) = EDW_FILES["med_file"]["columns"]
source = EDW_FILES["med_file"]["source"]
med_df = pd.read_csv(self.med_file)
med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
med_df = med_df.sort_values(time_column)
if med_name not in med_df[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{med_name} was not found in {self.med_file}.")
idx = np.where(med_df[signal_column].astype("str").str.upper() == med_name)[0]
route = np.array(med_df[route_column])[idx[0]]
wt_base_dose = (
bool(1) if np.array(med_df[weight_column])[idx[0]] == "Y" else bool(0)
)
if med_df[duration_column].isnull().values[idx[0]]:
start_date = self._get_unix_timestamps(np.array(med_df[time_column])[idx])
action = np.array(med_df[status_column], dtype="S")[idx]
if (
np.array(med_df[status_column])[idx[0]] in [MED_ACTIONS[0]]
or med_df[infusion_column].isnull().values[idx[0]]
):
dose = np.array(med_df[dose_column], dtype="S")[idx]
units = np.array(med_df[dose_unit_column])[idx[0]]
else:
dose = np.array(med_df[infusion_column])[idx]
units = np.array(med_df[infusion_unit_column])[idx[0]]
else:
dose = np.array([])
units = np.array(med_df[infusion_unit_column])[idx[0]]
start_date = np.array([])
action = np.array([])
for _, row in med_df.iloc[idx, :].iterrows():
dose = np.append(dose, [row[infusion_column], 0])
time = self._get_unix_timestamps(np.array([row[time_column]]))[0]
conversion = 1
if row[duration_unit_column] == "Seconds":
conversion = 1
elif row[duration_unit_column] == "Minutes":
conversion = 60
elif row[duration_unit_column] == "Hours":
conversion = 3600
start_date = np.append(
start_date,
[time, time + float(row[duration_column]) * conversion],
)
action = np.append(action, [row[status_column], "Stopped"])
dose = self._ensure_contiguous(dose)
start_date = self._ensure_contiguous(start_date)
action = self._ensure_contiguous(action)
return Medication(
med_name,
dose,
units,
start_date,
action,
route,
wt_base_dose,
source,
)
def get_vitals(self, vital_name: str) -> Measurement:
"""
Get the vital signals from the EDW csv file 'flowsheet'.
:param vital_name: <string> name of the signal
:return: <Measurement> wrapped measurement signal
"""
vitals_df = pd.read_csv(self.vitals_file)
# Remove measurements out of dates
time_column = EDW_FILES["vitals_file"]["columns"][3]
admit_column = EDW_FILES["adm_file"]["columns"][3]
discharge_column = EDW_FILES["adm_file"]["columns"][4]
admission_df = pd.read_csv(self.adm_file)
init_date = admission_df[admit_column].values[0]
end_date = admission_df[discharge_column].values[0]
vitals_df = vitals_df[vitals_df[time_column] >= init_date]
if str(end_date) != "nan":
vitals_df = vitals_df[vitals_df[time_column] <= end_date]
return self._get_measurements(
"vitals_file",
vitals_df,
vital_name,
self.vitals_file,
)
def get_labs(self, lab_name: str) -> Measurement:
"""
Get the lab measurement from the EDW csv file 'labs'.
:param lab_name: <string> name of the signal
:return: <Measurement> wrapped measurement signal
"""
labs_df = pd.read_csv(self.lab_file)
return self._get_measurements("lab_file", labs_df, lab_name, self.lab_file)
def get_surgery(self, surgery_type: str) -> Procedure:
"""
        Get all the surgery information of the input type performed on the
        patient.
        :param surgery_type: <string> type of surgery
        :return: <Procedure> wrapped list of surgeries of the input type
"""
return self._get_procedures("surgery_file", self.surgery_file, surgery_type)
def get_other_procedures(self, procedure_type: str) -> Procedure:
"""
        Get all the procedures of the input type performed on the patient.
        :param procedure_type: <string> type of procedure
        :return: <Procedure> wrapped list of procedures of the input type
"""
return self._get_procedures(
"other_procedures_file",
self.other_procedures_file,
procedure_type,
)
def get_transfusions(self, transfusion_type: str) -> Procedure:
"""
        Get all transfusions of the input type that were given to the patient.
:param transfusion_type: <string> Type of transfusion.
:return: <Procedure> Wrapped list of transfusions of the input type.
"""
return self._get_procedures(
"transfusions_file",
self.transfusions_file,
transfusion_type,
)
def get_events(self, event_type: str) -> Event:
"""
        Get all events of the input type during the patient's stay.
:param event_type: <string> Type of event.
:return: <Event> Wrapped list of events of the input type.
"""
signal_column, time_column = EDW_FILES["events_file"]["columns"]
data = pd.read_csv(self.events_file)
data = data.dropna(subset=[time_column])
data = data.sort_values([time_column])
if event_type not in data[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{event_type} was not found in {self.events_file}.")
idx = np.where(data[signal_column].astype("str").str.upper() == event_type)[0]
time = self._get_unix_timestamps(np.array(data[time_column])[idx])
time = self._ensure_contiguous(time)
return Event(event_type, time)
def _get_local_time(self, init_date: str, end_date: str) -> np.ndarray:
"""
Obtain local time from init and end dates.
:param init_date: <str> String with initial date.
:param end_date: <str> String with end date.
:return: <np.ndarray> List of offsets from UTC (it may be two in
case the time shift between summer/winter occurs while the
patient is in the hospital).
"""
init_dt = datetime.strptime(init_date, "%Y-%m-%d %H:%M:%S.%f")
end_dt = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S.%f")
offset_init = self.timezone.utcoffset( # type: ignore
init_dt,
is_dst=True,
).total_seconds()
offset_end = self.timezone.utcoffset( # type: ignore
end_dt,
is_dst=True,
).total_seconds()
return np.array([offset_init, offset_end], dtype=float)
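    # Worked example for _get_local_time above, assuming self.timezone is
    # pytz.timezone("US/Eastern") (offsets are seconds from UTC):
    #     self._get_local_time("2020-03-01 00:00:00.000000",
    #                          "2020-03-15 00:00:00.000000")
    #     # -> array([-18000., -14400.])  (EST is UTC-5, EDT is UTC-4; the DST
    #     #    shift happens on 2020-03-08, between the two dates)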
def _get_unix_timestamps(self, time_stamps: np.ndarray) -> np.ndarray:
"""
Convert readable time stamps to unix time stamps.
:param time_stamps: <np.ndarray> Array with all readable time stamps.
:return: <np.ndarray> Array with Unix time stamps.
"""
try:
arr_timestamps = | pd.to_datetime(time_stamps) | pandas.to_datetime |
from datetime import datetime, timedelta
from pandas import json
from api.decorators import api_post, api_get
from api.helper import json_response, json_error_response
from api.utils import int_or_none
from broker.models import BrokerVehicle, Broker
from fms.decorators import authenticated_user
from fms.views import get_or_none
from owner.models import Vehicle
from supplier.helper import compare_format
from team.models import ManualBooking
from team.helper.helper import to_int
from transaction.models import VehicleAllocated, Transaction
from django.contrib.auth.models import User
import pandas as pd
from owner.vehicle_util import display_format
@api_post
@authenticated_user
def booking_history_data(request):
broker = Broker.objects.get(name=User.objects.get(username=request.user.username))
broker_vehicle_ids = BrokerVehicle.objects.filter(broker=broker).values_list('vehicle_id', flat=True)
allocated_vehicles_data = VehicleAllocated.objects.filter(vehicle_number_id__in=broker_vehicle_ids).values(
'transaction_id', 'total_out_ward_amount', 'total_amount_to_owner', 'transaction__shipment_datetime', 'id',
'source_city', 'destination_city', 'transaction_id', 'material', 'transaction__total_vehicle_requested',
'transaction__transaction_status', 'transaction__transaction_id', 'vehicle_number__vehicle_number', 'lr_number')
transaction_data = [{'id': v['id'],
'transaction_id': v['transaction__transaction_id'],
'status': v['transaction__transaction_status'],
'source_city': v['source_city'],
'destination_city': v['destination_city'],
'paid': str(int(v['total_out_ward_amount'])),
'amount': str(int(v['total_amount_to_owner'])),
'balance': str(int(v['total_amount_to_owner'] - v['total_out_ward_amount'])),
'total_vehicle_requested': v['transaction__total_vehicle_requested'],
'vehicle_number': display_format(v['vehicle_number__vehicle_number']),
'lr_number': v['lr_number'],
'shipment_date': v['transaction__shipment_datetime'].strftime('%d-%b-%Y')} for v in
allocated_vehicles_data]
return json_response({'status': 'success', 'data': transaction_data})
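# Illustrative shape of the JSON returned above (keys mirror the dict built from
# the queryset; the values here are made up):
#     {"status": "success",
#      "data": [{"id": 12, "transaction_id": "TR-1001", "status": "confirmed",
#                "source_city": "Mumbai", "destination_city": "Delhi",
#                "paid": "5000", "amount": "20000", "balance": "15000",
#                "total_vehicle_requested": 2, "vehicle_number": "MH 12 AB 1234",
#                "lr_number": "LR-77", "shipment_date": "05-Aug-2017"}]}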
@api_post
@authenticated_user
def vehicle_trip_data(request):
data = request.data
vehicle_id = int_or_none(data.get('vehicleId', None))
if vehicle_id:
vehicle = get_or_none(Vehicle, id=vehicle_id)
if not vehicle:
return json_error_response('Vehicle with id=%s does not exist' % vehicle_id, 404)
else:
broker_vehicle_ids = BrokerVehicle.objects.filter(vehicle=vehicle).values_list(
'vehicle_id',
flat=True)
allocated_vehicles_data = VehicleAllocated.objects.filter(vehicle_number_id__in=broker_vehicle_ids).values(
'transaction_id', 'total_out_ward_amount', 'total_amount_to_owner', 'transaction__shipment_datetime',
'source_city', 'destination_city', 'transaction_id', 'material', 'transaction__total_vehicle_requested',
'transaction__transaction_status', 'transaction__transaction_id', 'vehicle_number__vehicle_number',
'lr_number')
transaction_data = [{'id': v['transaction_id'],
'transaction_id': v['transaction__transaction_id'],
'status': v['transaction__transaction_status'],
'source_city': v['source_city'],
'destination_city': v['destination_city'],
'paid': str(int(v['total_out_ward_amount'])),
'amount': str(int(v['total_amount_to_owner'])),
'balance': str(int(v['total_amount_to_owner'] - v['total_out_ward_amount'])),
'total_vehicle_requested': v['transaction__total_vehicle_requested'],
'vehicle_number': display_format(v['vehicle_number__vehicle_number']),
'lr_number': v['lr_number'],
'shipment_date': v['transaction__shipment_datetime'].strftime('%d-%b-%Y')} for v in
allocated_vehicles_data]
return json_response({'status': 'success', 'data': transaction_data})
    else:
        return json_error_response('vehicleId is required', 400)
@api_post
@authenticated_user
def mb_vehicle_trip_data(request):
data = request.data
vehicle_id = int_or_none(data.get('vehicleId', None))
if vehicle_id:
        vehicle = get_or_none(Vehicle, id=vehicle_id)
if not vehicle:
return json_error_response('Vehicle with id=%s does not exist' % vehicle_id, 404)
else:
data = []
for booking in ManualBooking.objects.filter(
lorry_number__in=[display_format(compare_format(vehicle.vehicle_number))]).order_by(
'-shipment_date'):
if to_int(booking.total_amount_to_owner - booking.total_out_ward_amount) != 0:
data.append(
{
'status': 'unpaid',
'lr_number': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'paid': to_int(booking.total_out_ward_amount),
'id': booking.id,
'total_vehicle_requested': None,
'vehicle_number': display_format(booking.lorry_number),
'source_city': booking.from_city,
'destination_city': booking.to_city,
'amount': to_int(booking.total_amount_to_owner),
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y'),
'balance': to_int(booking.total_amount_to_owner - booking.total_out_ward_amount),
'transaction_id': booking.booking_id
}
)
else:
data.append(
{
'status': 'paid',
'lr_number': '\n'.join(booking.lr_numbers.values_list('lr_number', flat=True)),
'paid': to_int(booking.total_out_ward_amount),
'id': booking.id,
'total_vehicle_requested': None,
'vehicle_number': display_format(booking.lorry_number),
'source_city': booking.from_city,
'destination_city': booking.to_city,
'amount': to_int(booking.total_amount_to_owner),
'shipment_date': booking.shipment_date.strftime('%d-%b-%Y'),
'balance': to_int(booking.total_amount_to_owner - booking.total_out_ward_amount),
'final_payment_date': final_payment_date(booking=booking),
'transaction_id': booking.booking_id
}
)
            return json_response({'status': 'success', 'data': data})
    else:
        return json_error_response('vehicleId is required', 400)
def get_allocated_vehicle(request):
data = | json.loads(request.body) | pandas.json.loads |
from datetime import datetime
from datetime import timedelta
from datetime import date
from collections import namedtuple
import pytz
from dateutil.rrule import *
import requests
import pandas as pd
# TODO: pull in 120 days of data from tiingo --> date, close, open, high, low, vols
# TODO: calculate vols_adj, yc, yc_delta, body, range
# TODO: normalize the data using factors from training
# Named tuple for aid in the data parse
fields = ['date', 'open', 'close', 'high', 'low', 'vols']
TickerData = namedtuple('TickerData', fields)
def parse_json(response):
"""
Parameters
----------
    response : requests.Response
The response object to be parsed
Returns
-------
records : list
list of named tuples that represent the ticker data
"""
json_response = response.json()
records = []
for json_object in json_response:
d = json_object['date']
o = json_object['open']
c = json_object['close']
h = json_object['high']
l = json_object['low']
v = json_object['volume']
ticker_data = TickerData(d, o, c, h, l, v)
records.append(ticker_data)
return records
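# Illustrative shape of one element of the Tiingo JSON payload, inferred from the
# keys read above (the numbers are made up, not real market data):
#     {"date": "2024-01-02T00:00:00.000Z", "open": 472.16, "close": 476.68,
#      "high": 477.55, "low": 471.25, "volume": 74879135}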
def get_stock_data(stock_symbol, start_date, end_date):
"""
    Make a REST API call to the Tiingo API to get historic stock data
    Parameters
    ----------
    stock_symbol : str
        US stock market symbol
    start_date : str
        yyyy-mm-dd formatted date that begins the time series
    end_date : str
        yyyy-mm-dd formatted date that ends the time series
    Returns
    -------
    response : requests.Response
The response object to be parsed
"""
TIINGO_API_KEY='c391b96b9ea2e55ce48335fc7ab86245f2a41ec2'
base_url = f'https://api.tiingo.com/tiingo/daily/{stock_symbol}/prices?'
payload = {
'token':TIINGO_API_KEY,
'startDate':start_date,
'endDate':end_date
}
response = requests.get(base_url, params=payload)
return response
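# Hedged usage sketch (not part of the original script): chains the two helpers
# above into a DataFrame. The function name, default ticker, and date range are
# illustrative assumptions; a network connection and a valid token are required.
def demo_fetch_history(ticker='SPY', start_date='2024-01-01', end_date='2024-03-01'):
    response = get_stock_data(ticker, start_date, end_date)
    response.raise_for_status()  # surface HTTP/auth errors early
    records = parse_json(response)  # list of TickerData named tuples
    return pd.DataFrame(records)  # columns: date, open, close, high, low, vols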
def make_features(df):
return df
def normalize_features():
data_mean = [2.31178717e+02, 2.31161386e+02, 2.32160913e+02, 2.30031823e+02, 9.82221797e+02, 2.31100043e+02, 7.82305809e-01, 1.05486792e+00, 2.12909069e+00]
data_std = [36.7099162, 36.75418153, 36.87378414, 36.55758872, 476.02262177, 36.71464012, 0.83594208, 1.17753345, 1.55720965]
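# Hedged sketch of how the training-set statistics inside normalize_features
# might be applied. The name `standardize_features` and the assumption that the
# DataFrame columns are ordered to match data_mean/data_std are mine, not the
# original author's.
def standardize_features(df, mean, std):
    # Column-wise standardization: (x - mean) / std, aligned by column position.
    return (df - pd.Series(mean, index=df.columns)) / pd.Series(std, index=df.columns)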
def make_prediction():
pass
def main():
est = pytz.timezone('US/Eastern')
ticker = 'SPY'
end_date = last_close().astimezone(est)
start_date = end_date + timedelta(days=-200)
response = get_stock_data(
ticker,
start_date.strftime('%Y-%m-%d'),
end_date.strftime('%Y-%m-%d'))
records = parse_json(response)
df = | pd.DataFrame(records) | pandas.DataFrame |