prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 13:13:19 2019
@author: <NAME>
"""
import pandas as pd
import os
import numpy as np
import math
# Get the location of this file
file_dir = os.path.dirname(os.path.abspath(__file__))
def read_attributes():
att_df = | pd.read_csv("all_catchment_attributes.csv", encoding='latin-1', sep=";", index_col=1) | pandas.read_csv |
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestPinyi(unittest.TestCase):
def test_one_hot(self):
data = np.array([
["assisbragasm", 1],
["assiseduc", 1],
["assist", 1],
["assiseduc", 1],
["assistebrasil", 1],
["assiseduc", 1],
["assistebrasil", 1],
["assistencialgsamsung", 1]
])
# load data
df = | pd.DataFrame({"query": data[:, 0], "weight": data[:, 1]}) | pandas.DataFrame |
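# Illustration only (not part of the pyalink test above, and not the pyalink
# OneHot API): what one-hot encoding of the "query" column produces, sketched
# with plain pandas via pd.get_dummies on the same kind of toy data.
import numpy as np
import pandas as pd

toy = np.array([["assisbragasm", 1], ["assiseduc", 1], ["assist", 1]])
toy_df = pd.DataFrame({"query": toy[:, 0], "weight": toy[:, 1].astype(int)})
# Each distinct query value becomes its own 0/1 indicator column.
print(pd.get_dummies(toy_df["query"]))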
"""Provide methodologies for preprocessing the data."""
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, Generator, List, Optional, Tuple
import pandas as pd
from hoqunm.data_tools.base import (
AGE, BEGIN, BIRTH, CURRENT_CLASS, CURRENT_WARD, DATECOLUMNS, DIAGNR, END,
EXTERNAL, FA_BEGIN, FLOW, GLOB_BEGIN, GLOB_END, INTERNAL, PATIENT,
POST_CLASS, POST_WARD, PRE_CLASS, PRE_WARD, SEX, URGENCY, and_query,
column_query, or_query, split_data)
from hoqunm.utils.utils import get_logger
pd.set_option('mode.chained_assignment', None)
class Preprocessor:
"""A class holding different functionalities for preprocessing the data
from the hospital.
The aim is to preprocess the data so that it can be handed over to the
class Data. To keep this clean, a validation function that checks that
the data is in the correct format would be a sensible addition.
:param filepath: The file path to the data (as csv/xls/...).
:param sep: The separator with which columns are separated.
:param **kwargs: Additional arguments passed to pandas.read_csv.
"""
def __init__(self,
filepath: Path = Path(),
sep: str = ";",
startdate: datetime = datetime(2018, 1, 1),
enddate: datetime = datetime(2020, 1, 1),
datescale: timedelta = timedelta(1),
logger: Optional[logging.Logger] = None,
**kwargs: Any) -> None:
self.data = pd.read_csv(filepath, sep=sep, **kwargs)
self.startdate = startdate
self.enddate = enddate
self.datescale = datescale
self.data_backup = self.data.copy()
self.logger = logger if logger is not None else get_logger(
"Preprocessor", filepath.parent.joinpath("preprocessor.log"))
def split_data(self) -> None:
"""The first row of the data could contain the start- and enddate.
Therefore, the data should be splitted.
"""
self.backup()
self.data, self.startdate, self.enddate = split_data(self.data)
def backup(self) -> None:
"""Save the current state of self.data in self.data_backup."""
self.data_backup = self.data.copy()
def reset(self) -> None:
"""Replace self.data with self.data_backup (last data state)."""
self.data = self.data_backup.copy()
def add(self,
filepath: Path = Path(),
sep: str = ";",
**kwargs: Any) -> None:
"""Add new data to existing data.
:param filepath: The file path to the data (as csv/xls/...).
:param sep: The separator with which columns are separated.
:param **kwargs: Additional arguments passed to pandas.read_csv.
"""
data = pd.read_csv(filepath, sep=sep, **kwargs)
assert all(data.columns == self.data.columns)
self.data = self.data.append(data, ignore_index=True)
def write(self, filepath: Path, sep: str = ";", **kwargs: Any) -> None:
"""Save the current state in a csv file.
:param filepath: The file path to the data (as csv/xls/...).
:param sep: The separator with which columns are separated.
:param **kwargs: Additional arguments passed to pandas.to_csv.
"""
data = pd.DataFrame(columns=self.data.columns)
time_ser = pd.Series(index=self.data.columns)
time_ser[GLOB_BEGIN] = self.startdate
time_ser[GLOB_END] = self.enddate
data = data.append(time_ser, ignore_index=True)
data = data.append(self.data, ignore_index=True)
data.to_csv(filepath, sep=sep, **kwargs)
# pylint: disable=redefined-builtin
def datetimes_to_float(self,
format: str = "%d.%m.%Y %H:%M",
startdate: Optional[datetime] = None,
scale: Optional[timedelta] = None,
columns: Optional[List[str]] = None) -> None:
"""Make date string to dates and the to floats. Set errors to NaN. Set
all datetimes before start of analysis to negative values.
:param format: The format in which the datetimes are currently given.
:param startdate: A date indicating, when analysis should start.
:param scale: T timedelta indicating at which level one should scale. Days seem reasonable.
:param columns: The columns under consideration.
"""
if startdate is None:
startdate = self.startdate
if scale is None:
scale = self.datescale
if columns is None:
columns = DATECOLUMNS
datecolumns = pd.DataFrame({
datecolumn: pd.to_datetime(self.data[datecolumn],
format=format,
errors="coerce")
for datecolumn in columns
})
naindex = [
datecolumns.loc[:, datecolumn].isnull() for datecolumn in columns
]
datecolumns = datecolumns.fillna(
datetime.max.replace(year=2100, second=0, microsecond=0))
self.data[columns] = (datecolumns - startdate) / scale
self.data[columns] = self.data[columns].astype("float")
for i, column in enumerate(columns):
self.data.loc[naindex[i], column] = float("inf")
def make_flow(self, split_str: str = " \\f:9919\\Þ\\f:-\\ ") -> None:
"""The csv file is not well formatted. Split the column FLOW into
PRE_WARD and CURRENT_WARD.
:param split_str: The string separating PRE_WARD from CURRENT_WARD.
"""
self.backup()
self.data.loc[:, PRE_WARD] = float("NaN")
self.data.loc[:, CURRENT_WARD] = float("NaN")
for i, row in self.data.iterrows():
split = row[FLOW].split(split_str)
self.data.loc[i, PRE_WARD] = split[0]
self.data.loc[i, CURRENT_WARD] = split[-1]
self.data = self.data.drop(columns=FLOW)
def replace_ward_keys(self,
ward_map: Optional[Dict[str, List[str]]] = None,
internal_prefix: Optional[List[str]] = None) -> None:
"""The csv file has more information than needed.
Convert unimportant wards (not given in ward_map) to EXTERNAL ward
and map the remaining wards as given via ward_map.
:param ward_map: Key is the ward name; value is the list of all names used for it in the csv.
:param internal_prefix: List of internal prefixes to consider.
"""
if ward_map is not None:
self.backup()
for ward, keys in ward_map.items():
self.data = self.data.replace(keys, ward)
all_wards = list(self.data.loc[:, CURRENT_WARD].unique()) + list(
self.data.loc[:, PRE_WARD].unique())
internal_wards = [
ward for ward in all_wards
if ward not in ward_map and ((internal_prefix is None) or any(
ward.startswith(internal_prefix_)
for internal_prefix_ in internal_prefix))
]
external_wards = [
ward for ward in all_wards
if ward not in list(ward_map) and ward not in internal_wards
]
self.data = self.data.replace(internal_wards, INTERNAL)
self.data = self.data.replace(external_wards, EXTERNAL)
else:
self.logger.warning(
"No ward map given. No ward keys have been replace.")
def make_urgency(self, split_str: str = "\\f:9919\\Þ\\f:-\\ ") -> None:
"""Get the main information from URGENCY (N0,...,N5).
:param split_str: The string to split at.
"""
self.backup()
if URGENCY in self.data.columns:
for i, row in self.data.iterrows():
split = row[URGENCY].split(split_str)
if len(split[1]) > 1:
try:
self.data.loc[i, URGENCY] = int(split[1][1])
except ValueError:
self.data.loc[i, URGENCY] = float("NaN")
else:
self.data.loc[i, URGENCY] = float("NaN")
# pylint: disable=redefined-builtin
def make_age(self,
reference_date: datetime = datetime.today(),
format: str = "%d.%m.%Y") -> None:
"""Make an age value from the BIRTH column given a reference date.
This could be helpful for CART and some other analyses.
:param reference_date: The date at which the age is interpreted.
:param format: The date format of BIRTH.
"""
self.backup()
if BIRTH in self.data.columns:
scale = timedelta(365)
self.data.loc[:, AGE] = self.data.loc[:, BIRTH].copy()
self.datetimes_to_float(format=format,
startdate=reference_date,
scale=scale,
columns=[AGE])
self.data.loc[:, AGE] *= -1
self.data.loc[:, AGE] = self.data.loc[:, AGE].astype("int")
def clean_empty(self):
"""The data has empty END entries which can be dropped."""
self.backup()
qry = column_query(END, " ")
empty_index = self.data.query(qry).index
self.data = self.data.drop(index=empty_index)
def _clean_patient_data(self, patient_data: pd.DataFrame) -> pd.DataFrame:
"""Clean all the data for a given patient.
This should reflect the special needs for the data sheets
obtained from the hospital.
:param patient_data: The sorted entries for one patient.
:return: Cleaned data.
"""
df = pd.DataFrame(columns=patient_data.columns)
rowi = patient_data.iloc[0, :].copy()
if rowi.loc[PRE_WARD] == rowi.loc[CURRENT_WARD]:
rowi.loc[PRE_WARD] = EXTERNAL
for _, rowj in patient_data.iloc[1:, :].iterrows():
if rowi.loc[CURRENT_WARD] in [EXTERNAL, INTERNAL]:
# entry is not interesting, go on
rowi = rowj.copy()
elif rowi.loc[END] == rowj.loc[BEGIN] and rowi.loc[
CURRENT_WARD] == rowj.loc[CURRENT_WARD]:
# enough evidence for a multiple entry, change the END of the last row
rowi.loc[END] = rowj.loc[END]
elif rowi.loc[END] == rowj.loc[BEGIN] and rowi.loc[
CURRENT_WARD] == rowj.loc[PRE_WARD]:
# found a new row, so save rowi and make new
rowi.loc[POST_WARD] = rowj.loc[CURRENT_WARD]
df = df.append(rowi)
rowi = rowj.copy()
elif rowj.loc[PRE_WARD] in [EXTERNAL, INTERNAL]:
# maybe the patient visited again, so drop the current row and start sth new
rowi.loc[POST_WARD] = rowj.loc[PRE_WARD]
df = df.append(rowi)
rowi = rowj.copy()
else:
# there are some errors in the data set concerning multiple
# entries for different ICPM.
# just go to the next row, hoping not to mess things up
self.logger.warning("Warning. Something else happened.")
if rowi.loc[CURRENT_WARD] not in [EXTERNAL, INTERNAL]:
if not rowi.isna().loc[END]:
rowi.loc[POST_WARD] = EXTERNAL
df = df.append(rowi)
return df
def clean_data(self) -> None:
"""Clean the data from multiple entries regarding the same stay.
This should reflect the special needs for the data sheets
obtained from the hospital.
"""
self.backup()
data = self.data.copy()
data.loc[:, PATIENT] = float("NaN")
data.loc[:, POST_WARD] = float("NaN")
df = | pd.DataFrame(columns=data.columns) | pandas.DataFrame |
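# Minimal, self-contained sketch (not part of the hoqunm package) of the idea
# behind Preprocessor.datetimes_to_float above: parse date strings, subtract
# the analysis start date and divide by a timedelta scale, so dates before the
# start become negative floats and unparsable entries become NaN.
from datetime import datetime, timedelta
import pandas as pd

start = datetime(2018, 1, 1)
scale = timedelta(days=1)
raw = pd.Series(["31.12.2017 12:00", "02.01.2018 00:00", "not a date"])
parsed = pd.to_datetime(raw, format="%d.%m.%Y %H:%M", errors="coerce")
as_float = (parsed - start) / scale  # -> [-0.5, 1.0, NaN]
print(as_float.tolist())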
import pandas as pd
import pandas.testing as pdt
import pytest
import pytz
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.conftest import (
VALID_FORECAST_JSON, VALID_CDF_FORECAST_JSON, demo_forecasts)
from sfa_api.utils import request_handling
from sfa_api.utils.errors import (
BadAPIRequest, StorageAuthError, NotFoundException)
@pytest.mark.parametrize('start,end', [
('invalid', 'invalid'),
('NaT', 'NaT')
])
def test_validate_start_end_fail(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('start,end', [
('20190101T120000Z', '20190101T130000Z'),
('20190101T120000', '20190101T130000'),
('20190101T120000', '20190101T130000Z'),
('20190101T120000Z', '20190101T130000+00:00'),
('20190101T120000Z', '20190101T140000+01:00'),
])
def test_validate_start_end_success(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('query,exc', [
('?start=20200101T0000Z', {'end'}),
('?end=20200101T0000Z', {'start'}),
('?start=20200101T0000Z&end=20210102T0000Z', {'end'}),
('', {'start', 'end'}),
pytest.param('?start=20200101T0000Z&end=20200102T0000Z', {},
marks=pytest.mark.xfail(strict=True))
])
def test_validate_start_end_not_provided(app, forecast_id, query, exc):
url = f'/forecasts/single/{forecast_id}/values{query}'
with app.test_request_context(url):
with pytest.raises(BadAPIRequest) as err:
request_handling.validate_start_end()
if exc:
assert set(err.value.errors.keys()) == exc
@pytest.mark.parametrize('content_type,payload', [
('text/csv', ''),
('application/json', '{}'),
('application/json', '{"values": "nope"}'),
('text/plain', 'nope'),
])
def test_validate_parsable_fail(app, content_type, payload, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(
url, content_type=content_type, data=payload, method='POST',
content_length=len(payload)):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type', [
('text/csv'),
('application/json'),
('application/json'),
])
def test_validate_parsable_fail_too_large(app, content_type, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(RequestEntityTooLarge):
with app.test_request_context(
url, content_type=content_type, method='POST',
content_length=17*1024*1024):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type,payload', [
('text/csv', 'timestamp,value\n2019-01-01T12:00:00Z,5'),
('application/json', ('{"values":[{"timestamp": "2019-01-01T12:00:00Z",'
'"value": 5}]}')),
])
def test_validate_parsable_success(app, content_type, payload, forecast_id):
with app.test_request_context(f'/forecasts/single/{forecast_id}/values/',
content_type=content_type, data=payload,
method='POST'):
request_handling.validate_parsable_values()
def test_validate_observation_values():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
request_handling.validate_observation_values(df)
def test_validate_observation_values_bad_value():
df = pd.DataFrame({'value': [0.1, 's.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_no_value():
df = pd.DataFrame({'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_bad_timestamp():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
def test_validate_observation_values_no_timestamp():
df = pd.DataFrame({
'value': [0.1, '.2'], 'quality_flag': [0.0, 1]})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
@pytest.mark.parametrize('quality', [
[1, .1],
[1, '0.9'],
[2, 0],
['ham', 0]
])
def test_validate_observation_values_bad_quality(quality):
df = pd.DataFrame({'value': [0.1, .2],
'quality_flag': quality,
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
def test_validate_observation_values_no_quality():
df = pd.DataFrame({'value': [0.1, '.2'],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
expected_parsed_df = pd.DataFrame({
'a': [1, 2, 3, 4],
'b': [4, 5, 6, 7],
})
csv_string = "a,b\n1,4\n2,5\n3,6\n4,7\n"
json_string = '{"values":{"a":[1,2,3,4],"b":[4,5,6,7]}}'
def test_parse_csv_success():
test_df = request_handling.parse_csv(csv_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('csv_input', [
'',
"a,b\n1,4\n2.56,2.45\n1,2,3\n"
])
def test_parse_csv_failure(csv_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_csv(csv_input)
def test_parse_json_success():
test_df = request_handling.parse_json(json_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('json_input', [
'',
"{'a':[1,2,3]}"
])
def test_parse_json_failure(json_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_json(json_input)
null_df = pd.DataFrame({
'timestamp': [
'2018-10-29T12:00:00Z',
'2018-10-29T13:00:00Z',
'2018-10-29T14:00:00Z',
'2018-10-29T15:00:00Z',
],
'value': [32.93, 25.17, None, None],
'quality_flag': [0, 0, 1, 0]
})
def test_parse_csv_nan():
test_df = request_handling.parse_csv("""
# comment line
timestamp,value,quality_flag
2018-10-29T12:00:00Z,32.93,0
2018-10-29T13:00:00Z,25.17,0
2018-10-29T14:00:00Z,,1 # this value is NaN
2018-10-29T15:00:00Z,NaN,0
""")
pdt.assert_frame_equal(test_df, null_df)
def test_parse_json_nan():
test_df = request_handling.parse_json("""
{"values":[
{"timestamp": "2018-10-29T12:00:00Z", "value": 32.93, "quality_flag": 0},
{"timestamp": "2018-10-29T13:00:00Z", "value": 25.17, "quality_flag": 0},
{"timestamp": "2018-10-29T14:00:00Z", "value": null, "quality_flag": 1},
{"timestamp": "2018-10-29T15:00:00Z", "value": null, "quality_flag": 0}
]}
""")
| pdt.assert_frame_equal(test_df, null_df) | pandas.testing.assert_frame_equal |
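# Small standalone illustration of pandas.testing.assert_frame_equal, which
# the tests above rely on: it returns None when the frames match and raises
# an AssertionError describing the first mismatch otherwise.
import pandas as pd
import pandas.testing as pdt

left = pd.DataFrame({"value": [1.0, 2.0], "quality_flag": [0, 1]})
pdt.assert_frame_equal(left, left.copy())  # passes silently
try:
    pdt.assert_frame_equal(left, left.assign(value=[1.0, 2.5]))
except AssertionError as err:
    print("mismatch detected:", type(err).__name__)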
import random
import pandas as pd
from collections import Counter
from django.contrib.auth.models import User, Group
from django.db.models import Q
from psycopg2._psycopg import IntegrityError
from api.sms import send_sms
from broker.models import Broker, BrokerVehicle
from owner.models import Vehicle
from restapi.helper_api import generate_random_string
from team.models import ManualBookingSummary, ManualBooking
from utils.models import AahoOffice, State
def mumbai_directory():
df = pd.read_excel('../../data/Mumbai Transport Directory.xlsx')
df = df.fillna('')
print(df.columns)
for i, row in df.iterrows():
print(row['name'])
def broker_aaho_office():
data = []
for broker in Broker.objects.all():
bookings = broker.team_booking_broker.all()
if bookings.count() > 0:
aaho_source_offices = list(bookings.values_list('source_office__branch__name', flat=True))
aaho_destination_offices = list(bookings.values_list('destination_office__branch__name', flat=True))
print(['{}: {}'.format(office, aaho_source_offices.count(office)) for office in set(aaho_source_offices)])
data.append([
broker.id,
broker.get_name(),
broker.get_phone(),
bookings.order_by('shipment_date').first().shipment_date,
bookings.order_by('shipment_date').last().shipment_date,
'\n'.join(['{}: {}'.format(office, aaho_source_offices.count(office)) for office in
set(aaho_source_offices)]),
'\n'.join(['{}: {}'.format(office, aaho_destination_offices.count(office)) for office in
set(aaho_destination_offices)])
])
df = pd.DataFrame(data=data, columns=['ID', 'Name', 'Phone', 'First Booking Date', 'Last Booking Date',
'Source Booking Offices', 'Dest Booking Office'])
df.to_excel('Aaho office wise Brokers.xlsx', index=False)
def update_broker_aaho_office():
for broker in Broker.objects.all():
bookings = broker.team_booking_broker.all()
if bookings.count() > 0:
aaho_source_offices = list(bookings.values_list('source_office__id', flat=True))
data = {}
for office in set(aaho_source_offices):
data[office] = aaho_source_offices.count(office)
office_id = max(data.keys(), key=lambda k: data[k])
aaho_office = AahoOffice.objects.get(id=office_id)
broker.aaho_office = aaho_office
broker.save()
# print([{'{}: {}'.format(office, aaho_source_offices.count(office))} for office in set(aaho_source_offices)])
def broker_aaho_office_data():
for broker in Broker.objects.all():
print(broker.aaho_office.to_json() if broker.aaho_office else {})
def broker_vehicle():
vehicle = Vehicle.objects.get(id=3834)
broker = Broker.objects.get(id=616)
BrokerVehicle.objects.create(broker=broker, vehicle=vehicle)
def broker_data():
data = []
for broker in Broker.objects.all().order_by('-id'):
print(broker)
data.append([
broker.id,
broker.get_name(),
broker.get_phone(),
broker.city.name if broker.city else '',
broker.aaho_office.branch_name if broker.aaho_office else ''
])
df = pd.DataFrame(data=data, columns=['ID', 'Name', 'Phone', 'City', 'Aaho Office'])
df.to_excel('Brokers.xlsx', index=False)
broker = Broker.objects.get(id=616)
# BrokerVehicle.objects.create(broker=broker, vehicle=vehicle)
def update_state_name():
df = | pd.read_excel('/Users/mani/Downloads/Supplier Destination States.xlsx', sheet_name='New') | pandas.read_excel |
import pdb
import numpy as np
import pandas as pd
from metabolite_mapping import folder2spectrumregion, folder2ppm, folder2dataset
# convert keys from folder names to dataset entries
dataset2spectrumregion = {}
for f_key in folder2spectrumregion.keys():
dataset2spectrumregion[folder2dataset[f_key]] = folder2spectrumregion[f_key]
dataset2ppmregion = {}
for f_key in folder2ppm.keys():
dataset2ppmregion[folder2dataset[f_key]] = folder2ppm[f_key]
# create dataframe from region dictionaries
ppm_region_df = pd.DataFrame(dict([(k, | pd.Series(v) | pandas.Series |
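# Why the values above are wrapped in pd.Series: the region lists can have
# different lengths per metabolite, and building the DataFrame from a dict of
# Series pads the shorter columns with NaN instead of raising. Standalone
# illustration with made-up keys and values:
import pandas as pd

regions = {"lactate": [1.31, 1.33, 4.10], "alanine": [1.46, 1.48]}
padded = pd.DataFrame({k: pd.Series(v) for k, v in regions.items()})
print(padded)  # "alanine" gets NaN in the last row
# pd.DataFrame(regions) would instead raise: all arrays must be the same length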
# Library for final plots used in the paper
# Created on: Jan 7, 2021
# Author: <NAME>
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from os.path import join
import numpy as np
from scipy.stats import norm, stats, spearmanr
import pylab as pl
import matplotlib.ticker as mtick
import math
import matplotlib as mpl
from datetime import datetime, timezone
from dateutil import tz
from scipy.stats import zscore
from matplotlib import ticker
from scipy.signal import find_peaks
from matplotlib.lines import Line2D
mpl.use("pgf")
text_size = 14
plt.rcParams.update({'font.size': text_size})
plt.rc('xtick',labelsize=text_size)
plt.rc('ytick',labelsize=text_size)
preamble = [r'\usepackage{fontspec}',
r'\usepackage{physics}']
params = {'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex',
'pgf.preamble': preamble}
mpl.rcParams.update(params)
def plot_run_spread_temporally(path_to_cluster_info, save_dir):
'''
Plots the temporal behavior of runs in the clusters.
Parameters
----------
path_to_cluster_info: string
Path to csv file with clustering temporal run information.
Returns
-------
None
'''
df = pd.read_csv(path_to_cluster_info, index_col=0)
range = []
for n in df['Total Time']:
if(n<86400):
range.append('<1d')
elif(n<259200):
range.append('1-\n3d')
elif(n<604800):
range.append('3d-\n1w')
elif(n<(2592000/2)):
range.append('1w-\n2w')
elif(n<2592000):
range.append('2w-\n1M')
elif(n<7776000):
range.append('1-\n3M')
elif(n<15552000):
range.append('3-\n6M')
else:
print("don't forget: %d"%n)
df['Range'] = range
read_df = df[df['Operation']=='Read']
write_df = df[df['Operation']=='Write']
rm = np.median(read_df[read_df['Range']=='1w-\n2w']['Temporal Coefficient of Variation'])
wm = np.median(write_df[write_df['Range']=='1w-\n2w']['Temporal Coefficient of Variation'])
print('Median for read at 1-2w: %.3f'%rm)
print('Median for write at 1-2w: %.3f'%wm)
# Barplot of time periods to temporal CoV
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,1.8])
fig.subplots_adjust(left=0.19, right=0.990, top=0.96, bottom=0.48, wspace=0.03)
order = ['<1d', '1-\n3d', '3d-\n1w', '1w-\n2w', '2w-\n1M', '1-\n3M', '3-\n6M']
PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[0], x='Range', y='Temporal Coefficient of Variation', data=read_df, order=order, color='skyblue', fliersize=0, **PROPS)
PROPS = {'boxprops':{'facecolor':'maroon', 'edgecolor':'black'}, 'medianprops':{'color':'white', 'linewidth': 1.25},
'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.boxplot(ax=axes[1], x='Range', y='Temporal Coefficient of Variation', data=write_df, order=order,color='maroon', fliersize=0, **PROPS)
# iterate over boxes
for i,box in enumerate(axes[0].artists):
box.set_edgecolor('black')
axes[0].set_ylabel('')
axes[1].set_ylabel('')
fig.text(0.005, 0.45, 'Inter-arrival\nTimes CoV', rotation=90)
axes[0].set_xlabel('')
axes[1].set_xlabel('')
fig.text(0.38, 0.13, '(a) Read', ha='center')
fig.text(0.80, 0.13, '(b) Write', ha='center')
fig.text(0.58, 0.03, 'Cluster Time Span', ha='center')
#fig.text(0.001, 0.65, "Performance\nCoV (%)", rotation=90, va='center', multialignment='center')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_yticks([0,1000,2000,3000])
axes[0].set_ylim(0,3000)
plt.savefig(join('./time_period_v_temp_cov.pdf'))
plt.close()
plt.clf()
def plot_run_spread_span_frequency(path_to_cluster_info, save_dir):
'''
Plots the temporal behavior of runs in the clusters.
Parameters
----------
path_to_cluster_info: string
Path to csv file with clustering temporal run information.
Returns
-------
None
'''
df = pd.read_csv(path_to_cluster_info, index_col=0)
read_df = df[df['Operation']=='Read']
write_df = df[df['Operation']=='Write']
# CDF of time periods and frequency
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5, 2.2])
fig.subplots_adjust(left=0.15, right=0.965, top=.94, bottom=0.34, wspace=0.05)
read_info = read_df['Total Time']/86400
write_info = write_df['Total Time']/86400
read_median = np.median(read_info)
write_median = np.median(write_info)
read_info = np.log10(read_info)
write_info = np.log10(write_info)
read_median_plotting = np.median(read_info)
write_median_plotting = np.median(write_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
print(cdf_write[100])
axes[0].plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2, label='Read')
axes[0].plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2, label='Write')
axes[0].set_ylabel('CDF of Clusters')
axes[0].set_xlabel('(a) Cluster Time\nSpan (days)')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[0].set_ylim(0,1)
axes[0].set_xlim(0,3)
axes[0].set_yticks(np.arange(0,1.2,0.25))
positions = [1, 2, 3]
labels = ['$10^1$', '$10^2$', '$10^3$']
axes[0].xaxis.set_major_locator(ticker.FixedLocator(positions))
axes[0].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
vals = axes[0].get_yticks()
axes[0].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
axes[0].set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
axes[0].axvline(np.log10(4), color='skyblue', zorder=0, linestyle='--', linewidth=2)
axes[0].axvline(write_median_plotting, color='maroon', zorder=0, linestyle=':', linewidth=2)
print("Median of Read: %f"%read_median)
print("Median of Write: %f"%write_median)
# Add legend
axes[0].legend(loc='lower right', fancybox=True)
read_info = read_df['Average Runs per Day'].tolist()
write_info = write_df['Average Runs per Day'].tolist()
read_median = np.median(read_info)
write_median = np.median(write_info)
read_info = np.log10(read_info)
write_info = np.log10(write_info)
read_bins = np.arange(0, int(math.ceil(max(read_info)))+1, 0.01)
hist = np.histogram(read_info, bins=read_bins)[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
write_bins = np.arange(0, int(math.ceil(max(write_info)))+1, 0.01)
hist = np.histogram(write_info, bins=write_bins)[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
axes[1].plot(read_bins[:-1], cdf_read, color='skyblue', linewidth=2, label='Read')
axes[1].plot(write_bins[:-1], cdf_write, color='maroon', linewidth=2, label='Write')
axes[1].set_xlabel('(b) Run Frequency\n(runs/day)')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].set_axisbelow(True)
axes[1].set_ylim(0,1)
axes[1].set_xlim(0,3)
axes[1].set_yticks(np.arange(0,1.2,0.25))
positions = [1, 2, 3]
labels = ['$10^1$', '$10^2$', '$10^3$']
axes[1].xaxis.set_major_locator(ticker.FixedLocator(positions))
axes[1].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
vals = axes[0].get_yticks()
axes[1].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = []
tmp_ticks = [np.log10(x) for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
axes[1].set_xticks(f_ticks, minor=True)
# Add vertical lines for medians
axes[1].axvline(np.log10(read_median), color='skyblue', zorder=0, linestyle='--', linewidth=2)
axes[1].axvline(np.log10(write_median), color='maroon', zorder=0, linestyle=':', linewidth=2)
print("Median of Read: %f"%read_median)
print("Median of Write: %f"%write_median)
# Add legend
axes[0].legend(loc='lower right', fancybox=True)
#axes[1].get_legend().remove()
plt.savefig(join(save_dir, 'time_periods_freq.pdf'))
plt.close()
plt.clf()
return None
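# The CDF curves above are built with the same histogram/cumsum pattern each
# time. A small helper capturing that pattern (an illustrative refactoring,
# not called by the functions in this file):
def _empirical_cdf(values, bin_width=0.01):
    """Return (bin_edges[:-1], cdf) for non-negative (e.g. log10-scaled) values."""
    bins = np.arange(0, int(math.ceil(max(values))) + 1, bin_width)
    hist = np.histogram(values, bins=bins)[0]
    cdf = np.cumsum(hist)
    return bins[:-1], cdf / cdf[-1]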
def plot_time_of_day_v_perf(path_to_data, save_dir):
'''
Plots time period effects on performance.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
range_tod = []
range_tow = []
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/Chicago')
for n in df['Start Time']:
datetime_time = datetime.fromtimestamp(n).replace(tzinfo=from_zone).astimezone(to_zone)
h = int(datetime_time.hour)
d = int(datetime_time.weekday())
# Group by time of day
'''
if(h == 0 or h == 1 or h == 2):
range_tod.append('12-\n3am')
elif(h == 3 or h == 4 or h == 5):
range_tod.append('3-\n6am')
elif(h == 6 or h == 7 or h == 8):
range_tod.append('6-\n9am')
elif(h == 9 or h == 10 or h == 11):
range_tod.append('9am-\n12pm')
elif(h == 12 or h == 13 or h == 14):
range_tod.append('12-\n3pm')
elif(h == 15 or h == 16 or h == 17):
range_tod.append('3-\n6pm')
elif(h == 18 or h == 19 or h == 20):
range_tod.append('6-\n9pm')
elif(h == 21 or h == 22 or h == 23):
range_tod.append('9pm-\n12am')
else:
print("don't forget: %d"%n)
'''
if(h == 0 or h == 1 or h == 2):
range_tod.append('0-\n3')
elif(h == 3 or h == 4 or h == 5):
range_tod.append('3-\n6')
elif(h == 6 or h == 7 or h == 8):
range_tod.append('6-\n9')
elif(h == 9 or h == 10 or h == 11):
range_tod.append('9-\n12')
elif(h == 12 or h == 13 or h == 14):
range_tod.append('12-\n15')
elif(h == 15 or h == 16 or h == 17):
range_tod.append('15-\n18')
elif(h == 18 or h == 19 or h == 20):
range_tod.append('18-\n21')
elif(h == 21 or h == 22 or h == 23):
range_tod.append('21-\n24')
else:
print("don't forget: %d"%n)
# Now for time of week
if(d == 0):
range_tow.append('Mo')
elif(d == 1):
range_tow.append('Tu')
elif(d == 2):
range_tow.append('We')
elif(d == 3):
range_tow.append('Th')
elif(d == 4):
range_tow.append('Fr')
elif(d == 5):
range_tow.append('Sa')
elif(d == 6):
range_tow.append('Su')
else:
print("don't forget: %d"%n)
df['Range, Time of Day'] = range_tod
df['Range, Time of Week'] = range_tow
# Get rid of outliers to make cleaner plots
order = ['0-\n3', '3-\n6', '6-\n9', '9-\n12', '12-\n15', '15-\n18', '18-\n21', '21-\n24']
df_tod = pd.DataFrame(columns=['Range, Time of Day', 'Operation', 'Performance Z-Score'])
for tod in order:
working_df = df[df['Range, Time of Day']==tod].reset_index(drop=True)
working_df['Z-Score of Z-Scores'] = (working_df['Performance Z-Score'] - working_df['Performance Z-Score'].mean())/working_df['Performance Z-Score'].std(ddof=0)
working_df = working_df[working_df['Z-Score of Z-Scores'] < 2]
working_df = working_df[working_df['Z-Score of Z-Scores'] > -2]
working_df = working_df.drop(labels=['Application', 'Cluster Number', 'Start Time', 'Range, Time of Week', 'Z-Score of Z-Scores'], axis='columns')
df_tod = df_tod.append(working_df, ignore_index=True)
df_tow = pd.DataFrame(columns=['Range, Time of Week', 'Operation', 'Performance Z-Score'])
for tow in ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']:
working_df = df[df['Range, Time of Week']==tow].reset_index(drop=True)
working_df['Z-Score of Z-Scores'] = (working_df['Performance Z-Score'] - working_df['Performance Z-Score'].mean())/working_df['Performance Z-Score'].std(ddof=0)
working_df = working_df[working_df['Z-Score of Z-Scores'] < 2]
working_df = working_df[working_df['Z-Score of Z-Scores'] > -2]
working_df = working_df.drop(labels=['Application', 'Cluster Number', 'Start Time', 'Range, Time of Day', 'Z-Score of Z-Scores'], axis='columns')
df_tow = df_tow.append(working_df, ignore_index=True)
# Barplot of time of day to performance CoV
read_df = df_tod[df_tod['Operation']=='Read']
write_df = df_tod[df_tod['Operation']=='Write']
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,2])
fig.subplots_adjust(left=0.16, right=0.990, top=0.96, bottom=0.45, wspace=0.03)
#order = ['12-\n3am', '3-\n6am', '6-\n9am', '9am-\n12pm', '12-\n3pm', '3-\n6pm', '6-\n9pm', '9pm-\n12am']
#PROPS = {'boxprops':{'facecolor':'skyblue', 'edgecolor':'black'}, 'medianprops':{'color':'black'},
# 'whiskerprops':{'color':'black'},'capprops':{'color':'black'}}
sns.violinplot(ax=axes[0], x='Range, Time of Day', y='Performance Z-Score', data=read_df, order=order, color='skyblue', inner='quartile', linewidth=2)
sns.violinplot(ax=axes[1], x='Range, Time of Day', y='Performance Z-Score', data=write_df, order=order, color='maroon', inner='quartile', linewidth=2)
#violins = [art for art in axes[0].get_children()]
#for i in range(len(violins)):
# violins[i].set_edgecolor('black')
axes[0].set_ylabel('')
axes[1].set_ylabel('')
fig.text(0.37, 0.14, '(a) Read', ha='center')
fig.text(0.78, 0.14, '(b) Write', ha='center')
fig.text(0.58, 0.02, 'Time of Day (24-hr)', ha='center')
fig.text(0.001, 0.65, "Performance\nZ-Score", rotation=90, va='center', multialignment='center')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_ylim(-3,3)
axes[0].set_yticks(range(-3,4,1))
axes[0].tick_params(axis='x', labelsize=13)
axes[1].tick_params(axis='x', labelsize=13)
axes[0].tick_params(axis='y', labelsize=14)
axes[1].tick_params(axis='y', labelsize=14)
for l in axes[0].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[0].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[1].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('white')
l.set_alpha(0.8)
for l in axes[1].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
plt.savefig(join(save_dir, 'time_day_v_perf.pdf'))
plt.close()
plt.clf()
# Barplot of time of week to performance CoV
read_df = df_tow[df_tow['Operation']=='Read']
write_df = df_tow[df_tow['Operation']=='Write']
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[5,1.9])
fig.subplots_adjust(left=0.16, right=0.990, top=0.96, bottom=0.38, wspace=0.03)
order = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
sns.violinplot(ax=axes[0], x='Range, Time of Week', y='Performance Z-Score', data=read_df, order=order, color='skyblue', inner='quartile', edgecolor='black')
sns.violinplot(ax=axes[1], x='Range, Time of Week', y='Performance Z-Score', data=write_df, order=order, color='maroon', inner='quartile')
axes[0].set_ylabel('')
axes[1].set_ylabel('')
fig.text(0.37, 0.135, '(a) Read', ha='center')
fig.text(0.78, 0.135, '(b) Write', ha='center')
fig.text(0.58, 0.02, 'Day of Week', ha='center')
fig.text(0.001, 0.65, "Performance\nZ-Score", rotation=90, va='center', multialignment='center')
axes[0].set_xlabel('')
axes[1].set_xlabel('')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
axes[0].set_ylim(-3,3)
axes[0].set_yticks(range(-3,4,1))
axes[0].tick_params(axis='x', labelsize=14)
axes[1].tick_params(axis='x', labelsize=14)
axes[0].tick_params(axis='y', labelsize=14)
axes[1].tick_params(axis='y', labelsize=14)
for l in axes[0].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[0].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('black')
l.set_alpha(0.8)
for l in axes[1].lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('white')
l.set_alpha(0.8)
for l in axes[1].lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.2)
l.set_color('white')
l.set_alpha(0.8)
plt.savefig(join(save_dir, 'time_week_v_perf.pdf'), backend='pgf')
plt.close()
plt.clf()
return None
def plot_no_user_app_characterizations(path_to_data, path_to_normal_data, save_dir):
'''
Plots the cluster characterizations of clusters formed without being separated
by user and application.
Parameters
----------
path_to_data: string
Path to directory with data from clusters without user/app sorting.
path_to_normal_data: string
Path to directory with data from clusters with user/app sorting.
save_dir: string
Path to the directory to save the plots in.
Returns
-------
None
'''
# Plot CoV of cluster sizes
path = join(path_to_data, 'no_runs_in_clusters_read.txt')
with open(path, 'r') as f:
no_read_clusters = f.read().split("\n")
f.close()
no_read_clusters = pd.Series(no_read_clusters).astype(int)
no_read_clusters = no_read_clusters[no_read_clusters > 40]
path = join(path_to_data, 'no_runs_in_clusters_write.txt')
with open(path, 'r') as f:
no_write_clusters = f.read().split("\n")
f.close()
no_write_clusters = pd.Series(no_write_clusters).astype(int)
no_write_clusters = no_write_clusters[no_write_clusters > 40]
path = join(path_to_normal_data, 'no_runs_in_clusters_read.txt')
with open(path, 'r') as f:
no_read_clusters_o = f.read().split("\n")
f.close()
no_read_clusters_o = pd.Series(no_read_clusters_o).astype(int)
no_read_clusters_o = no_read_clusters_o[no_read_clusters_o > 40]
path = join(path_to_normal_data, 'no_runs_in_clusters_write.txt')
with open(path, 'r') as f:
no_write_clusters_o = f.read().split("\n")
f.close()
no_write_clusters_o = pd.Series(no_write_clusters_o).astype(int)
no_write_clusters_o = no_write_clusters_o[no_write_clusters_o > 40]
fig, axes = plt.subplots(1, 2, sharey=True, figsize=[12,5])
fig.subplots_adjust(left=0.075, right=0.992, top=0.97, bottom=0.12, wspace=0.07)
n_bins = 10000
plt.setp(axes, xlim=(40,3000))
hist = np.histogram(no_read_clusters, bins=range(max(no_read_clusters)+1))[0]
cdf_read = np.cumsum(hist)
cdf_read = [x/cdf_read[-1] for x in cdf_read]
hist = np.histogram(no_write_clusters, bins=range(max(no_write_clusters)+1))[0]
cdf_write = np.cumsum(hist)
cdf_write = [x/cdf_write[-1] for x in cdf_write]
hist = np.histogram(no_read_clusters_o, bins=range(max(no_read_clusters_o)+1))[0]
cdf_read_o = np.cumsum(hist)
cdf_read_o = [x/cdf_read_o[-1] for x in cdf_read_o]
hist = np.histogram(no_write_clusters_o, bins=range(max(no_write_clusters_o)+1))[0]
cdf_write_o = np.cumsum(hist)
cdf_write_o = [x/cdf_write_o[-1] for x in cdf_write_o]
axes[0].plot(cdf_read, color='skyblue', label='False', linewidth=4)
axes[0].plot(cdf_read_o, color='mediumseagreen', label='True', linewidth=4, linestyle='--')
axes[1].plot(cdf_write, color='maroon', label='False', linewidth=4)
axes[1].plot(cdf_write_o, color='gold', label='True', linewidth=4, linestyle='--')
axes[0].set_ylim(0,1)
vals = axes[0].get_yticks()
axes[0].set_yticklabels(['{:,.0%}'.format(x) for x in vals])
axes[0].set_ylabel('Percent of Clusters')
axes[0].set_xlabel('Number of Runs in a Read Cluster')
axes[1].set_xlabel('Number of Runs in a Write Cluster')
axes[0].legend(title='Clustered by Application', loc='lower right')
axes[1].legend(title='Clustered by Application', loc='lower right')
axes[0].yaxis.grid(color='lightgrey', linestyle=':')
axes[1].yaxis.grid(color='lightgrey', linestyle=':')
axes[0].set_axisbelow(True)
axes[1].set_axisbelow(True)
ticks = [40, 500, 1000, 1500, 2000, 2500, 3000]
axes[0].set_xticks(ticks)
axes[1].set_xticks(ticks)
plt.savefig(join(save_dir, 'cluster_sizes_no_user_app.pdf'))
plt.clf()
plt.close()
def plot_no_runs_v_no_clusters(path_to_data, save_dir):
'''
Shows the correlation between the number of runs of an application and the number of clusters it forms.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = pd.read_csv(path_to_data, index_col=0)
df = df[df['Number of Runs']!=0]
fig, ax = plt.subplots(1, 1, figsize=[6,3])
fig.subplots_adjust(left=0.115, right=0.97, top=.95, bottom=0.21, wspace=0.03)
ax.set_ylim(-.1,3)
ax.set_xlim(1,5)
df['Number of Clusters'] = np.log10(df['Number of Clusters'])
df['Number of Runs'] = np.log10(df['Number of Runs'])
sns.regplot(data=df[df['Operation']=='Read'], x='Number of Runs', y='Number of Clusters', color='skyblue', ax=ax,
ci=None, order=0, label='Read', scatter_kws={'edgecolors':'black', 'zorder':1}, line_kws={'zorder':0})
sns.regplot(data=df[df['Operation']=='Write'], x='Number of Runs', y='Number of Clusters', color='maroon', ax=ax,
ci=None, order=0, label='Write', scatter_kws={'edgecolors':'black', 'zorder':1}, line_kws={'zorder':0})
ax.yaxis.grid(color='lightgrey', linestyle=':')
ax.set_axisbelow(True)
ax.legend(loc='upper left', fancybox=True)
ax.set_ylabel('Number of Clusters')
ax.set_xlabel('Number of Runs of an Application')
positions = [0, 1, 2, 3]
labels = ['$10^0$', '$10^1$', '$10^2$', '$10^3$']
ax.yaxis.set_major_locator(ticker.FixedLocator(positions))
ax.yaxis.set_major_formatter(ticker.FixedFormatter(labels))
positions = [0, 1, 2, 3, 4 , 5]
labels = ['0', '$10^1$', '$10^2$', '$10^3$', '$10^4$', '$10^5$']
ax.xaxis.set_major_locator(ticker.FixedLocator(positions))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Add minor ticks
ticks = [1,2,3,4,5,6,7,8,9]
f_ticks = [np.log10(x) for x in ticks]
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_yticks(f_ticks, minor=True)
f_ticks = []
tmp_ticks = [np.log10(x)+1 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+2 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+3 for x in ticks]
f_ticks = f_ticks + tmp_ticks
tmp_ticks = [np.log10(x)+4 for x in ticks]
f_ticks = f_ticks + tmp_ticks
ax.set_xticks(f_ticks, minor=True)
plt.savefig(join(save_dir, 'no_runs_v_no_clusters_in_app.pdf'))
plt.clf()
plt.close()
def plot_cluster_sizes(path_to_data, save_dir):
'''
CDFs of read and write cluster sizes.
Parameters
----------
path_to_data: string
Returns
-------
None
'''
df = | pd.read_csv(path_to_data, index_col=0) | pandas.read_csv |
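# Standalone sketch of the axis-labelling trick used in
# plot_no_runs_v_no_clusters above: the data are log10-transformed before
# plotting, so major ticks are placed at integer positions and relabelled as
# powers of ten, with minor ticks at log10(2)..log10(9) offsets.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker

fig, ax = plt.subplots()
ax.plot(np.log10([1, 10, 250, 4000]), [0, 1, 2, 3])
ax.xaxis.set_major_locator(ticker.FixedLocator([0, 1, 2, 3, 4]))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(
    ['$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$']))
minor = [np.log10(m) + d for d in range(4) for m in range(2, 10)]
ax.set_xticks(minor, minor=True)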
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import csv
from jsonapi_client import Session, Filter
from plotnine import *
import pandas
API_BASE = "https://www.ebi.ac.uk/metagenomics/api/v1"
TAX_RANK = "phylum"
# MGYS00002474 (DRP001073) Metabolically active microbial communities
# in marine sediment under high-CO2 and low-pH extremes
study_accession = "MGYS00002474"
# MGYS00002421 (ERP009568) Prokaryotic microbiota associated to the digestive
# cavity of the jellyfish Cotylorhiza tuberculata
# study_accession = "MGYS00002421"
# MGYS00002371 (DRP000490) Porcupine Seabight 16S Ribosomal RNA
# study_accession = "MGYS00002371"
# MGYS00002441 EMG produced TPA metagenomics assembly of
# the doi: 10.3389/fmicb.2016.00579
# study_accession = "MGYS00002441"
# MGYS00002394 (SRP051741) Subgingival plaque and peri-implant biofilm
# study_accession = "MGYS00002394"
# MGYS00001211 (SRP076746) Human gut metagenome Metagenome
# study_accession = "MGYS00001211"
# MGYS00000601 (ERP013908) Assessment of Bacterial DNA Extraction Procedures for Metagenomic Sequencing Evaluated
# on Different Environmental Matrices.
# study_accession = "MGYS00000601"
# MGYS00002115
# The study includes fungal genetic diversity assessment by ITS-1 next generation sequencing (NGS) analyses
# study_accession = "MGYS00002115"
resource = "studies/" + study_accession + "/analyses"
rows = []
with Session(API_BASE) as session:
analyses = session.get(resource).resources
analyses_accs = [a.accession for a in analyses]
for analysis_accession in analyses_accs:
tax_annotations = session.get(
"/".join(["analyses", analysis_accession, "taxonomy", "ssu"])
).resources
for t in tax_annotations:
if t.hierarchy.get(TAX_RANK):
rows.append(
{
"analysis": analysis_accession,
"study": study_accession,
TAX_RANK: t.hierarchy.get(TAX_RANK),
"count": t.count,
"rel_abundance": 0, # this will be filled afterwards
},
)
data_frame = | pandas.DataFrame(rows) | pandas.DataFrame |
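# The rel_abundance field above is left at 0 "to be filled afterwards". One
# plausible way to fill it (an assumption, not taken from the original
# script) is to normalise the counts within each analysis run:
data_frame["rel_abundance"] = (
    data_frame["count"]
    / data_frame.groupby("analysis")["count"].transform("sum")
)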
from pull_datacb import getCBHistory, getLastRow
from datetime import timedelta, datetime
from dotenv import load_dotenv
from max_min import MaxMin
from slope import slope, slope_tick
import pandas as pd
import cbpro
import sys
import os
# While working within the sandbox you will only be able to trade BTC-USD and ETH-BTC
# When connected to the actual cbpro client you will be able to trade all pairs available on Coinbase
load_dotenv()
cb_key = os.getenv("CBP_SB_KEY")
cb_pass = os.getenv("CBP_SB_PHRASE")
cb_secret = os.getenv("CBP_SB_SECRET")
# A websocket class to get streaming data from the Coinbase Pro API
# Currently testing on the sandbox
class TextWebsocketClient(cbpro.WebsocketClient):
def __init__(self, url="wss://ws-feed.pro.coinbase.com", products=None, message_type="subscribe", mongo_collection=None, should_print=True, auth=False, api_key="", api_secret="", api_passphrase="", channels: str=None, period: int=None, symbol: str=None):
super().__init__(url, products, message_type, mongo_collection, should_print, auth, api_key, api_secret, api_passphrase, channels)
try:
self.period = int(period)
except (TypeError, ValueError):
print("You must enter an integer")
self.symbol = symbol.upper()
self.order_book = []
def on_open(self):
# Open the socket connection to the sandbox and initialise the message counter
self.client = cbpro.AuthenticatedClient(
cb_key,
cb_secret,
cb_pass,
api_url="https://api-public.sandbox.pro.coinbase.com"  # Coinbase Pro sandbox REST endpoint
)
print('websocket open')
self.url = "wss://ws-feed-public.sandbox.exchange.coinbase.com"
self.message_count = 0
self.dataframe = getCBHistory(self.symbol, self.period, cycles=4)
def updater(self, symbol: str, period: int):
row = getLastRow(symbol, period)
row = row[::-1]
row_time = pd.to_datetime(row.index[-1])
ltime = | pd.to_datetime(self.dataframe.index[-1]) | pandas.to_datetime |
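# Standalone sketch (not recovered from the truncated updater above) of the
# timestamp comparison it is building towards: a freshly fetched candle is
# appended only when its timestamp is newer than the last stored one.
import pandas as pd

history = pd.DataFrame({"close": [100.0]}, index=pd.to_datetime(["2021-01-01 00:00"]))
fetched = pd.DataFrame({"close": [101.0]}, index=pd.to_datetime(["2021-01-01 00:05"]))
if pd.to_datetime(fetched.index[-1]) > pd.to_datetime(history.index[-1]):
    history = pd.concat([history, fetched])
print(history)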
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import copy
import functools
import os
from time import time
import numpy as np
import pandas as pd
from pandapower.io_utils import JSONSerializableClass
from pandapower.io_utils import mkdirs_if_not_existent
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.idx_bus import VM, VA, NONE, BUS_TYPE
from pandapower.run import _init_runpp_options
from pandapower.timeseries.read_batch_results import v_to_i_s, get_batch_line_results, get_batch_trafo3w_results, \
get_batch_trafo_results, get_batch_bus_results
try:
import pplog
except ImportError:
import logging as pplog
logger = pplog.getLogger(__name__)
class OutputWriter(JSONSerializableClass):
"""
The OutputWriter class is used to store and format specific outputs from a time series calculation.
It contains a python-dictionary *output* which is a container for result DataFrames or arbitrary information you
would like to store. By default a pandas DataFrame is initialized for the key *Parameters*, which has the columns
"time_step", "controller_unstable", "powerflow_failed".
To simply log outputs during a time series simulation use **ow.log_variable(table, column)** where table is the name
of the (result) table, e.g. "res_bus", and column the name of the column, e.g. "vm_pu".
More sophisticated outputs can be added as well since for each value to be stored a function is
added to the *output_list* which is called at the end of each time step.
The function reads the calculation results and returns the desired values to store.
These values are then stored in the *output* DataFrame in a column named after the function you implemented.
Such a function can be used to store only some information of the power flow results, like the highest values
of the line loading in a time step or the mean values. Check the "advanced time series example" jupyter notebook
for an example.
INPUT:
**net** - The pandapower format network
**time_steps** (list) - time_steps to calculate as a list (or range)
OPTIONAL:
**output_path** (string, None) - Path to a folder where the output is written to.
**output_file_type** (string, ".p") - output filetype to use.
Allowed file extensions: [*.xls, *.xlsx, *.csv, *.p, *.json]
Note: XLS has a maximum number of 256 rows.
**csv_separator** (string, ";") - The separator used when writing to a csv file
**write_time** (int, None) - Time to save periodically to disk in minutes. Deactivated by default
**log_variables** (list, None) - list of tuples with (table, column) values to be logged by output writer.
Defaults are: res_bus.vm_pu and res_line.loading_percent. Additional variables can be added later on
with ow.log_variable or removed with ow.remove_log_variable
EXAMPLE:
>>> from pandapower.timeseries.output_writer import OutputWriter
>>> import pandapower.networks as nw
>>> net = nw.simple_four_bus_system()
>>> ow = OutputWriter(net) # create an OutputWriter
>>> ow.log_variable('res_bus', 'vm_pu') # add logging for bus voltage magnitudes
>>> ow.log_variable('res_line', 'loading_percent') # add logging for line loadings in percent
"""
def __init__(self, net, time_steps=None, output_path=None, output_file_type=".p", write_time=None,
log_variables=None, csv_separator=";"):
super().__init__()
self.net = net
self.output_path = output_path
self.output_file_type = output_file_type
self.write_time = write_time
self.log_variables = log_variables
# these are the default log variables which are added if log_variables is None
self.default_log_variables = [("res_bus", "vm_pu"), ("res_line", "loading_percent")]
self._add_log_defaults()
self.csv_separator = csv_separator
if write_time is not None:
self.write_time *= 60.0 # convert to seconds
# init the matrix and the list of output functions
self.output = dict()
# internal results stored as numpy arrays in dict. Is created from output_list
self.np_results = dict()
# output list contains functools.partial with tables, variables, index...
self.output_list = []
# real time is tracked to save results to disk regularly
self.cur_realtime = time()
# total time steps to calculate
self.time_steps = time_steps
# add output_writer to net
self.add_to_net(element="output_writer", index=0, overwrite=True)
# inits dataframes and numpy arrays which store results
# self.init_all()
# Saves all parameters as object attributes to store in JSON
def __str__(self):
# return self.__class__.__name__
return self.__repr__()
def __repr__(self):
s = "%s: writes output to '%s'" % (self.__class__.__name__, self.output_path)
s += " and logs:"
for output in self.log_variables:
table, variable = output[0], output[1]
s += "\n'" + str(table) + "." + str(variable) + "'"
return s
def _monkey_patch(self, method, new):
from types import MethodType
setattr(self, method, MethodType(new, self))
def _add_log_defaults(self):
if self.log_variables is None:
self.log_variables = list()
self.log_variables = copy.copy(self.default_log_variables)
if not isinstance(self.log_variables, list):
raise TypeError("log_variables must be None or a list of tuples like [('res_bus', 'vm_pu')]")
def init_log_variables(self):
"""
inits the log variables given to output writer.
log_variables is a list with tuples of DataFrame columns to log.
Example: [("res_bus", "vm_pu"), ("res_bus", "va_degree")]
If None are given the defaults are:
res_bus.vm_pu
res_line.loading_percent
"""
for log_args in self.log_variables:
# add log variable
self._init_log_variable(*log_args)
def init_all(self):
if isinstance(self.time_steps, list) or isinstance(self.time_steps, range):
self.output = dict()
self.np_results = dict()
self.output_list = list()
self.init_log_variables()
self.init_timesteps(self.time_steps)
self._init_np_results()
self._init_output()
else:
logger.debug("Time steps not set at init ")
def _init_output(self):
self.output = dict()
# init parameters
self.output["Parameters"] = pd.DataFrame(False, index=self.time_steps,
columns=["time_step", "controller_unstable",
"powerflow_failed"])
self.output["Parameters"].loc[:, "time_step"] = self.time_steps
def _init_np_results(self):
# inits numpy array (contains results)
self.np_results = dict()
for partial_func in self.output_list:
self._init_np_array(partial_func)
def _save_separate(self, append):
for partial in self.output_list:
if isinstance(partial, tuple):
# if batch output is used
table = partial[0]
variable = partial[1]
else:
# if output_list contains functools.partial
table = partial.args[0]
variable = partial.args[1]
if table != "Parameters":
file_path = os.path.join(self.output_path, table)
mkdirs_if_not_existent(file_path)
if append:
file_name = str(variable) + "_" + str(self.cur_realtime) + self.output_file_type
else:
file_name = str(variable) + self.output_file_type
file_path = os.path.join(file_path, file_name)
data = self.output[self._get_output_name(table, variable)]
if self.output_file_type == ".json":
data.to_json(file_path)
elif self.output_file_type == ".p":
data.to_pickle(file_path)
elif self.output_file_type in [".xls", ".xlsx"]:
try:
data.to_excel(file_path)
except ValueError as e:
if data.shape[1] > 255:
raise ValueError("pandas.to_excel() is not capable to handle large data" +
"with more than 255 columns. Please use other " +
"file_extensions instead, e.g. 'json'.")
else:
raise ValueError(e)
elif self.output_file_type == ".csv":
data.to_csv(file_path, sep=self.csv_separator)
def dump_to_file(self, append=False, recycle_options=None):
"""
Save the output to separate files in output_path with the file_type output_file_type. This is called after
the time series simulation by default.
**append** (bool, False) - Option for appending instead of overwriting the file
"""
save_single = False
self._np_to_pd()
if recycle_options not in [None, False]:
self.get_batch_outputs(recycle_options)
if self.output_path is not None:
try:
if save_single and self.output_file_type in [".xls", ".xlsx"]:
self._save_single_xls_sheet(append)
elif self.output_file_type in [".csv", ".xls", ".xlsx", ".json", ".p"]:
self._save_separate(append)
else:
raise UserWarning(
"Specify output file with .csv, .xls, .xlsx, .p or .json ending")
if append:
self._init_output()
except Exception:
raise
def dump(self, recycle_options=None):
append = False if self.time_step == self.time_steps[-1] else True
self.dump_to_file(append=append, recycle_options=recycle_options)
self.cur_realtime = time() # reset real time counter for next period
def save_results(self, time_step, pf_converged, ctrl_converged, recycle_options=None):
# Saves the results of the current time step to a matrix,
# using the output functions in the self.output_list
# remember the last time step
self.time_step = time_step
# add an entry to the output matrix if something failed
if not pf_converged:
self.save_nans_to_parameters()
self.output["Parameters"].loc[time_step, "powerflow_failed"] = True
elif not ctrl_converged:
self.output["Parameters"].loc[time_step, "controller_unstable"] = True
else:
self.save_to_parameters()
# if write time is exceeded or it is the last time step, data is written
if self.write_time is not None:
if time() - self.cur_realtime > self.write_time:
self.dump()
if self.time_step == self.time_steps[-1]:
self.dump(recycle_options)
def save_to_parameters(self):
# Saves the results of the current time step to self.output,
# using the output functions in the self.output_list
for of in self.output_list:
try:
of()
except:
import traceback
traceback.print_exc()
logger.error("Error in output function! Stored NaN for '%s' in time-step %i"
% (of.__name__, self.time_step))
self.save_nans_to_parameters()
def save_nans_to_parameters(self):
# Saves NaNs to for the given time step.
time_step_idx = self.time_step_lookup[self.time_step]
for of in self.output_list:
self.output["Parameters"].loc[time_step_idx, of.__name__] = np.NaN
def remove_log_variable(self, table, variable=None):
"""
removes a logged variable from outputs
INPUT:
**table** (str) - name of the DataFrame table (example: "res_bus")
OPTIONAL:
**variable** (str, None) - column name of the DataFrame table (example: "vm_pu"). If None, all variables of the
table are removed
"""
# remove variables from list
if variable is not None:
self.output_list = [o for o in self.output_list if not (o.args[0] == table and o.args[1] == variable)]
self.log_variables = [o for o in self.log_variables if not (o[0] == table and o[1] == variable)]
else:
self.output_list = [o for o in self.output_list if not (o.args[0] == table)]
self.log_variables = [o for o in self.log_variables if not (o[0] == table)]
# init output container again
self._init_np_results()
def log_variable(self, table, variable, index=None, eval_function=None, eval_name=None):
"""
Adds a variable to log during simulation and appends it to output_list.
INPUT:
**table** (str) - The DataFrame table where the variable is located as a string (e.g. "res_bus")
**variable** (str) - variable that should be logged as string (e.g. "p_mw")
OPTIONAL:
**index** (iterable, None) - Can be either one index or a list of indices, or a numpy array of indices,
or a pandas Index, or a pandas Series (e.g. net.load.bus) for which
the variable will be logged. If no index is given, the variable will be logged for all elements in the table
**eval_function** (function, None) - A function to be applied on the table / variable / index combination.
example: pd.min or pd.mean
**eval_name** (str, None) - The name for an applied function. If None the name consists of the table, variable,
index and eval function
example: "Sir_Lancelot"
EXAMPLE:
>>> ow.log_variable('res_bus', 'vm_pu') # add logging for bus voltage magnitudes
>>> ow.log_variable('res_line', 'loading_percent', index=[0, 2, 5]) # add logging for line loading of lines with indices 0, 2, 5
>>> ow.log_variable('res_line', 'loading_percent', eval_function=pd.max) # get the highest line loading only
"""
del_indices = list()
append_args = set()
append = True
# check if new log_variable is already in log_variables. If so either append or delete
for i, log_args in enumerate(self.log_variables):
if log_args[0] == table and log_args[1] == variable:
# table and variable exist in log_variables
if eval_function is not None or eval_name is not None:
append = True
continue
if len(log_args) == 2 and eval_function is None:
# everything from table / variable is logged
append = False
continue
if log_args[2] is not None and index is not None and eval_function is None:
                    # an index was logged before and a new index is given: extend the stored
                    # index and keep unique entries (list.extend() returns None, so build the
                    # union explicitly instead of relying on its return value)
                    self.log_variables[i] = (table, variable,
                                             list(set(list(log_args[2]) + list(index))))
else:
del_indices.append(i)
append_args.add((table, variable))
append = False
for i in del_indices:
del self.log_variables[i]
for log_arg in append_args:
self.log_variables.append(log_arg)
if append:
self.log_variables.append((table, variable, index, eval_function, eval_name))
def _init_ppc_logging(self, table, variable, index, eval_function, eval_name):
var_name = self._get_output_name(table, variable)
ppc = self.net["_ppc"]
if ppc is None:
# if no ppc is in net-> create one
options = dict(algorithm='nr', calculate_voltage_angles="auto", init="auto",
max_iteration="auto", tolerance_mva=1e-8, trafo_model="t",
trafo_loading="current", enforce_q_lims=False, check_connectivity=True,
voltage_depend_loads=True, consider_line_temperature=False)
_init_runpp_options(self.net, **options)
ppc, _ = _pd2ppc(self.net)
self.net["_ppc"] = ppc
index = list(range(sum(ppc['bus'][:, BUS_TYPE] != NONE)))
self._append_output_list(table, variable, index, eval_function, eval_name, var_name, func=self._log_ppc)
return index
def _init_log_variable(self, table, variable, index=None, eval_function=None, eval_name=None):
if "ppc" in table:
index = self._init_ppc_logging(table, variable, index, eval_function, eval_name)
if np.any(pd.isnull(index)):
# check how many elements there are in net
index = self.net[table.split("res_")[-1]].index
if not hasattr(index, '__iter__'):
index = [index]
if isinstance(index, (np.ndarray, pd.Index, pd.Series)):
index = index.tolist()
if eval_function is not None and eval_name is None:
eval_name = "%s.%s.%s.%s" % (table, variable, str(index), eval_function.__name__)
if eval_function is None and eval_name is not None:
logger.info("'eval_name' is to give a name in case of evaluation functions. Since " +
"no function is given for eval_name '%s', " % eval_name +
"eval_name is neglected.")
eval_name = None
# var_name = self._get_hash((table, variable, index, eval_function))
var_name = self._get_output_name(table, variable)
idx = self._get_same_log_variable_partial_func_idx(table, variable, eval_function,
eval_name)
if idx is not None:
self._append_existing_log_variable_partial_func(idx, index)
else:
self._append_output_list(table, variable, index, eval_function, eval_name, var_name)
def _get_same_log_variable_partial_func_idx(self, table, variable, eval_function, eval_name):
""" Returns the position index in self.output_list of partial_func which has the same table
and variable and no evaluation function. """
if eval_function is None and eval_name is None:
for i, partial_func in enumerate(self.output_list):
partial_args = partial_func.args
match = partial_args[0] == table
match &= partial_args[1] == variable
if match:
return i
def _append_existing_log_variable_partial_func(self, idx, index):
""" Appends the index of existing, same partial_func in output_list. """
for i in index:
if i not in self.output_list[idx].args[2]:
self.output_list[idx].args[2].append(i)
def _append_output_list(self, table, variable, index, eval_function, eval_name, var_name, func=None):
""" Appends the output_list by an additional partial_func. """
func = self._log if func is None else func
partial_func = functools.partial(func, table, variable, index, eval_function, eval_name)
partial_func.__name__ = var_name
self.output_list.append(partial_func)
if self.time_steps is not None:
self._init_np_array(partial_func)
def _log(self, table, variable, index, eval_function=None, eval_name=None):
try:
# ToDo: Create a mask for the numpy array in the beginning and use this one for getting the values. Faster
if self.net[table].index.equals(pd.Index(index)):
# if index equals all values -> get numpy array directly
result = self.net[table][variable].values
else:
# get by loc (slow)
result = self.net[table].loc[index, variable].values
if eval_function is not None:
result = eval_function(result)
# save results to numpy array
time_step_idx = self.time_step_lookup[self.time_step]
hash_name = self._get_np_name((table, variable, index, eval_function, eval_name))
self.np_results[hash_name][time_step_idx, :] = result
except Exception as e:
logger.error("Error at index %s for %s[%s]: %s" % (index, table, variable, e))
def _log_ppc(self, table, variable, index, eval_function=None, eval_name=None):
        # custom log function for ppc results
ppci = self.net["_ppc"]["internal"]
if variable == "vm":
v = VM
elif variable == "va":
v = VA
else:
raise NotImplementedError("No other variable implemented yet.")
result = ppci[table.split("_")[-1]][:, v]
if eval_function is not None:
result = eval_function(result)
# save results to numpy array
time_step_idx = self.time_step_lookup[self.time_step]
hash_name = self._get_np_name((table, variable, index, eval_function, eval_name))
self.np_results[hash_name][time_step_idx, :] = result
def _np_to_pd(self):
        # convert numpy arrays (faster to save results to) into pandas DataFrames (user friendly)
# intended use: At the end of time series simulation write results to pandas
for partial_func in self.output_list:
(table, variable, index, eval_func, eval_name) = partial_func.args
# res_name = self._get_hash(table, variable)
res_name = self._get_output_name(table, variable)
np_name = self._get_np_name(partial_func.args)
columns = index
if eval_name is not None:
columns = [eval_name]
res_df = pd.DataFrame(self.np_results[np_name], index=self.time_steps, columns=columns)
if res_name in self.output and eval_name is not None:
try:
self.output[res_name] = pd.concat([self.output[res_name], res_df], axis=1,
sort=False)
except TypeError:
# pandas legacy < 0.21
                    self.output[res_name] = pd.concat([self.output[res_name], res_df], axis=1)
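    # Note (sketch derived from the code above): after _np_to_pd(), each entry
    # self.output[self._get_output_name(table, variable)] is a DataFrame indexed by the simulated
    # time steps, with one column per logged element index (or a single eval_name column when an
    # eval_function was used), e.g. hypothetically:
    #   ow.output[ow._get_output_name("res_bus", "vm_pu")].loc[0]  # logged bus values at time step 0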
"""Class and methods to retrieve and analyze EDGAR text data
- SEC Edgar, 10K, 8K, MD&A, Business Descriptions
- BeautifulSoup, requests, regular expressions
Author: <NAME>
License: MIT
"""
import lxml
from bs4 import BeautifulSoup
import pandas as pd
from pandas import DataFrame, Series
import os, sys, time, re
import requests, zipfile, io, gzip, csv, json, unicodedata, glob
import numpy as np
import matplotlib.pyplot as plt
import config
def _print(*args, echo=config.ECHO, **kwargs):
if echo: print(*args, **kwargs)
def requests_get(url, params=None, retry=7, sleep=2, timeout=3, delay=0.25,
trap=False, headers=config.headers, echo=config.ECHO):
"""Wrapper over requests.get, with retry loops and delays
Parameters
----------
url : str
URL address to request
params : dict of {key: value} (optional), default is None
Payload of &key=value to append to url
headers : dict (optional)
e.g. User-Agent, Connection and other headers parameters
timeout : int (optional), default is 3
Number of seconds before timing out one request try
    retry : int (optional), default is 7
        Number of times to retry request
    sleep : int (optional), default is 2
        Number of seconds to wait between retries
    trap : bool (optional), default is False
        On timed-out after retries: if True raise exception, else return None
    delay : float (optional), default is 0.25
        Number of seconds to initially wait (plus a random jitter of up to the same amount)
echo : bool (optional), default is True
whether to display verbose messages to aid debugging
Returns
-------
r : requests.Response object, or None
None if timed-out or status_code != 200
"""
_print(url, echo=echo)
if delay:
time.sleep(delay + (delay * np.random.rand()))
    r = None
    for i in range(retry):
        try:
            r = requests.get(url, headers=headers, timeout=timeout, params=params)
            assert r.status_code >= 200 and r.status_code <= 404
            break
        except Exception as e:
            time.sleep(sleep * (2 ** i) + sleep * np.random.rand())
            _print(e, echo=echo)  # r may be unbound/None here, so report only the exception
            r = None
if r is None: # likely timed-out after retries:
if trap: # raise exception if trap, else silently return None
raise Exception(f"requests_get: {url} {time.time()}")
return None
if r.status_code != 200:
_print(r.status_code, r.content, echo=echo)
return None
return r
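# Minimal usage sketch (retry/timeout values are illustrative; the URL is the one used below):
#   r = requests_get('https://www.sec.gov/include/ticker.txt', retry=3, timeout=5)
#   if r is not None:
#       text = r.text   # raw response body; None means the request ultimately failed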
class Edgar:
"""Class to retrieve and pre-process Edgar website documents
Attributes
----------
forms_ : dict with key in {'10-K', '10-Q', '8-K'}
Values are list of form names str
Notes
-----
https://www.sec.gov/edgar/searchedgar/accessing-edgar-data.htm
https://www.investor.gov/introduction-investing/general-resources/
news-alerts/alerts-bulletins/investor-bulletins/how-read-8
"""
# Create .forms_ list: EDGAR_Forms_v2.1.py from ND-SRAF / McDonald 201606
f_10K = ['10-K', '10-K405', '10KSB', '10-KSB', '10KSB40']
f_10KA = ['10-K/A', '10-K405/A', '10KSB/A', '10-KSB/A', '10KSB40/A']
f_10KT = ['10-KT', '10KT405', '10-KT/A', '10KT405/A']
f_10Q = ['10-Q', '10QSB', '10-QSB']
f_10QA = ['10-Q/A', '10QSB/A', '10-QSB/A']
f_10QT = ['10-QT', '10-QT/A']
forms_ = {'10-K' : f_10K + f_10KA + f_10KT,
'10-Q' : f_10Q + f_10QA + f_10QT,
'8-K' : ['8-K']}
url_prefix = 'https://www.sec.gov/Archives'
#
# Static methods to fetch documents from SEC Edgar website
#
@staticmethod
def basename(date, form, cik, pathname, **kwargs):
"""Construct base filename from components of the filing pathname"""
base = os.path.split(pathname)[-1]
return f"{date}_{form.replace('/A', '-A')}_edgar_data_{cik}_{base}"
@staticmethod
def from_path(pathname, filename=None):
"""Extract meta info from edgar pathname"""
items = pathname.split('.')[0].split('/')
adsh = items[-1].replace('-','')
resource = os.path.join(*items[:-1], adsh)
indexname = os.path.join(resource, items[-1] + '-index.html')
return (os.path.join(resource, filename) if filename
else {'root': Edgar.url_prefix,
'adsh': adsh,
'indexname': indexname,
'resource' : resource})
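    # Example of what from_path returns (derived from the string handling above,
    # POSIX-style separators shown):
    #   Edgar.from_path('edgar/data/51143/0000051143-13-000007.txt')
    #   -> {'root': 'https://www.sec.gov/Archives',
    #       'adsh': '000005114313000007',
    #       'indexname': 'edgar/data/51143/000005114313000007/0000051143-13-000007-index.html',
    #       'resource': 'edgar/data/51143/000005114313000007'}
    #   Edgar.from_path('edgar/data/51143/0000051143-13-000007.txt', filename='doc.htm')
    #   -> 'edgar/data/51143/000005114313000007/doc.htm'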
@staticmethod
def fetch_tickers(echo=config.ECHO):
"""Fetch tickers-to-cik lookup from SEC web page as a pandas Series"""
url = 'https://www.sec.gov/include/ticker.txt'
tickers = requests_get(url, echo=echo).text
df = DataFrame(data = [t.split('\t') for t in tickers.split('\n')],
columns = ['ticker','cik'])
return df.set_index('ticker')['cik'].astype(int)
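    # Usage sketch:
    #   tickers = Edgar.fetch_tickers()   # Series indexed by (typically lower-case) ticker symbol
    #   cik = tickers['aapl']             # integer CIK of the company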
@staticmethod
def fetch_index(date=None, year=None, quarter=None, echo=config.ECHO):
"""Fetch edgar daily or master index, or walk to get all daily dates"""
# https://www.sec.gov/Archives/edgar/data/51143/0000051143-13-000007.txt
if year and quarter: # get full-index by year/quarter
root = 'https://www.sec.gov/Archives/edgar/full-index/'
url = f"{root}{year}/QTR{quarter}/master.idx"
r = requests_get(url, echo=echo)
if r is None:
return None
df = pd.read_csv(
io.BytesIO(r.content), sep='|', quoting=3, encoding='latin-1',
header=None, low_memory=False, na_filter=False, #skiprows=7,
names=['cik', 'name', 'form', 'date', 'pathname'])
df['date'] = df['date'].str.replace('-','')
df = df[df['date'].str.isdigit() & df['cik'].str.isdigit()]
df = df.drop_duplicates(['pathname', 'date', 'form', 'cik'])
df['cik'] = df['cik'].astype(int)
df['date'] = df['date'].astype(int)
return df.reset_index(drop=True)
elif date is not None: # get daily-index
root = 'https://www.sec.gov/Archives/edgar/daily-index/'
q = (((date // 100) % 100) + 2) // 3
url = f"{root}{date//10000}/QTR{q}/master.{date}.idx"
r = requests_get(url, echo=echo)
if r is None:
d = ((date // 10000) % 100) + ((date % 10000) * 100)
url = f"{root}{date//10000}/QTR{q}/master.{d:06d}.idx"
r = requests_get(url, echo=echo)
df = pd.read_csv(
io.BytesIO(r.content), sep='|', quoting=3, encoding='utf-8',
low_memory=False, na_filter=False, header=None, #skiprows=7,
names=['cik', 'name', 'form', 'date', 'pathname'])
df = df[df['date'].str.isdigit() & df['cik'].str.isdigit()]
df['cik'] = df['cik'].astype(int)
df['date'] = df['date'].astype(int)
return df.reset_index(drop=True)
        elif year or quarter:
            raise Exception('Invalid arguments to fetch_index: year and quarter must be given together')
        # called with no arguments => walk the daily-index tree to collect all available dates
        root = 'https://www.sec.gov/Archives/edgar/daily-index/'
        leaf = {}
        queue = ['']
while len(queue):
sub = queue.pop()
f = io.BytesIO(requests_get(root + sub + "index.json",
echo=echo).content)
nodes = json.loads(f.read().decode('utf-8'))['directory']['item']
for node in nodes:
if node['type'] == 'dir':
queue += [sub + node['href']]
#print(str(node))
else:
s = node['name'].split('.')
if s[0] == 'master' and s[2] == 'idx':
d = int(s[1])
if d <= 129999:
d = (d%100)*10000 + (d//100) # 070194->940701
if d <= 129999:
                            d += 20000000 # 091231->20091231
if d <= 999999:
d += 19000000 # 970102->19970102
leaf[d] = sub + node['name']
        return Series(leaf)
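    # Usage sketch for the three modes of fetch_index:
    #   idx_q = Edgar.fetch_index(year=2020, quarter=1)  # quarterly master index as a DataFrame
    #   idx_d = Edgar.fetch_index(date=20200301)         # one day's master index as a DataFrame
    #   dates = Edgar.fetch_index()  # Series mapping YYYYMMDD -> relative path of that day's .idx file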
from io import StringIO
from pathlib import Path
import pytest
import pandas as pd
from pandas import DataFrame, read_json
import pandas._testing as tm
from pandas.io.json._json import JsonReader
@pytest.fixture
def lines_json_df():
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
return df.to_json(lines=True, orient="records")
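# The fixture above yields JSON Lines text of the form:
#   {"A":1,"B":4}
#   {"A":2,"B":5}
#   {"A":3,"B":6}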
def test_read_jsonl():
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars():
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_to_jsonl():
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(read_json(result, lines=True), df)
@pytest.mark.parametrize("chunksize", [1, 1.0])
def test_readjson_chunks(lines_json_df, chunksize):
# Basic test that read_json(chunks=True) gives the same result as
# read_json(chunks=False)
# GH17048: memory usage when lines=True
unchunked = read_json(StringIO(lines_json_df), lines=True)
reader = read_json(StringIO(lines_json_df), lines=True, chunksize=chunksize)
chunked = pd.concat(reader)
tm.assert_frame_equal(chunked, unchunked)
def test_readjson_chunksize_requires_lines(lines_json_df):
msg = "chunksize can only be passed if lines=True"
with pytest.raises(ValueError, match=msg):
pd.read_json(StringIO(lines_json_df), lines=False, chunksize=2)
def test_readjson_chunks_series():
# Test reading line-format JSON to Series with chunksize param
s = pd.Series({"A": 1, "B": 2})
strio = StringIO(s.to_json(lines=True, orient="records"))
unchunked = pd.read_json(strio, lines=True, typ="Series")
strio = StringIO(s.to_json(lines=True, orient="records"))
chunked = pd.concat(pd.read_json(strio, lines=True, typ="Series", chunksize=1))
tm.assert_series_equal(chunked, unchunked)
def test_readjson_each_chunk(lines_json_df):
# Other tests check that the final result of read_json(chunksize=True)
# is correct. This checks the intermediate chunks.
chunks = list(pd.read_json(StringIO(lines_json_df), lines=True, chunksize=2))
assert chunks[0].shape == (2, 2)
assert chunks[1].shape == (1, 2)
def test_readjson_chunks_from_file():
with tm.ensure_clean("test.json") as path:
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
df.to_json(path, lines=True, orient="records")
chunked = pd.concat(pd.read_json(path, lines=True, chunksize=1))
unchunked = pd.read_json(path, lines=True)
        tm.assert_frame_equal(unchunked, chunked)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import logging
import warnings
import os
import pandas_datareader as pdr
from collections import Counter
from scipy import stats
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_percentage_error, mean_absolute_error
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
logging.basicConfig(filename='warnings.log',level=logging.WARNING)
logging.captureWarnings(True)
warnings.simplefilter("ignore")
def mape(y, pred):
    # MAPE is undefined when any actual value is 0, so return None in that case
    return None if 0 in y else mean_absolute_percentage_error(y, pred)
def rmse(y,pred):
return mean_squared_error(y,pred)**.5
def mae(y,pred):
return mean_absolute_error(y,pred)
def r2(y,pred):
return r2_score(y,pred)
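# Quick sanity sketch for the metric helpers above (values are illustrative):
#   y, pred = [1, 2, 4], [1.1, 1.9, 4.2]
#   rmse(y, pred)   # root mean squared error
#   mape(y, pred)   # would be None if any element of y were 0
#   r2(y, pred)     # coefficient of determination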
_estimators_ = {'arima', 'mlr', 'mlp', 'gbt', 'xgboost', 'rf', 'prophet', 'hwes', 'elasticnet','svr','knn','combo'}
_metrics_ = {'r2','rmse','mape','mae'}
_determine_best_by_ = {'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','InSampleRMSE','InSampleMAPE','InSampleMAE',
'InSampleR2','ValidationMetricValue','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE',
'LevelTestSetR2',None}
_colors_ = [
'#FFA500','#DC143C','#00FF7F','#808000','#BC8F8F','#A9A9A9',
'#8B008B','#FF1493','#FFDAB9','#20B2AA','#7FFFD4','#A52A2A',
'#DCDCDC','#E6E6FA','#BDB76B','#DEB887'
]*10
class ForecastError(Exception):
class CannotUndiff(Exception):
pass
class NoGrid(Exception):
pass
class PlottingError(Exception):
pass
class Forecaster:
def __init__(self,
y=pd.Series([]),
                 current_dates=pd.Series([])
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
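    # Sketch of the behaviour exercised above: Index.value_counts() returns a Series of counts
    # keyed by the unique timestamps, e.g.
    #   pd.DatetimeIndex(["2011-01-01", "2011-01-01"]).value_counts()
    #   2011-01-01    2
    #   dtype: int64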
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
        tm.assert_index_equal(res, base)
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
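# mock_bioframe() returns a sorted, bedframe-like DataFrame with random coordinates, e.g.
# (values vary between calls):
#      chrom   start     end
#   0  chr12  104217  883213
#   1  chr12  220014  901882
#   ...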
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
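# Sketch of the semantics tested above: bioframe.expand(df, pad) widens each interval by pad on both
# sides (a negative pad shrinks it, clipping at the interval midpoint), while scale=s resizes each
# interval to s times its original length around its center.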
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
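# In short (semantics exercised above): bioframe.complement returns, for every region of the view,
# the intervals NOT covered by df. E.g. covering chr1 (length 100) with [1,5), [3,8), [8,10) and
# [12,14) leaves the gaps [0,1), [10,12) and [14,100).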
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
    ### closest(df1, df2, ignore_overlaps=True) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
### coverage without return_input returns a single column dataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### subtracting dataframes funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=cols1 + ["strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=cols2 + ["strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
df2 = pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])[
["chrom2", "start", "end", "strand", "animal"]
]
df2 = df2.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
assert (2, 5) == np.shape(bioframe.setdiff(df1, df1, cols1=cols1, cols2=cols1))
assert (2, 5) == np.shape(bioframe.setdiff(df1, df2, cols1=cols1, cols2=cols2))
assert (4, 5) == np.shape(
bioframe.setdiff(df1, df2, on=["strand"], cols1=cols1, cols2=cols2)
)
def test_count_overlaps():
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chr1", 6, 10, "+", "dog"],
["chr1", 6, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
assert (
bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 2, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 0, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand", "animal"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([0, 0, 0])
).all()
# overlaps with pd.NA
counts_no_nans = bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
df1_na = (pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)[["chrom1", "start", "end", "strand", "animal"]]
df2_na = (pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])).astype(
{
"start2": pd.Int64Dtype(),
"end2": pd.Int64Dtype(),
}
)[["chrom2", "start2", "end2", "strand", "animal"]]
counts_nans_inserted_after = (
pd.concat([pd.DataFrame([pd.NA]), counts_no_nans, | pd.DataFrame([pd.NA]) | pandas.DataFrame |
'''GDELTeda.py
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
Class for collecting Pymongo and Pandas operations to automate EDA on
subsets of GDELT records (Events/Mentions, GKG, or joins).
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations. If those directories are not
already present, a fallback method for string-literal directory reorientation
may be found in '__init__()' at this tag: # A02b - Project directory path.
Specification for any given user's main project directory should be made for
that os.chdir() call.
See also GDELTbase.py, tag # A01a - backup path specification, as any given
user's project directory must be specified there, also.
Contents:
A00 - GDELTeda
A01 - shared class data
A02 - __init__ with instanced data
A02a - Project directory maintenance
A02b - Project directory path specification
Note: Specification at A02b should be changed to suit a user's desired
directory structure, given their local filesystem.
B00 - class methods
B01 - batchEDA()
B02 - eventsBatchEDA()
B03 - mentionsBatchEDA()
B04 - gkgBatchEDA()
Note: see GDELTedaGKGhelpers.py for helper function code & docs
B05 - realtimeEDA()
B06 - loopEDA()
C00 - main w/ testing
C01 - previously-run GDELT realtime EDA testing
'''
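# Minimal usage sketch (assumes MongoDB is running locally and GDELTbase has
# already downloaded, cleaned, and exported the records to be profiled; the
# method names and parameters are the ones defined below):
#   from GDELTeda import GDELTeda
#   gEDA = GDELTeda(tableList = ['events', 'mentions', 'gkg'])
#   gEDA.batchEDA(tableList = ['events', 'mentions', 'gkg'])   # batch reports
#   GDELTeda(tableList = ['events']).realtimeEDA(tableList = ['events'])
#   # realtimeEDA() profiles only the latest 15-minute GDELT update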
import json
import multiprocessing
import numpy as np
import os
import pandas as pd
import pymongo
import shutil
import wget
from datetime import datetime, timedelta, timezone
from GDELTbase import GDELTbase
from GDELTedaGKGhelpers import GDELTedaGKGhelpers
from pandas_profiling import ProfileReport
from pprint import pprint as pp
from time import time, sleep
from urllib.error import HTTPError
from zipfile import ZipFile as zf
# A00
class GDELTeda:
'''Collects Pymongo and Pandas operations for querying GDELT records
subsets and performing semi-automated EDA.
Shared class data:
-----------------
logPath - dict
Various os.path objects for EDA log storage.
configFilePaths - dict
Various os.path objects for pandas_profiling.ProfileReport
configuration files, copied to EDA log storage directories upon
__init__, for use in report generation.
Instanced class data:
--------------------
gBase - GDELTbase instance
Used for class member functions, essential for realtimeEDA().
Class methods:
-------------
batchEDA()
eventsBatchEDA()
mentionsBatchEDA()
gkgBatchEDA()
realtimeEDA()
loopEDA()
Helper functions from GDELTedaGKGhelpers.py used in gkgBatchEDA():
pullMainGKGcolumns()
applyDtypes()
convertDatetimes()
convertGKGV15Tone()
mainReport()
locationsReport()
countsReport()
themesReport()
personsReport()
organizationsReport()
'''
# A01 - shared class data
# These paths are set relative to the location of this script, one directory
# up and in 'EDAlogs' parallel to the script directory, which can be named
# arbitrarily.
logPath = {}
logPath['base'] = os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'EDAlogs')
logPath['events'] = {}
logPath['events'] = {
'table' : os.path.join(logPath['base'], 'events'),
'batch' : os.path.join(logPath['base'], 'events', 'batch'),
'realtime' : os.path.join(logPath['base'], 'events', 'realtime'),
}
logPath['mentions'] = {
'table' : os.path.join(logPath['base'], 'mentions'),
'batch' : os.path.join(logPath['base'], 'mentions', 'batch'),
'realtime' : os.path.join(logPath['base'], 'mentions', 'realtime'),
}
logPath['gkg'] = {
'table' : os.path.join(logPath['base'], 'gkg'),
'batch' : os.path.join(logPath['base'], 'gkg', 'batch'),
'realtime' : os.path.join(logPath['base'], 'gkg', 'realtime'),
}
# Turns out, the following isn't the greatest way of keeping track
# of each configuration file. It's easiest to just leave them in the
# exact directories where ProfileReport.to_html() is aimed (via
# os.chdir()), since it's pesky maneuvering outside parameters into
# multiprocessing Pool.map() calls.
# Still, these can and are used in realtimeEDA(), since the size of
# just the most recent datafiles should permit handling them without
# regard for Pandas DataFrame RAM impact (it's greedy, easiest method
# for mitigation is multiprocessing threads, that shouldn't be
# necessary for realtimeEDA()).
# Regardless, all these entries are for copying ProfileReport config
# files to their appropriate directories for use, given base-copies
# present in the 'scripts' directory. Those base copies may be edited
# in 'scripts', since each file will be copied from there.
configFilePaths = {}
configFilePaths['events'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_realtime.yaml"),
}
configFilePaths['mentions'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_realtime.yaml"),
}
configFilePaths['gkg'] = {}
configFilePaths['gkg']['batch'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_batch.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_batch.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_batch.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_batch.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_batch.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_batch.yaml"),
}
configFilePaths['gkg']['realtime'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_realtime.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_realtime.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_realtime.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_realtime.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_realtime.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_realtime.yaml"),
}
# A02
def __init__(self, tableList = ['events', 'mentions', 'gkg']):
'''GDELTeda class initialization, takes a list of GDELT tables to
perform EDA on. Instantiates a GDELTbase() instance for use by class
methods and checks for presence of EDAlogs directories, creating them if
they aren't present, and copying all ProfileReport-required config files
to their applicable directories.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Controls detection and creation of .../EDALogs/... subdirectories for
collection of Pandas Profiling ProfileReport HTML EDA document output.
Also controls permission for class member functions to perform
operations on tables specified by those functions' tableList parameters
as a failsafe against a lack of project directories required for those
operations, specifically output of HTML EDA documents.
output:
------
Produces exhaustive EDA for GDELT record subsets for specified tables
through Pandas Profiling ProfileReport-output HTML documents.
All procedurally automated steps towards report generation are shown
in console output during script execution.
'''
# instancing tables for operations to be passed to member functions
self.tableList = tableList
print("Instantiating GDELTeda...\n")
self.gBase = GDELTbase()
if 'events' not in tableList and \
'mentions' not in tableList and \
'gkg' not in tableList:
print("Error! 'tableList' values do not include a valid GDELT table.",
"\nPlease use one or more of 'events', 'mentions', and/or 'gkg'.")
# instancing trackers for realtimeEDA() and loopEDA()
self.realtimeStarted = False
self.realtimeLooping = False
self.realtimeWindow = 0
self.lastRealDatetime = ''
self.nextRealDatetime = ''
# A02a - Project EDA log directories confirmation and/or creation, and
# Pandas Profiling ProfileReport configuration file copying from 'scripts'
# directory.
print(" Checking log directory...")
if not os.path.isdir(self.logPath['base']):
print(" Doesn't exist! Making...")
# A02b - Project directory path
# For obvious reasons, any user of this script should change this
# string to suit their needs. The directory described with this string
# should be one directory above the location of the 'scripts' directory
# this file should be in. If this file is not in 'scripts', unpredictable
# behavior may occur, and no guarantees of functionality are intended for
# such a state.
os.chdir('C:\\Users\\urf\\Projects\\WGU capstone')
os.mkdir(self.logPath['base'])
for table in tableList:
# switch to EDAlogs directory
os.chdir(self.logPath['base'])
# Branch: table subdirectories not found, create all
if not os.path.isdir(self.logPath[table]['table']):
print("Did not find .../EDAlogs/", table, "...")
print(" Creating .../EDAlogs/", table, "...")
os.mkdir(self.logPath[table]['table'])
os.chdir(self.logPath[table]['table'])
print(" Creating .../EDAlogs/", table, "/batch")
os.mkdir(self.logPath[table]['batch'])
print(" Creating .../EDAlogs/", table, "/realtime")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Branch: table subdirectories found, create batch/realtime directories
# if not present.
else:
print(" Found .../EDAlogs/", table,"...")
os.chdir(self.logPath[table]['table'])
if not os.path.isdir(self.logPath[table]['batch']):
print(" Did not find .../EDAlogs/", table, "/batch , creating...")
os.mkdir(self.logPath[table]['batch'])
if not os.path.isdir(self.logPath[table]['realtime']):
print(" Did not find .../EDAlogs/", table, "/realtime , creating...")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Copying pandas_profiling.ProfileReport configuration files
print(" Copying configuration files...\n")
if table == 'gkg':
# There's a lot of these, but full normalization of GKG is
# prohibitively RAM-expensive, so reports need to be generated for
# both the main columns and the main columns normalized for each
# variable-length subfield.
shutil.copy(self.configFilePaths[table]['realtime']['main'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['locations'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['counts'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['themes'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['persons'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['organizations'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['main'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['locations'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['counts'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['themes'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['persons'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['organizations'],
self.logPath[table]['batch'])
else:
shutil.copy(self.configFilePaths[table]['realtime'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch'],
self.logPath[table]['batch'])
# B00 - class methods
# B01
def batchEDA(self, tableList = ['events','mentions','gkg']):
'''Reshapes and re-types GDELT records for generating Pandas
Profiling ProfileReport()-automated, simple EDA reports from Pandas
DataFrames, from MongoDB-query-cursors.
WARNING: extremely RAM, disk I/O, and processing intensive. Be aware of
what resources are available for these operations at runtime.
Relies on Python multiprocessing.Pool.map() calls against class member
functions eventsBatchEDA() and mentionsBatchEDA(), and a regular call on
gkgBatchEDA(), which uses multiprocessing.Pool.map() calls within it.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting analysis to one or more tables.
Output:
------
Displays progress through the function's operations via console output
while producing Pandas Profiling ProfileReport.to_file() html documents
for each table named in tableList.
'''
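# The per-table pattern used below is, in sketch form (worker_function stands
# for eventsBatchEDA / mentionsBatchEDA):
#   pool = multiprocessing.Pool(1)            # one short-lived worker process
#   result = pool.map(worker_function, ['batch'])
#   pool.close(); pool.join()                 # worker exit releases its RAM
# so each table's DataFrame only ever lives inside that worker.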
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
for table in tableList:
print("\n------------------------------------------------------------\n")
print("Executing batch EDA on GDELT table", table, "records...")
# WARNING: RAM, PROCESSING, and DISK I/O INTENSIVE
# Events and Mentions are both much easier to handle than GKG, so
# they're called in their own collective function threads with
# multiprocessing.Pool(1).map().
if table == 'events':
os.chdir(self.logPath['events']['batch'])
pool = multiprocessing.Pool(1)
eventsReported = pool.map(self.eventsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'mentions':
os.chdir(self.logPath['mentions']['batch'])
pool = multiprocessing.Pool(1)
mentionsReported = pool.map(self.mentionsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'gkg':
# Here's the GKG bottleneck! Future investigation of parallelization
# improvements may yield gains here, as normalization of all subfield
# and variable-length measures is very RAM expensive, given the
# expansion in records required.
# So, current handling of GKG subfield and variable-length measures
# is isolating most operations in their own process threads within
# gkgBatchEDA() execution, forcing deallocation of those resources upon
# each Pool.close(), as with Events and Mentions table operations above
# which themselves do not require any additional subfield handling.
os.chdir(self.logPath['gkg']['batch'])
self.gkgBatchEDA()
# B02
@staticmethod
def eventsBatchEDA(mode):
'''Performs automatic EDA on GDELT Events record subsets. See
function batchEDA() for "if table == 'events':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Events records up to at least the size of the batch EDA test subset used
in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter is included to meet Python multiprocessing.Pool.map()
function requirements. As such, it is present only to receive a
parameter determined by map(), e.g. one iteration of the function will
execute.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
columnNames = [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': pd.StringDtype(),
'Actor2Geo_Type': type(1),
'Actor2Geo_FullName': pd.StringDtype(),
'Actor2Geo_Lat': pd.StringDtype(),
'Actor2Geo_Long': pd.StringDtype(),
'ActionGeo_Type': type(1),
'ActionGeo_FullName': pd.StringDtype(),
'ActionGeo_Lat': pd.StringDtype(),
'ActionGeo_Long': pd.StringDtype(),
'DATEADDED' : pd.StringDtype(),
'SOURCEURL': pd.StringDtype(),
}
timecheckG = time()
print(" Creating another thread-safe connection to MongoDB...")
localDb = {}
localDb['client'] = pymongo.MongoClient()
localDb['database'] = localDb['client'].capstone
localDb['collection'] = localDb['database'].GDELT.events
# __init__ copied this config file into the batch log directory, which
# batchEDA() has already made the working directory, so the bare filename is
# used here (mirroring mentionsBatchEDA below).
configFilePath = "GDELTeventsEDAconfig_batch.yaml"
datetimeField = "DATEADDED"
datetimeFormat = "%Y-%m-%dT%H:%M:%S.000000Z"
strftimeFormat = "%Y-%m-%dh%Hm%M"
print(" Pulling events records (long wait)... ", end = '')
eventsDF = pd.DataFrame.from_records(
list(localDb['collection'].find(projection = {"_id" : False},
allow_disk_use=True,
no_cursor_timeout = True)),
columns = columnNames,
)
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
timecheckG = time()
print(" Setting dtypes... ", end='')
eventsDF = eventsDF.astype(dtype = columnTypes, copy = False)
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
print(" Converting datetimes...", end = '')
eventsDF[datetimeField] = pd.to_datetime(eventsDF[datetimeField],
format = datetimeFormat)
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
print("\n Events records DataFrame .info():\n")
print(eventsDF.info())
edaDates = "".join([
eventsDF[datetimeField].min().strftime(strftimeFormat),"_to_",
eventsDF[datetimeField].max().strftime(strftimeFormat),
])
edaLogName = "".join(["GDELT_events_EDA_", edaDates,".html"])
timecheckG = time()
print(" Complete!( %0.3f s )" % (float(time())-float(timecheckG)))
timecheckG = time()
print(" File output:", edaLogName, "\n")
print(" Generating events 'batch' EDA report...")
eventsProfile = ProfileReport(eventsDF, config_file = configFilePath)
eventsProfile.to_file(edaLogName)
del eventsDF
del eventsProfile
print(" Complete!( %0.fd s )" % (float(time())-float(timecheckG)))
print("All Events EDA operations complete. Please check EDAlogs",
"directories for any resulting Events EDA profile reports.")
return True
# B03
@staticmethod
def mentionsBatchEDA(mode):
'''Performs automatic EDA on GDELT Mentions record subsets. See
function batchEDA() for "if table == 'mentions':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Mentions records up to at least the size of the batch EDA test subset
used in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter is included to meet Python multiprocessing.Pool.map()
function requirements. As such, it is present only to receive a
parameter determined by map(), i.e. one iteration of the
function will execute.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
print(" Creating new thread-safe connection to MongoDB...")
columnNames = [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'EventTimeDate' : pd.StringDtype(),
'MentionTimeDate' : pd.StringDtype(),
'MentionType' : pd.StringDtype(),
'MentionSourceName' : pd.StringDtype(),
'MentionIdentifier' : pd.StringDtype(),
'InRawText' : type(True),
'Confidence' : type(1),
'MentionDocTone' : type(1.1),
}
localDb = {}
localDb['client'] = pymongo.MongoClient()
localDb['database'] = localDb['client'].capstone
localDb['collection'] = localDb['database'].GDELT.mentions
configFileName = "GDELTmentionsEDAconfig_batch.yaml"
datetimeField01 = "MentionTimeDate"
datetimeField02 = "EventTimeDate"
datetimeFormat = "%Y-%m-%dT%H:%M:%S.000000Z"
strftimeFormat = "%Y-%m-%dh%Hm%M"
print("\n Pulling mentions records (long wait)...")
tableDF = pd.DataFrame.from_records(
list(localDb['collection'].find(projection = {"_id" : False},
allow_disk_use=True,
no_cursor_timeout = True)),
columns = columnNames,
)
print(" Complete!")
print(" Setting dtypes...")
tableDF = tableDF.astype(dtype = columnTypes, copy = False)
print(" Complete!")
print(" Converting datetimes...")
tableDF[datetimeField01] = pd.to_datetime(tableDF[datetimeField01],
format = datetimeFormat)
tableDF[datetimeField02] = pd.to_datetime(tableDF[datetimeField02],
format = datetimeFormat)
print(" Complete!")
print(" Mentions records DataFrame .info():")
print(tableDF.info())
edaDates = "".join([
tableDF[datetimeField01].min().strftime(strftimeFormat),"_to_",
tableDF[datetimeField01].max().strftime(strftimeFormat),
])
edaLogName = "".join(["GDELT_mentions_EDA_", edaDates,".html"])
print(" File output:", edaLogName, "\n")
print("\n Generating mentions 'batch' EDA report...")
profile = ProfileReport(tableDF, config_file= configFileName)
profile.to_file(edaLogName)
print("\n Complete!")
return True
# B04
def gkgBatchEDA(self):
'''Performs automatic EDA on GDELT Global Knowledge Graph (GKG)
record subsets.
Makes use of these helper functions for multiprocessing.Pool.map()
calls, from GDELTedaGKGhelpers. a separate file as part of ensuring
compatibility with this class's Python.multiprocessing calls (all 'AXX'
tags are for 'find' use in GDELTedaGKGhelpers.py):
A02 - pullMainGKGcolumns
A03 - applyDtypes
A04 - convertDatetimes
A05 - convertGKGV15Tone
A06 - mainReport
A07 - locationsReport
A08 - countsReport
A09 - themesReport
A10 - personsReport
A11 - organizationsReport
The intent behind this implementation is to reduce the amount of total
RAM required for all operations, as .close() upon appropriate process
pools should result in deallocation of their memory structures, which
just isn't going to be forced otherwise due to Pandas and Python memory
handling.
Well-known issues with underlying treatment of allocation and
deallocation of DataFrames, regardless of whether all references to a
DataFrame are passed to 'del' statements, restrict completion of the
processing necessary for normalization of all GKG columns, which is
necessary for execution of EDA on those columns. The apparent RAM
requirements for those operations on the batch test GKG data set are not
mitigable under these hardware circumstances, barring further subset of
the data into small enough component pieces with each their own EDA
profile, which could not be converted to batch EDA without processing
outside the scope of this capstone project.
The uncommented code in this function represent a working state which
can produce full batch EDA on at least the primary information-holding
GKG columns, but not for the majority of variable-length and subfielded
columns. V1Locations batch EDA has been produced from at least one
attempt, but no guarantee of its error-free operation on similarly-sized
subsets of GDELT GKG records is intended or encouraged.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
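# Each variable-length GKG column is normalized with the same explode/join idea
# the helpers use (V1Locations shown as a sketch; locDF/subcols are illustrative
# names; see GDELTedaGKGhelpers.py and the realtimeEDA() GKG handling below):
#   locDF = tableDF.explode('V1Locations')
#   subcols = pd.json_normalize(locDF['V1Locations'])
#   locDF = locDF.drop(columns = ['V1Locations']).join(subcols)
# Each such step runs in its own Pool(1) worker so its RAM is returned on close().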
timecheck = time()
print(" Pulling non-variable-length GKG columns...")
pool = multiprocessing.Pool(1)
# For pullMainGKGcolumns documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A02'
tableDF = pool.map(GDELTedaGKGhelpers.pullMainGKGcolumns, ['batch'])
pool.close()
pool.join()
print(" Records acquired. ( %0.3f s )" % (float(time())-float(timecheck)))
print("\n GKG records DataFrame .info():")
tableDF = tableDF.pop()
pp(tableDF.info())
timecheck = time()
print("\n Setting dtypes...")
# This only sets 'GKGRECORDID', 'V21DATE', 'V2SourceCommonName',
# and 'V2DocumentIdentifier' dtypes to pd.StringDtype()
pool = multiprocessing.Pool(1)
# For applyDtypes documentation, See GDELTedaGKGhelpers.py, 'find'
# tag '# A03'
tableDF = pool.map(GDELTedaGKGhelpers.applyDtypes, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
tableDF = tableDF.pop()
timecheck = time()
print("\n Converting datetimes...")
pool = multiprocessing.Pool(1)
# For convertDatetimes documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A04'
tableDF = pool.map(GDELTedaGKGhelpers.convertDatetimes, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
tableDF = tableDF.pop()
timecheck = time()
print("\n Splitting V15Tone dicts to columns...")
pool = multiprocessing.Pool(1)
# For convertGKGV15Tone code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A05'
tableDF = pool.map(GDELTedaGKGhelpers.convertGKGV15Tone, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
tableDF = tableDF.pop()
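# convertGKGV15Tone has now spread each V15Tone dict into scalar V15Tone_*
# columns (e.g. V15Tone_Positive, V15Tone_Negative, V15Tone_Polarity; the full
# set appears in the commented-out drop list further below).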
print("\n Main GKG records (non-variable-length columns) DataFrame",
".info():\n")
pp(tableDF.info())
# Generating report excluding fields that require substantially more
# records (normalization), and so more resources for reporting.
timecheck = time()
print("\n Generating non-variable-length subfield report...")
pool = multiprocessing.Pool(1)
# For mainReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A06'
booleanSuccess = pool.map(GDELTedaGKGhelpers.mainReport, [tableDF])
pool.close()
pool.join()
print("\n Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
# # These columns may be dropped from this point on in order to
# # accomodate the increased RAM, CPU, and disk I/O requirements for
# # normalizing variable length columns, but this is commented out in order
# # to further check RAM requirements for full normalization.
# timecheck = time()
# print("\n Dropping excess columns before normalizing for variable-length",
# "columns...")
# tableDF.drop(columns = ['V2SourceCommonName',
# 'V2DocumentIdentifier',
# 'V15Tone_Positive',
# 'V15Tone_Negative',
# 'V15Tone_Polarity',
# 'V15Tone_ARD',
# 'V15Tone_SGRD',
# 'V15Tone_WordCount'], inplace = True)
# print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
# Working implementation with locationsReport
timecheck = time()
print("\n Splitting V1Locations dicts and generating report...")
pool = multiprocessing.Pool(1)
# For locationsReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A07'
booleanSuccess = pool.map(GDELTedaGKGhelpers.locationsReport, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
'''
# This section of calls are commented out due to their current inability
# to complete processing for the batch EDA test subset of GKG records.
# Future attempts to complete these sections will see this comment area
# removed and this section cleaned of work-in-progress documentation.
# Non-working implementation of countsReport. Tempted to think it's due
# to Pandas limitations for long-running jobs, but it's also just a huge
# DataFrame I'm attempting to normalize, thanks to the variable quantities
# of 'V1Counts' values with subfielded values per record.
# timecheck = time()
# print("\n Splitting V1Counts lists and generating report...")
# pool = multiprocessing.Pool(1)
# # For countsReport code/documentation, See GDELTedaGKGhelpers.py,
# # 'find' tag '# A08'
# booleanSuccess = pool.map(GDELTedaGKGhelpers.countsReport, [tableDF])
# pool.close()
# pool.join()
# print(" Complete! (%0.3f seconds)" % (float(time())-float(timecheck)))
# Ditto the rest of the normalization helper functions, because the
# normalization necessary for EDA on this large a subset of GKG records is
# just too high, given how this field has so many values of varying and
# sometimes ridiculous length. If Pandas Profiling could be coerced to not
# care about allocating smaller data structures for columns of varying
# type, this wouldn't matter, because I could leave all but key column
# values as NaN, but Pandas wants to allocate massive amounts of empty
# memory structures, and when records total over 16 million and Pandas
# wants that many 64-bit float values, even if there's only 8 million real
# values in the column to work with, Pandas will create however many extra
# copies of those 8 million empty 64-bit float memory signatures, or at
# least demand them until the system can't help but except the process.
timecheck = time()
print("\n Splitting V1Themes lists and generating report...")
pool = multiprocessing.Pool(1)
# For themesReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A09'
booleanSuccess = pool.map(GDELTedaGKGhelpers.themesReport, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
timecheck = time()
print("\n Generating Persons report...")
pool = multiprocessing.Pool(1)
# For personsReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A10'
booleanSuccess = pool.map(GDELTedaGKGhelpers.personsReport, [tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
timecheck = time()
print("\n Generating Organizations report...")
pool = multiprocessing.Pool(1)
# For organizationsReport code/documentation, See GDELTedaGKGhelpers.py,
# 'find' tag '# A11'
booleanSuccess = pool.map(GDELTedaGKGhelpers.organizationsReport,[tableDF])
pool.close()
pool.join()
print(" Complete! ( %0.3f s )" % (float(time())-float(timecheck)))
'''
print("All GKG EDA operations complete. Please check EDAlogs directories",
" for any resulting EDA profile reports.")
print("\n--------------------------------------------------------------\n")
# B05
def realtimeEDA(self, tableList = ['events','mentions','gkg']):
'''Performs automatic EDA on the latest GDELT datafiles for records
from Events/Mentions and GKG. This function is enabled by loopEDA() to
download a specified window of datafiles, or else just most-recent
datafiles if called by itself, or for a default loopEDA() call.
Current testing on recent GDELT updates confirms that this function
may complete all EDA processing on each datafile set well within the
fifteen minute window before each successive update.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting of operations to one or more tables.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
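# lastupdate.txt carries one line per table, each ending in a datafile URL such
# as (example timestamp assumed; naming per the GDELT v2 docs):
#   http://data.gdeltproject.org/gdeltv2/20210908151500.export.CSV.zip
# The code below keeps only that final URL token and slices its 14-digit UTC
# timestamp out with fileURLs[table][37:51].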
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
# using a tuple to track failed execution
return (False, 'tableList')
print("--------------------------------------------------------------\n")
print("Beginning realtime GDELT EDA collection for these tables:")
print(tableList)
# Decrementing remaining iterations to track 'last' run, in order to
# delay report generation until desired window is collected.
if self.realtimeWindow > 1:
self.realtimeWindow -= 1
self.realtimeLooping = True
elif self.realtimeWindow == 1:
self.realtimeWindow -= 1
lastRun = False
if self.realtimeWindow < 1:
lastRun = True
fileURLs = {
'events' : '',
'mentions' : '',
'gkg' : '',
}
priorURLs = {
'events' : '',
'mentions' : '',
'gkg' : '',
}
EDAFiles = {
'events' : [],
'mentions' : [],
'gkg' : [],
}
# Tracking function runtime
timecheckF = time()
# applicable for all tables
datetimeFormat = "%Y-%m-%dT%H:%M:%S.000000Z"
strptimeFormat = "%Y%m%d%H%M%S"
strftimeFormat = "%Y-%m-%dh%Hm%M"
# Downloading and parsing lastupdate.txt
# That text file consists of three lines, one for each main GDELT
# table's last datafile update. URLs/Filenames follow conventions used in
# GDELTbase download, cleaning, and MongoDB export functions, e.g. a base
# URL followed by 14 char numeric strings for the current UTC datetime at
# the resolution of seconds, at 15-minute intervals, followed by csv and
# zip extensions.
# As of 2021/09/08, that page is working and updates on the schedule
# described in GDELT docs. Exact update times likely vary with volume of
# current world news coverage, but should be at least every fifteen
# minutes, with all dates and times reported by UTC zone.
os.chdir(self.gBase.toolData['path']['base'])
print(" Checking http://data.gdeltproject.org/gdeltv2/lastupdate.txt...")
lastFilesURL = 'http://data.gdeltproject.org/gdeltv2/lastupdate.txt'
lastFiles = wget.download(lastFilesURL, 'lastupdate.txt')
with open(lastFiles) as lastupdate:
lines = lastupdate.readlines()
# Table order is reversed from most other loops in this project, here,
# because list.pop() pulls in reverse and I'm too short on time to bother
# juggling these strings more than necessary. Regardless, GKG is first in
# this order, distinct from most elsewhere.
for table in ['gkg', 'mentions', 'events']:
fileURLs[table] = lines.pop().split(' ')[-1].replace("\n", '')
# form a current datetime from lastupdate.txt strings
thisDatetime = \
datetime.strptime(fileURLs[table][37:51],
strptimeFormat).replace(tzinfo = timezone.utc)
# Form a 'last' datetime and string from the current lastupdates.txt
# datetime string. Worst-case is dead midnight UTC ( 5pm PST, 6pm MST,
# 8pm EST ) since the "last" file before then will be an irregular amount
# of time prior: 00:00 UTC - 22:45 UTC = -1 hour 15 minutes
lastDatetime = thisDatetime - timedelta(minutes = 15)
if thisDatetime.hour == 0 and thisDatetime.minute == 0:
    lastDatetime = lastDatetime - timedelta(hours = 1)
lastDatestring = lastDatetime.strftime(strptimeFormat)
# First-run datetime, timedelta, and string juggling for generating
# last-most-recent URLS for download.
for table in ['gkg', 'mentions', 'events']:
priorURLs[table] = ''.join([self.gBase.toolData['URLbase'],
lastDatestring, '.',
self.gBase.toolData['extensions'][table]])
# Shouldn't apply for first run, since no last/next file is set yet, and
# shouldn't matter for the last run, since self.realtimeWindow running out
# will halt execution in loopEDA() anyway.
if self.lastRealDatetime != '' and self.nextRealDatetime != '':
if thisDatetime == self.lastRealDatetime:
print("\n----------------------------------------------------------\n")
print("Isn't %s the same as %s ? Too early! No new update yet!" %
(thisDatetime.strftime(strftimeFormat),
self.lastRealDatetime.strftime(strftimeFormat)))
return (False, 'tooEarly')
elif thisDatetime > self.nextRealDatetime:
print("\n----------------------------------------------------------\n")
print("%s is a little later than %s . Too late! We missed one!" %
(thisDatetime.strftime(strftimeFormat),
self.nextRealDatetime.strftime(strftimeFormat)))
return (False, 'tooLate')
print(" URLs acquired:\n")
print("current:")
pp(fileURLs)
print("prior:")
pp(priorURLs)
print("Beginning per-table operations...\n")
for table in tableList:
# B05a - every-table operations
# Note that order of execution for all tables will be project-typical.
# Tracking per-table loop times
timecheckT = time()
print("Trying downloading and cleaning for most recent", table,
"file...")
# making use of alternate-mode functionality for GDELTbase methods.
thisDL = self.gBase.downloadGDELTFile(fileURLs[table], table,
verbose = True, mode = 'realtime')
# Matching the same input formatting requirements, typically performed
# in the 'table' versions of GDELTbase methods
fileName = fileURLs[table].replace(self.gBase.toolData['URLbase'], '')
fileName = fileName.replace('.zip', '')
# cleaning the file (exported to realtimeClean as .json)
thisClean = self.gBase.cleanFile(fileName, verbose = True,
mode = 'realtime')
# tracking prior URLs, still, might delete this section
lastFileName = priorURLs[table].replace(self.gBase.toolData['URLbase'], '')
lastFileName = lastFileName.replace('.zip', '')
# GKG still has different extensions...
if table == 'gkg':
cleanFileName = fileName.replace('.csv', '.json')
cleanLastFileName = lastFileName.replace('.csv', '.json')
else:
cleanFileName = fileName.replace('.CSV', '.json')
cleanLastFileName = lastFileName.replace('.CSV', '.json')
# Each iterative run of this function will add another most-recent
# datafile, so long as it hasn't already been collected and cleaned, but
# the first run should wipe per-table collections before populating 'em
# with records.
if not self.realtimeStarted:
print(" Dropping any old realtime GDELT MongoDB collection...")
self.gBase.localDb['collections']['realtime'][table].drop()
print("Starting MongoDB export for acquired file...")
thisMongo = self.gBase.mongoFile(cleanFileName, table, verbose = True,
mode = 'realtime')
print('')
# Permitting delay of report generation for N iterations
if lastRun:
pass
# bails on this loop iteration if not final realtimeEDA() iteration
else:
continue
# If lastRun == True, EDA processing will be executed in this iteration
# for any records in the 'realtime' MongoDB collection for this table.
print("Beginning EDA processing...")
# switching to table-appropriate logPath directory...
os.chdir(self.logPath[table]['realtime'])
# B05b - Events/Mentions handling
# Per-table records querying, DataFrame shaping, and Pandas Profiling
# EDA ProfileReport() generation.
if table == 'events' or table == 'mentions':
timecheckG = time()
print("\n Loading", table, "realtimeEDA files held locally...",
end = '')
thisDF = pd.DataFrame.from_records(list(
self.gBase.localDb['collections']['realtime'][table].find(
projection = {"_id" : 0},
allow_disk_use = True,
no_cursor_timeout = True,
),
), columns = self.gBase.toolData['names'][table]['reduced'])
print(" ")
print(" Setting dtypes...")
thisDF = thisDF.astype(
dtype = self.gBase.toolData['columnTypes'][table],
copy = False,
)
print(" Converting datetimes...")
if table == 'events':
datetimeField = 'DATEADDED'
# mentions has an extra datetime field, 'EventTimeDate', converted here
if table == 'mentions':
datetimeField = 'MentionTimeDate'
thisDF['EventTimeDate'] = pd.to_datetime(thisDF['EventTimeDate'],
format = datetimeFormat)
thisDF[datetimeField] = pd.to_datetime(thisDF[datetimeField],
format = datetimeFormat)
print("\n ", table, "DataFrame .info():\n")
print(thisDF.info(),'\n')
edaDateString = thisDF[datetimeField].min().strftime(strftimeFormat)
if table == 'events':
configName = "GDELTeventsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_Events_realtime_EDA_", edaDateString,
".html"])
if table == 'mentions':
configName = "GDELTmentionsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_Mentions_realtime_EDA_", edaDateString,
".html"])
print(" File to output:", edaLogName)
profile = ProfileReport(thisDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
del profile
del thisDF
print('')
print('------------------------------------------------------------\n')
# B05c - GKG handling
if table == 'gkg':
print("\n Pulling any", table, "realtime EDA files...", end = '')
timecheckG = time()
thisDF = pd.DataFrame.from_records(list(
self.gBase.localDb['collections']['realtime'][table].find(
projection = {"_id" : 0},
allow_disk_use = True,
no_cursor_timeout = True,
),
), columns = self.gBase.toolData['names']['gkg']['reduced'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
# Reusing GDELTedaGKGhelpers.py functions, since they'll work
# in this context. See that file for code and documentation.
timecheckG = time()
print(" Applying initial dtypes...", end = '')
thisDF = GDELTedaGKGhelpers.applyDtypes(thisDF)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Converting datetimes...", end = '')
thisDF = GDELTedaGKGhelpers.convertDatetimes(thisDF)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
edaDateString = thisDF['V21DATE'].min().strftime(strftimeFormat)
timecheckG = time()
print(" Splitting and forming columns from V15Tone...")
thisDF = GDELTedaGKGhelpers.convertGKGV15Tone(thisDF)
print(" ( took %0.3f s )" % (float(time()) - float(timecheckG)))
# B05d - GKG non-variable-length EDA generation
# Isolating main columns for their own EDA, dropping variable-length
# columns for copy (not inplace).
timecheckG = time()
print(" Starting EDA generation for main GKG columns only...", end='')
mainDF = thisDF.drop(columns = ['V1Locations',
'V1Counts',
'V1Themes',
'V1Persons',
'V1Organizations'])
print(" ( drop/copy: %0.3f s )" % (float(time()) - float(timecheckG)))
print("\n GKG main columns DataFrame .info():\n")
print(mainDF.info())
print('')
# constructing EDA output filename
configName = "GDELTgkgMainEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_GKG_realtime_main_EDA_", edaDateString,
".html"])
# Generating non-variable-length-subfield column EDA
print("\n File to output:", edaLogName)
profile = ProfileReport(mainDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
print("\n ( ProfileReport() + .to_file() : %0.3f s )" %
(float(time()) - float(timecheckG)))
del profile
del mainDF
print(" Continuing processing with separate normalization of each",
"variable-length subfield...\n")
# B05e - V1Locations EDA generation
timecheckG = time()
print(" Exploding V1Locations...", end = '')
locationDF = thisDF.drop(columns = ['V1Counts',
'V1Themes',
'V1Persons',
'V1Organizations'])
locationDF = locationDF.explode('V1Locations')
print(" ( drop/explode: %0.3f s )" % \
(float(time()) - float(timecheckG)))
timecheckG = time()
print(" Normalizing V1Locations...", end = '')
subcols = pd.json_normalize(locationDF['V1Locations'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Renaming columns, dropping old, rejoining, astyping...",
end = '')
subcols.columns = [f"V1Locations_{c}" for c in subcols.columns]
locationDF = locationDF.drop(columns = ['V1Locations']).join(
subcols).astype({'V1Locations_FullName' : pd.StringDtype(),
'V1Locations_CountryCode' : pd.StringDtype(),
'V1Locations_ADM1Code' : | pd.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 World Charts
# <NAME>, 2020
# In[1]:
"""
LICENSE MIT
2020
<NAME>
Website : http://www.guillaumerozier.fr
Mail : <EMAIL>
This file contains scripts that download data from the CSSE (Johns Hopkins) GitHub repository and then process it to build many graphs.
I'm currently cleaning the code; please come back soon, it will be easier to read and edit!
The charts are exported to 'charts/images/'.
Data is download to/imported from 'data/'.
"""
# In[2]:
import requests
import random
from tqdm import tqdm
import json
from datetime import date
from datetime import datetime
import numpy as np
import math
import sys
import chart_studio
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly
from plotly.subplots import make_subplots
import chart_studio.plotly as py
import sys
import matplotlib.pyplot as plt
from plotly.validators.scatter.marker import SymbolValidator
colors = px.colors.qualitative.D3 + plotly.colors.DEFAULT_PLOTLY_COLORS + px.colors.qualitative.Plotly + px.colors.qualitative.Dark24 + px.colors.qualitative.Alphabet
#If you want to upload charts to your Plotly account, fill in and uncomment the credentials line below (and switch "upload" to True further down):
#chart_studio.tools.set_credentials_file(username='', api_key='')
PATH = "../../"
today = datetime.now().strftime("%Y-%m-%d %H:%M")
"build : " + today
# If you want to display charts here, please change "show" variable to True:
# In[3]:
upload = False
show = False
export = True
# In[4]:
if len(sys.argv) >= 2:
if (sys.argv[1]).lower() == "true":
upload = True
if len(sys.argv) >= 3:
if (sys.argv[2]).lower() == "true":
show = True
if len(sys.argv) >= 4:
if (sys.argv[3]).lower() == "true":
export = True
"build : " + today
# ## Functions
# In[5]:
def compute_offset(df, col_of_reference, col_to_align, countries):
diffs = []
for offset in range(len(df)-15):
a = df[col_of_reference][1:].shift(offset, fill_value=0)/countries[col_of_reference]["pop"]
b = df[col_to_align][1:]/countries[col_to_align]["pop"]
if len(a) > len(b):
a = a[:-2]
m = min(len(a), len(b))
delta = ((a[offset:] - b[offset:])**2)**(1/2)
diffs.append(abs(delta.sum()))
xa = [i for i in range(offset, len(a))]
xb = [i for i in range(offset, len(b))]
ret = diffs.index(min(diffs))
if col_of_reference == col_to_align:
return 0
return ret
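# Example use (sketch): assumes a DataFrame of case counts with one column per
# country (as loaded later in this script) and a 'countries' dict shaped like
# {"France": {"pop": 66.99}, ...}, matching how compute_offset() indexes its args:
#   offset = compute_offset(df_confirmed, "Italy", "France", countries)
#   # 'offset' is the day-shift of France's per-capita curve that best tracks Italy's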
#
# ## DATA
# #### Download data
# In[6]:
def download_data():
#url_confirmed = "https://cowid.netlify.com/data/total_cases.csv"
#url_deaths = "https://cowid.netlify.com/data/total_deaths.csv"
url_confirmed_csse = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
url_deaths_csse = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
url_france_data = "https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv"
#r_confirmed = requests.get(url_confirmed)
#r_deaths = requests.get(url_deaths)
r_confirmed_csse = requests.get(url_confirmed_csse)
r_deaths_csse = requests.get(url_deaths_csse)
r_france_data = requests.get(url_france_data)
#with open('data/total_cases_who.csv', 'wb') as f:
#f.write(r_confirmed.content)
#with open('data/total_deaths_who.csv', 'wb') as f:
#f.write(r_deaths.content)
with open(PATH+'data/total_cases_csse.csv', 'wb') as f:
f.write(r_confirmed_csse.content)
with open(PATH+'data/total_deaths_csse.csv', 'wb') as f:
f.write(r_deaths_csse.content)
with open(PATH+'data/france_data.csv', 'wb') as f:
f.write(r_france_data.content)
print("> data downloaded")
#"build : " + today
# #### Import data and merge
# In[7]:
def import_files():
# CSSE data
df_confirmed_csse = pd.read_csv(PATH+'data/total_cases_csse.csv')
df_deaths_csse = pd.read_csv(PATH+'data/total_deaths_csse.csv')
# WHO data
#df_confirmed_who = pd.read_csv('data/total_cases_who.csv')
#df_deaths_who = pd.read_csv('data/total_deaths_who.csv')
# Perso data
df_confirmed_perso = pd.read_csv(PATH+'data/total_cases_perso.csv')
df_deaths_perso = pd.read_csv(PATH+'data/total_deaths_perso.csv')
df_france_data = | pd.read_csv(PATH+'data/france_data.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# school-bot-demo
# All doxxing information has been removed.
#Image-------------------------------------------------------------------------
import re
#try:
# from PIL import Image
#except ImportError:
# import Image
#import pytesseract
#
#pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
#
#def readimage(imagepath):
# return(pytesseract.image_to_string(Image.open(imagepath)))
#
#
#def findclasses(theschedule):
# person = []
# for i in range(len(classdata)):
# try:
# m = re.search(classdata['Key'][i], theschedule.lower())
# if m:
# person.append(i)
# except AttributeError:
# continue
# if 7 in person and 18 in person:
# person.remove(7)
# return person
#Data--------------------------------------------------------------------------
import asyncio
import datetime
import discord
import pandas as pd
botpath = ''
#botpath = './'
#botpath = ''
#botpath = ''
classdata = pd.read_csv(botpath + 'classes.csv')
classdata = classdata.set_index('ID')
usrdata = pd.read_csv(botpath + 'users.csv')
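# Assumed CSV layout (files not included in this demo): classes.csv needs at
# least 'ID', 'Key' (lowercase match pattern), and 'Name' columns, as used by the
# schedule matcher above and the class-list embeds below; users.csv holds the
# bot's per-user registration rows.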
graderole = {'6': '6th Grade', '7': '7th Grade', '8': '8th Grade', '9': 'Freshman', '10': 'Sophomore', '11': 'Junior', '12': 'Senior', '13': 'Graduate', '14': 'Teacher'}
guestStatus = {0 : "Not in SCHOOL", 1 : "SCHOOL 1", 2 : "SCHOOL 2", 3 : "Other SCHOOL", '0' : "Not in SCHOOL", '1' : "SCHOOL 1", '2' : "SCHOOL 2", '3' : "Other SCHOOL"}
#Register----------------------------------------------------------------------
async def Register(user):
global usrdata
issues = 0
print(datetime.datetime.now(), "Registering", user.name)
await user.send("Welcome to the SCHOOL 1 discord (unofficial)! You may say 'cancel' at any point to exit and '" + prefix + "register' to retry.")
embed = discord.Embed(title = "Are you currently in SCHOOL? (Graduates included)", description = "0: Not in SCHOOL\n1: In SCHOOL 1\n2: SCHOOL 2\n3: Other SCHOOL School", color = discord.Color.dark_purple())
chooseGuest = await user.send(embed = embed)
emojilist = [str(i) + "\N{combining enclosing keycap}" for i in range(0,4)]
for i in emojilist:
await chooseGuest.add_reaction(i)
def check2(reaction, person):
nonlocal emojilist
return person == user and str(reaction) in emojilist
try:
reaction, _ = await client.wait_for('reaction_add', timeout = 600.0, check = check2)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at choose from list")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
guest = str(reaction)[0]
await user.send("What is your real name? (First and last, if you would not like to give your name say 'Anonymous')")
print(datetime.datetime.now(), user.name, "on step name")
while True:
def check(m):
return m.guild == None and m.author == user
try:
msg = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at name")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at name")
return None
elif ''.join(re.split(' |-|,', msg.content)).isalpha():
irlname = msg.content.lower()
break
else:
await user.send("Please only use letters a-z in your name. Enter your name again and contact an admin if you continue having issues.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at name")
continue
await user.send("Now, please say your grade (number 6-12, graduate = 13, teacher = 14)")
print(datetime.datetime.now(), user.name, "on step grade")
while True:
try:
msg2 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at grade")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg2.content in graderole:
grade = msg2.content
break
elif msg2.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at grade")
return None
else:
await user.send("Please only use numbers 6-14 in your grade. Enter your grade again and contact an admin if you continue having issues.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at grade")
continue
if guest == "1":
await user.send("Great, now begin to list your classes one by one (most abbreviations are allowed) or send a picture of your schedule (Coming soon!) and say 'done' when you are done. (Say done now to skip) (For precalc use 'pre-calc')")
print(datetime.datetime.now(), user.name, "on step classes")
listofclasses = []
while True:
if listofclasses:
embed = discord.Embed(title = "Classes for " + user.name + ":", description = ''.join([classdata.loc[i]['Name'] + "\n" for i in listofclasses]), color = discord.Color.dark_purple())
embed.set_footer(text = "Continue listing your classes and say 'done' when all of your classes are on this list")
embed.set_thumbnail(url = user.avatar_url)
await user.send(embed = embed)
try:
msg3 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at classes")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg3.attachments:
await user.send("Feature not implemented yet, please list your classes through text.")
continue
# await user.send("Reading schedule...")
# await msg3.attachments[0].save(botpath + 'Saved/sched_' + user.name + '.png')
# print(datetime.datetime.now(), "Saved schedule from", user.name, "as sched_" + user.name + ".png")
# classes = pytesseract.image_to_string(Image.open(botpath + 'Saved/sched_' + user.name + '.png'))
# listofclasses.append(findclasses(classes))
# if len(listofclasses) >= 7:
# embed = discord.Embed(title = "Classes for " + user.name + ":", description = ''.join([classdata.loc[i]['Name'] + "\n" for i in listofclasses]), color = discord.Color.dark_purple())
# embed.set_thumbnail(url = user.avatar_url)
# await user.send(embed = embed)
# await user.send("Is this correct?")
# try:
# msg4 = await client.wait_for('message', timeout = 60.0, check = check)
# except asyncio.TimeoutError:
# print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at check classes")
# await user.send("Registration failed. You may do " + prefix + "register to retry.")
# return None
# if msg4.content.lower().startswith("y"):
# listofclasses.sort()
# usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade]}), sort = False, ignore_index = True)
# usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
# usrdata = pd.read_csv(botpath + 'users.csv')
# print(datetime.datetime.now(), "Registered", user.name, "with classes in users.csv and", issues, "issues")
# break
# elif msg4.content.lower() == "cancel":
# await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
# print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at image (Check classes)")
# return None
# else:
# await user.send("Please send a better image or say no to skip adding classes. You may contact an admin if you continue having issues.")
# issues += 1
# print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at image (incorrect classes)")
# continue
# else:
# await user.send("Only found " + str(len(listofclasses)) + " classes, please send a better image or say no to skip adding classes. You may contact an admin if you continue having issues.")
# issues += 1
# print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at image (too few classes - " + str(len(listofclasses)) + ")")
# continue
elif msg3.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at classes (send)")
return None
elif msg3.content.lower() == "done":
if len(listofclasses) >= 7:
listofclasses.sort()
usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade], 'Guest' : [guest]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Registered", user.name, "with classes in users.csv and", issues, "issues")
break
elif listofclasses:
await user.send("You have only added " + str(len(listofclasses)) + " classes, are you sure?")
try:
msg4 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at check classes")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg4.content.lower().startswith("y"):
                        listofclasses.sort()
usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade], 'Guest' : [guest]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Registered", user.name, "with classes in users.csv and", issues, "issues")
break
elif msg4.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at classes (Check classes)")
return None
else:
await user.send("Please continue listing classes one by one and say 'done' when all of your classes are added.")
continue
else:
await user.send("No classes added. Are you sure you would like to continue without adding your classes?")
try:
msg4 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at check classes")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg4.content.lower().startswith("y"):
listofclasses = [0]
usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':['[0]'], 'IRL' : [irlname], 'Grade' : [grade], 'Guest' : [guest]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Registered", user.name, "without classes in users.csv and", issues, "issues")
break
elif msg4.content.lower() == "cancel":
await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at classes (Check classes)")
return None
else:
await user.send("Please continue listing classes one by one and say 'done' when all of your classes are added.")
continue
else:
classmatches = []
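                # Normalise roman numerals (I/II/III) to digits in both the user's text and the
                # class names, then count word matches to collect candidate classes.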
for i in range(len(classdata)):
matches = 0
for word in msg3.content.lower().split(" "):
if word == "i":
word = "1"
elif word == "ii":
word = "2"
elif word == "iii":
word = "3"
classname = classdata['Name'][i].lower().split(" ")
for part in range(len(classname)):
if classname[part] == "i":
classname[part] = "1"
elif classname[part] == "ii":
classname[part] = "2"
elif classname[part] == "iii":
classname[part] = "3"
classname = ''.join([i + " " for i in classname])[:-1]
if word in classname:
matches += 1
if matches == len(msg3.content.split(" ")):
classmatches.append(i)
if len(classmatches) == 0:
await user.send("Class " + msg3.content + " not found, please try again. Write the class as it is written on the schedule, but abbreviations such as 'honors chem' and 'ap lang' are allowed.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at listclasses (class not found - " + msg3.content + ")")
continue
elif len(classmatches) == 1:
await user.send("Found class " + classdata['Name'][classmatches[0]] + ", is this correct?")
try:
msg4 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at choose from list")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if msg4.content.lower().startswith("y"):
listofclasses.append(classmatches[0])
await user.send("Class " + classdata['Name'][classmatches[0]] + " added to your schedule.")
continue
else:
await user.send("Please try again. Write the class as it is written on the schedule, but abbreviations such as 'honors chem' and 'ap lang' are allowed.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at listclasses (incorrect classes)")
continue
elif len(classmatches) > 8:
await user.send("Found " + str(len(classmatches)) + " matches, please be more specific.")
else:
embed = discord.Embed(title = "Multiple classes found, please select the correct one by number:", description = "0: None of these\n" + ''.join([str(j + 1) + ": " + classdata['Name'][classmatches[j]] + "\n" for j in range(len(classmatches))]), color = discord.Color.dark_purple())
chooseclass = await user.send(embed = embed)
emojilist = ['0\N{combining enclosing keycap}'] + [str(i + 1) + '\N{combining enclosing keycap}' for i in range(len(classmatches))]
for i in emojilist:
await chooseclass.add_reaction(i)
def check2(reaction, person):
nonlocal emojilist
return person == user and str(reaction) in emojilist
try:
reaction, _ = await client.wait_for('reaction_add', timeout = 300.0, check = check2)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at choose from list")
await user.send("Registration failed. You may do " + prefix + "register to retry.")
return None
if str(reaction)[0] == "0":
await user.send("Please try again. Write the class as it is written on the schedule, but abbreviations such as 'honors chem' and 'ap lang' are allowed.")
issues += 1
print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at listclasses (incorrect classes)")
continue
else:
listofclasses.append(classmatches[int(str(reaction)[0]) - 1])
await user.send("Class " + classdata['Name'][classmatches[int(str(reaction)[0]) - 1]] + " added to your schedule.")
continue
else:
listofclasses = [0]
usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':['[0]'], 'IRL' : [irlname], 'Grade' : [grade], 'Guest' : [guest]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Registered", user.name, "without classes in users.csv and", issues, "issues")
if guest == "0":
await discord.utils.find(lambda m: m.id == user.id, schoolserver.members).add_roles(discord.utils.get(schoolserver.roles, name = "Not in SCHOOL"))
elif guest == "2":
await discord.utils.find(lambda m: m.id == user.id, schoolserver.members).add_roles(discord.utils.get(schoolserver.roles, name = "SCHOOL 2"))
elif guest == "3":
await discord.utils.find(lambda m: m.id == user.id, schoolserver.members).add_roles(discord.utils.get(schoolserver.roles, name = "Other SCHOOL"))
elif guest == "1":
await discord.utils.find(lambda m: m.id == user.id, schoolserver.members).add_roles(discord.utils.get(schoolserver.roles, name = graderole[grade]))
await user.send("Thank you for registering! Your info is now visible through the .userinfo (user) command and you will be given access to the proper channels")
await editwhois()
#Discord-----------------------------------------------------------------------
import asyncio
#import nest_asyncio
#nest_asyncio.apply()
import datetime
import discord
from discord.ext import commands
prefix = "."
client = commands.Bot(command_prefix = prefix)
client.remove_command('help')
schoolserver = ''
whoischannel = ''
@client.event
async def on_ready():
print(datetime.datetime.now(), "Connected as", client.user)
await client.change_presence(activity = discord.Game(".register to be added!"))
global schoolserver, whoischannel
schoolserver = client.get_guild(InsertID)
whoischannel = schoolserver.get_channel(InsertID)
global teacherlist, graduatelist, seniorlist, juniorlist, sophomorelist, freshmanlist, eighthlist, seventhlist, sixthlist, school2list, otherschoollist, notinschoollist
teacherlist = await whoischannel.fetch_message(InsertID)
graduatelist = await whoischannel.fetch_message(InsertID)
seniorlist = await whoischannel.fetch_message(InsertID)
juniorlist = await whoischannel.fetch_message(InsertID)
sophomorelist = await whoischannel.fetch_message(InsertID)
freshmanlist = await whoischannel.fetch_message(InsertID)
eighthlist = await whoischannel.fetch_message(InsertID)
seventhlist = await whoischannel.fetch_message(InsertID)
sixthlist = await whoischannel.fetch_message(InsertID)
school2list = await whoischannel.fetch_message(InsertID)
otherschoollist = await whoischannel.fetch_message(InsertID)
notinschoollist = await whoischannel.fetch_message(InsertID)
@client.event
async def on_member_join(member):
print(datetime.datetime.now(), member.name, "joined, attempting to register")
if 'a' + str(member.id) in usrdata.values:
print(datetime.datetime.now(), "Not registering", member.name + ", already registered")
else:
await Register(member)
@client.event
async def on_member_remove(member):
    print(datetime.datetime.now(), member.name, "left, attempting to remove from data")
global usrdata
if 'a' + str(member.id) in usrdata.values:
usrdata = usrdata.set_index('User')
usrdata = usrdata.drop('a' + str(member.id), axis = 0)
usrdata.to_csv(botpath + 'users.csv', encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Deleted info for", member.name, "from users.csv")
await editwhois()
else:
        print(datetime.datetime.now(), member.name, "was not registered")
@client.command()
async def ping(ctx):
await ctx.send("Pong! (Latency: " + str(round(client.latency * 1000, 1)) + " ms)")
print(datetime.datetime.now(), "Pinged by", ctx.author.name, ", latency was", str(round(client.latency * 1000, 1)), "ms")
@client.command()
async def reloadclasses(ctx):
print(datetime.datetime.now(), ctx.author.name, "did command reloadclasses")
global classdata
if ctx.message.author.guild_permissions.administrator:
classdata = pd.read_csv(botpath + 'classes.csv')
classdata = classdata.set_index('ID')
await ctx.send("Reloaded classes.csv")
print(datetime.datetime.now(), "Reloaded classes.csv")
else:
print(datetime.datetime.now(), "Didn't reload, insufficient permissions")
await ctx.send("You do not have permissions for this command!")
@client.command()
async def reloadusers(ctx):
print(datetime.datetime.now(), ctx.author.name, "did command reloadusers")
global usrdata
if ctx.message.author.guild_permissions.administrator:
usrdata = pd.read_csv(botpath + 'users.csv')
await ctx.send("Reloaded users.csv")
print(datetime.datetime.now(), "Reloaded users.csv")
else:
print(datetime.datetime.now(), "Didn't reload, insufficient permissions")
await ctx.send("You do not have permissions for this command!")
@client.command()
async def register(ctx, args = ''):
if args and ctx.message.author.guild_permissions.administrator:
try:
user = ctx.message.mentions[0]
await ctx.send("Messaged " + user.name)
except IndexError:
user = ctx.message.author
else:
user = ctx.message.author
print(datetime.datetime.now(), ctx.author.name, "did command register for", user.name)
if 'a' + str(user.id) in usrdata.values:
if user == ctx.message.author:
await ctx.send("Your info has already been saved! Use " + prefix + "delinfo to change it.")
else:
await ctx.send(user.name, "has already been registered!")
print(datetime.datetime.now(), "Not registering", user.name + ", already registered")
else:
if ctx.guild:
if user == ctx.message.author:
await ctx.send("You have been messaged, please answer the messages through DM")
elif user != ctx.message.author:
await ctx.send(user, "has been messaged.")
await Register(user)
@client.command()
async def delinfo(ctx, args = ''):
if ctx.message.author.guild_permissions.administrator:
try:
user = ctx.message.mentions[0]
except IndexError:
user = ctx.message.author
global usrdata
print(datetime.datetime.now(), ctx.author.name, "did command delinfo for", user)
if 'a' + str(user.id) in usrdata.values:
if user == ctx.message.author:
await ctx.send("Are you sure you want to delete your info? This cannot be undone, and you will have to re-do .register")
else:
await ctx.send("Are you sure you want to delete info for " + user.name + "? This cannot be undone.")
def check(m):
return m.channel == ctx.channel and m.author == ctx.author
try:
msg = await client.wait_for('message', check = check, timeout = 60.0)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Delinfo for", user.name, "failed: Timed out")
await ctx.send("Delinfo failed. You may do " + prefix + "delinfo to retry.")
return None
if msg.content.lower().startswith("y"):
await ctx.send("Deleting info...")
usrdata = usrdata.set_index('User')
usrdata = usrdata.drop('a' + str(user.id), axis = 0)
usrdata.to_csv(botpath + 'users.csv', encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
await ctx.send("Deleted info.")
print(datetime.datetime.now(), "Deleted info for", user.name, "from users.csv")
await editwhois()
else:
if user == ctx.message.author:
await ctx.send("Alright, I won't delete your info.")
else:
await ctx.send("Alright, I won't delete " + user.name + "'s info.")
else:
if user == ctx.message.author:
await ctx.send("You don't have your info saved! Use " + prefix + "register to add your info.")
else:
await ctx.send(user.name + " doesn't have their info saved!")
else:
print(datetime.datetime.now(), ctx.author.name, "did command delinfo, no permissions")
await ctx.send("You do not have permissions for this command!")
@client.command()
async def userinfo(ctx, arg = ""):
if arg:
try:
user = ctx.message.mentions[0]
except IndexError:
user = ctx.message.author
else:
user = ctx.message.author
print(datetime.datetime.now(),ctx.author.name, "did command userinfo for", user.name)
if 'a' + str(user.id) in usrdata.values:
for i in range(len(usrdata)):
if usrdata['User'][i] == 'a' + str(user.id):
embed = discord.Embed(color = discord.Color.dark_purple())
embed.set_author(name = "Info for " + user.name + ":", icon_url = user.avatar_url)
embed.add_field(name = "Name:", value = usrdata['IRL'][i].title(), inline = True)
embed.add_field(name = "Grade:", value = usrdata['Grade'][i], inline = True)
embed.add_field(name = "SCHOOL Status:", value = guestStatus[usrdata['Guest'][i]], inline = False)
embed.add_field(name = "Classes:", value = ''.join([classdata.loc[int(j)]['Name'] + "\n" for j in usrdata['Classes'][i][1:-1].split(', ')]), inline = False)
embed.set_thumbnail(url = user.avatar_url)
await ctx.send(embed = embed)
else:
if user == ctx.message.author:
await ctx.send("You are not registered! Use " + prefix + "register to add your info.")
else:
await ctx.send(user.name + " is not registered! Use " + prefix + "info to add your info.")
@client.command()
async def rawuserinfo(ctx, arg = ""):
if arg:
try:
user = ctx.message.mentions[0]
except IndexError:
user = ctx.message.author
else:
user = ctx.message.author
print(datetime.datetime.now(),ctx.author.name, "did command rawuserinfo for", user.name)
if 'a' + str(user.id) in usrdata.values:
for i in range(len(usrdata)):
if usrdata['User'][i] == 'a' + str(user.id):
await ctx.send(usrdata['User'][i] + ", " + str(usrdata['Guest'][i]) + ", " + str(usrdata['Grade'][i]) + ", " + str(usrdata['Classes'][i]) + ", "+ usrdata['IRL'][i])
else:
if user == ctx.message.author:
await ctx.send("You are not registered! Use " + prefix + "register to add your info.")
else:
await ctx.send(user.name + " is not registered! Use " + prefix + "info to add your info.")
@client.command()
async def getroles(ctx):
print(datetime.datetime.now(), ctx.author.name, "did command getroles")
if 'a' + str(ctx.author.id) in usrdata.values:
for i in range(len(usrdata)):
if usrdata['User'][i] == 'a' + str(ctx.author.id):
if int(usrdata['Guest'][i]) == 1:
                    await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name = graderole[str(usrdata['Grade'][i])]))
else:
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name = guestStatus[usrdata['Guest'][i]]))
else:
await ctx.send("You are not registered! Use " + prefix + "register to add your info.")
# @client.command()
# async def listusers(ctx):
# print(datetime.datetime.now(), ctx.author.name, "did command listusers")
# users = []
# for i in range(len(usrdata)):
# users.append(discord.utils.find(lambda m: m.id == int(usrdata['User'][i][1:]), schoolserver.members).mention + " - " + usrdata['IRL'][i].title())
# embed = discord.Embed(title = "Registered Users:", description = ''.join([j + "\n" for j in users]), color = discord.Color.dark_purple())
# embed.set_footer(text = "Total number of users: " + str(len(usrdata)))
# await ctx.send(embed = embed)
@client.command()
async def listclasses(ctx):
if ctx.message.author.guild_permissions.administrator:
print(datetime.datetime.now(), ctx.author.name, "did command listclasses")
classes = []
for i in range(1, int(len(classdata)/2)):
classes.append(classdata['Name'][i])
embed = discord.Embed(title = "Classes:", description = ''.join([", " + j for j in classes])[2:], color = discord.Color.dark_purple())
embed.set_footer(text = "Total number of classes: " + str(len(classdata) - 1))
await ctx.send(embed = embed)
classes = []
for i in range(int(len(classdata)/2), len(classdata)):
classes.append(classdata['Name'][i])
embed = discord.Embed(title = "Classes:", description = ''.join([", " + j for j in classes])[2:], color = discord.Color.dark_purple())
embed.set_footer(text = "Total number of classes: " + str(len(classdata) - 1))
await ctx.send(embed = embed)
else:
print(datetime.datetime.now(), ctx.author.name, "did command listclasses, no permissions")
await ctx.send("You do not have permissions for this command")
@client.command()
async def edit(ctx, name = '', change = '', *args):
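    # Admin-only: .edit @user (classes|name|grade|guest) value - rewrites that field
    # in users.csv and refreshes the member's roles.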
if ctx.message.author.guild_permissions.administrator:
print(datetime.datetime.now(), ctx.author.name, "did command edit")
if name and change and args:
if change.lower() == "classes":
to_change = 1
elif change.lower() == "irl" or change.lower() == "name":
to_change = 2
elif change.lower() == "grade":
to_change = 3
elif change.lower() == "guest":
to_change = 4
else:
await ctx.send("Invalid syntax: use " + prefix + "edit (user) (field) (value)")
print(datetime.datetime.now(), ctx.author.name, "did command edit, invalid syntax")
return None
try:
user = ctx.message.mentions[0]
except IndexError:
await ctx.send("Invalid syntax: use " + prefix + "edit (user) (field) (value)")
print(datetime.datetime.now(), ctx.author.name, "did command edit, invalid syntax")
return None
global usrdata
for i in range(len(usrdata)):
if 'a' + str(user.id) == usrdata['User'][i]:
person = [usrdata['User'][i], usrdata['Classes'][i], usrdata['IRL'][i], usrdata['Grade'][i], usrdata['Guest'][i]]
await user.remove_roles(discord.utils.get(schoolserver.roles, name = graderole[str(person[3])]))
await user.remove_roles(discord.utils.get(schoolserver.roles, name = guestStatus[str(person[4])]))
if to_change == 2 or to_change == 1:
person[to_change] = "".join([" " + i for i in args])[1:]
else:
person[to_change] = args[0]
usrdata = usrdata.set_index('User')
usrdata = usrdata.drop('a' + str(user.id), axis = 0)
usrdata.to_csv(botpath + 'users.csv', encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
usrdata = usrdata.append(pd.DataFrame({'User' : [person[0]], 'Classes' : [person[1]], 'IRL' : [person[2]], 'Grade' : [person[3]], 'Guest' : [person[4]]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
if person[4] == "0":
await user.add_roles(discord.utils.get(schoolserver.roles, name = "Not in SCHOOL"))
elif person[4] == "2":
await user.add_roles(discord.utils.get(schoolserver.roles, name = "SCHOOL 2"))
elif person[4] == "3":
await user.add_roles(discord.utils.get(schoolserver.roles, name = "Other SCHOOL"))
elif person[4] == "1":
await user.add_roles(discord.utils.get(schoolserver.roles, name = graderole[str(person[3])]))
print(datetime.datetime.now(), "Updated", user.name, "in users.csv")
embed = discord.Embed(color = discord.Color.dark_purple())
embed.set_author(name = "Info for " + user.name + ":", icon_url = user.avatar_url)
embed.add_field(name = "Name:", value = person[2].title(), inline = True)
embed.add_field(name = "Grade:", value = person[3], inline = True)
embed.add_field(name = "SCHOOL Status:", value = guestStatus[person[4]], inline = False)
embed.add_field(name = "Classes:", value = ''.join([classdata.loc[int(j)]['Name'] + "\n" for j in person[1][1:-1].split(', ')]), inline = False)
embed.set_thumbnail(url = user.avatar_url)
await ctx.send("Updated info for " + user.name, embed = embed)
break
await editwhois()
else:
await ctx.send("Invalid syntax: use " + prefix + "edit (user) (field) (value)")
print(datetime.datetime.now(), ctx.author.name, "did command edit, invalid syntax")
else:
print(datetime.datetime.now(), ctx.author.name, "did command edit, no permissions")
await ctx.send("You do not have permissions for this command")
@client.command()
async def addclasses(ctx):
print(datetime.datetime.now(), ctx.author.name, "did command addclasses")
await ctx.send("You have been messaged, please answer the messages through DM")
user = ctx.message.author
await user.send("Begin to list your classes one by one (most abbreviations are allowed) or send a picture of your schedule (Coming soon!) and say 'done' when you are done. (For precalc use 'pre-calc')")
listofclasses = []
issues = 0
global usrdata
while True:
if listofclasses:
embed = discord.Embed(title = "Classes for " + user.name + ":", description = ''.join([classdata.loc[i]['Name'] + "\n" for i in listofclasses]), color = discord.Color.dark_purple())
embed.set_footer(text = "Continue listing your classes and say 'done' when all of your classes are on this list")
embed.set_thumbnail(url = user.avatar_url)
await user.send(embed = embed)
def check(m):
return m.guild == None and m.author == user
try:
msg3 = await client.wait_for('message', timeout = 300.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Addclasses for", user.name, "failed: Timed out at classes")
await user.send("Addclasses failed. You may do " + prefix + "addclasses to retry.")
return None
if msg3.attachments:
await user.send("Feature not implemented yet, please list your classes through text.")
continue
# await user.send("Reading schedule...")
# await msg3.attachments[0].save(botpath + 'Saved/sched_' + user.name + '.png')
# print(datetime.datetime.now(), "Saved schedule from", user.name, "as sched_" + user.name + ".png")
# classes = pytesseract.image_to_string(Image.open(botpath + 'Saved/sched_' + user.name + '.png'))
# listofclasses.append(findclasses(classes))
# if len(listofclasses) >= 7:
# embed = discord.Embed(title = "Classes for " + user.name + ":", description = ''.join([classdata.loc[i]['Name'] + "\n" for i in listofclasses]), color = discord.Color.dark_purple())
# embed.set_thumbnail(url = user.avatar_url)
# await user.send(embed = embed)
# await user.send("Is this correct?")
#
# try:
# msg4 = await client.wait_for('message', timeout = 60.0, check = check)
# except asyncio.TimeoutError:
# print(datetime.datetime.now(), "Registration for", user.name, "failed: Timed out at check classes")
# await user.send("Registration failed. You may do " + prefix + "register to retry.")
# return None
# if msg4.content.lower().startswith("y"):
# listofclasses.sort()
# usrdata = usrdata.append(pd.DataFrame({'User':['a' + str(user.id)], 'Classes':[str(listofclasses)], 'IRL' : [irlname], 'Grade' : [grade]}), sort = False, ignore_index = True)
# usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
# usrdata = pd.read_csv(botpath + 'users.csv')
# print(datetime.datetime.now(), "Registered", user.name, "with classes in users.csv and", issues, "issues")
# break
# elif msg4.content.lower() == "cancel":
# await user.send("Cancelled registration. You may do " + prefix + "register to retry.")
# print(datetime.datetime.now(), "User", user.name, "cancelled registration with", issues, "issues at image (Check classes)")
# return None
# else:
# await user.send("Please send a better image or say no to skip adding classes. You may contact an admin if you continue having issues.")
# issues += 1
# print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at image (incorrect classes)")
# continue
# else:
# await user.send("Only found " + str(len(listofclasses)) + " classes, please send a better image or say no to skip adding classes. You may contact an admin if you continue having issues.")
# issues += 1
# print(datetime.datetime.now(), "User", user.name, "had issue", issues, "with register at image (too few classes - " + str(len(listofclasses)) + ")")
# continue
elif msg3.content.lower() == "cancel":
await user.send("Cancelled addclasses. You may do " + prefix + "addclasses to retry.")
print(datetime.datetime.now(), "User", user.name, "cancelled addclasses with", issues, "issues")
return None
elif msg3.content.lower() == "done":
if len(listofclasses) >= 7:
listofclasses.sort()
for i in range(len(usrdata)):
if 'a' + str(user.id) == usrdata['User'][i]:
person = [usrdata['User'][i], usrdata['Classes'][i], usrdata['IRL'][i], usrdata['Grade'][i], usrdata['Guest'][i]]
person[1] = listofclasses
usrdata = usrdata.set_index('User')
usrdata = usrdata.drop('a' + str(user.id), axis = 0)
usrdata.to_csv(botpath + 'users.csv', encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
usrdata = usrdata.append(pd.DataFrame({'User' : [person[0]], 'Classes' : [person[1]], 'IRL' : [person[2]], 'Grade' : [person[3]], 'Guest' : [person[4]]}), sort = False, ignore_index = True)
usrdata.to_csv(botpath + 'users.csv', index = False, encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
print(datetime.datetime.now(), "Added classes for", user.name, "in users.csv")
embed = discord.Embed(color = discord.Color.dark_purple())
embed.set_author(name = "Info for " + user.name + ":", icon_url = user.avatar_url)
embed.add_field(name = "Name:", value = person[2].title(), inline = True)
embed.add_field(name = "Grade:", value = person[3], inline = True)
embed.add_field(name = "SCHOOL Status:", value = guestStatus[person[4]], inline = False)
embed.add_field(name = "Classes:", value = ''.join([classdata.loc[int(j)]['Name'] + "\n" for j in str(person[1])[1:-1].split(', ')]), inline = False)
embed.set_thumbnail(url = user.avatar_url)
await user.send("Updated info for " + user.name, embed = embed)
break
print(datetime.datetime.now(), "Added classes for", user.name, "in users.csv with", issues, "issues")
break
elif listofclasses:
await user.send("You have only added " + str(len(listofclasses)) + " classes, are you sure?")
try:
msg4 = await client.wait_for('message', timeout = 60.0, check = check)
except asyncio.TimeoutError:
print(datetime.datetime.now(), "Addclasses for", user.name, "failed: Timed out at check classes")
await user.send("Addclasses failed. You may do " + prefix + "register to retry.")
return None
if msg4.content.lower().startswith("y"):
                    listofclasses.sort()
for i in range(len(usrdata)):
if 'a' + str(user.id) == usrdata['User'][i]:
                            person = [usrdata['User'][i], usrdata['Classes'][i], usrdata['IRL'][i], usrdata['Grade'][i], usrdata['Guest'][i]]
person[1] = listofclasses
usrdata = usrdata.set_index('User')
usrdata = usrdata.drop('a' + str(user.id), axis = 0)
usrdata.to_csv(botpath + 'users.csv', encoding = 'utf8')
usrdata = pd.read_csv(botpath + 'users.csv')
usrdata = usrdata.append( | pd.DataFrame({'User' : [person[0]], 'Classes' : [person[1]], 'IRL' : [person[2]], 'Grade' : [person[3]], 'Guest' : [person[4]]}) | pandas.DataFrame |
import os
import spacy
import pandas as pd
from ...utils.time_functions import time_it
class NaicsStandard:
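    """Rough NAICS classifier based on spaCy word-vector similarity.

    NAICS titles (codes up to four digits, read from naics.csv next to this
    module) are embedded once; classify() scores input text against the
    4-digit titles and also reports the parent 2-digit sector.

    Illustrative usage (results depend on the installed spaCy model):
        >>> clf = NaicsStandard()
        >>> top = clf.classify("software consulting services", n=3)
    """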
@time_it()
def __init__(self, english_pipeline: str='en_core_web_lg'):
self.filedir = os.path.abspath(os.path.dirname(__file__))
self.load_nlp(english_pipeline=english_pipeline)
self.load_naics()
def load_nlp(self, english_pipeline):
'''
Create self.nlp
It is necessary to have the vectors of words downloaded.
This snippet downloads it automatically if not found (first time).
'''
try:
self.nlp = spacy.load(english_pipeline)
except Exception:
os.system(f"python -m spacy download {english_pipeline}")
self.nlp = spacy.load(english_pipeline)
def load_naics(self):
naics_full_path = os.path.join(self.filedir, 'naics.csv')
self.naics_df = pd.read_csv(naics_full_path, dtype={'Codes': str})
self.naics_df.drop('Total Marketable US Businesses', axis=1, inplace=True)
        # Keep only codes with at most 4 digits (classify() uses levels 1 and 2)
self.naics_df = self.naics_df[self.naics_df.Codes.str.len() <= 4]
self.naics_df['level'] = self.naics_df.Codes.str.len() / 2
self.naics_df['nlp'] = self.naics_df.Titles.apply(self.nlp)
def _classify(self, text, n=1, th=None):
text_nlp = self.nlp(text)
self.naics_df['similarity'] = self.naics_df.nlp.apply(
lambda x: text_nlp.similarity(x)
)
l2 = self.naics_df.loc[self.naics_df.level == 2, :].copy()
l2.drop(['nlp', 'level'], axis=1, inplace=True)
l2['text'] = text
l2.rename(columns={'Codes': 'naics_2', 'Titles': 'title_2'}, inplace=True)
l2.sort_values('similarity', ascending=False, inplace=True)
if th is not None:
top_2 = l2[l2.similarity >= th].copy()
else:
top_2 = l2.iloc[0: n].copy()
top_2['naics_1'] = top_2.naics_2.str[:2]
top_2['title_1'] = top_2.naics_1.map(
self.naics_df[self.naics_df.level == 1].set_index('Codes')['Titles']
)
top_2 = top_2[[
'text',
'title_1',
'naics_1',
'title_2',
'naics_2',
'similarity'
]]
print(f'Processed: {self.counter} / {self.total}', end='\r')
self.counter += 1
return top_2
@time_it()
def classify(self, text: str or list, n=1, th=None, as_df=True):
text = [text] if isinstance(text, str) else text
unique_text = set(text)
self.total = len(unique_text)
self.counter = 1
print(f'To be processed: {self.total}')
results = [self._classify(i, n=n, th=th) for i in unique_text]
df = | pd.concat(results, axis=0, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""Datareader for cell testers and potentiostats.
This module is used for loading data and databases created by different cell
testers. Arbin-type .res files (MS Access) are the primary raw-data format, with
experimental readers for PEC, Biologics .mpr and custom csv files. It also creates
processed files in the hdf5-format.
Example:
>>> d = CellpyData()
    >>> d.loadcell(raw_files=["file1.res", "file2.res"]) # loads and merges the runs
>>> voltage_curves = d.get_cap()
>>> d.save("mytest.hdf")
Todo:
* Remove mass dependency in summary data
    * use df.loc[row, column] e.g. df.loc[:, "charge_cap"] for a column or
        df.loc[df["step"] == 1, "x"] for rows selected by a condition
"""
import os
from pathlib import Path
import logging
import sys
import collections
import warnings
import csv
import itertools
import time
from scipy import interpolate
import numpy as np
import pandas as pd
from pandas.errors import PerformanceWarning
from cellpy.parameters import prms
from cellpy.exceptions import WrongFileVersion, DeprecatedFeature, NullData
from cellpy.parameters.internal_settings import (
get_headers_summary,
get_cellpy_units,
get_headers_normal,
get_headers_step_table,
ATTRS_CELLPYFILE,
)
from cellpy.readers.core import (
FileID,
Cell,
CELLPY_FILE_VERSION,
MINIMUM_CELLPY_FILE_VERSION,
xldate_as_datetime,
)
HEADERS_NORMAL = get_headers_normal()
HEADERS_SUMMARY = get_headers_summary()
HEADERS_STEP_TABLE = get_headers_step_table()
# TODO: @jepe - performance warnings - mixed types within cols (pytables)
performance_warning_level = "ignore" # "ignore", "error"
warnings.filterwarnings(
performance_warning_level, category=pd.io.pytables.PerformanceWarning
)
pd.set_option("mode.chained_assignment", None) # "raise", "warn", None
module_logger = logging.getLogger(__name__)
class CellpyData(object):
"""Main class for working and storing data.
This class is the main work-horse for cellpy where all the functions for
reading, selecting, and tweaking your data is located. It also contains the
header definitions, both for the cellpy hdf5 format, and for the various
cell-tester file-formats that can be read. The class can contain
several tests and each test is stored in a list. If you see what I mean...
Attributes:
cells (list): list of DataSet objects.
"""
def __str__(self):
txt = "<CellpyData>\n"
if self.name:
txt += f"name: {self.name}\n"
if self.table_names:
txt += f"table_names: {self.table_names}\n"
if self.tester:
txt += f"tester: {self.tester}\n"
if self.cells:
txt += "datasets: [ ->\n"
for i, d in enumerate(self.cells):
txt += f" ({i})\n"
for t in str(d).split("\n"):
txt += " "
txt += t
txt += "\n"
txt += "\n"
txt += "]"
else:
txt += "datasets: []"
txt += "\n"
return txt
def __bool__(self):
if self.cells:
return True
else:
return False
def __init__(
self,
filenames=None,
selected_scans=None,
profile=False,
filestatuschecker=None, # "modified"
fetch_one_liners=False,
tester=None,
initialize=False,
):
"""CellpyData object
Args:
filenames: list of files to load.
selected_scans:
profile: experimental feature.
filestatuschecker: property to compare cellpy and raw-files;
default read from prms-file.
fetch_one_liners: experimental feature.
tester: instrument used (e.g. "arbin") (checks prms-file as
default).
initialize: create a dummy (empty) dataset; defaults to False.
"""
if tester is None:
self.tester = prms.Instruments.tester
else:
self.tester = tester
self.loader = None # this will be set in the function set_instrument
self.logger = logging.getLogger(__name__)
self.logger.debug("created CellpyData instance")
self.name = None
self.profile = profile
self.minimum_selection = {}
if filestatuschecker is None:
self.filestatuschecker = prms.Reader.filestatuschecker
else:
self.filestatuschecker = filestatuschecker
self.forced_errors = 0
self.summary_exists = False
if not filenames:
self.file_names = []
else:
self.file_names = filenames
if not self._is_listtype(self.file_names):
self.file_names = [self.file_names]
if not selected_scans:
self.selected_scans = []
else:
self.selected_scans = selected_scans
if not self._is_listtype(self.selected_scans):
self.selected_scans = [self.selected_scans]
self.cells = []
self.status_datasets = []
self.selected_cell_number = 0
self.number_of_datasets = 0
self.capacity_modifiers = ["reset"]
self.list_of_step_types = [
"charge",
"discharge",
"cv_charge",
"cv_discharge",
"taper_charge",
"taper_discharge",
"charge_cv",
"discharge_cv",
"ocvrlx_up",
"ocvrlx_down",
"ir",
"rest",
"not_known",
]
# - options
self.force_step_table_creation = prms.Reader.force_step_table_creation
self.force_all = prms.Reader.force_all
self.sep = prms.Reader.sep
self._cycle_mode = prms.Reader.cycle_mode
# self.max_res_filesize = prms.Reader.max_res_filesize
self.load_only_summary = prms.Reader.load_only_summary
self.select_minimal = prms.Reader.select_minimal
# self.chunk_size = prms.Reader.chunk_size # 100000
# self.max_chunks = prms.Reader.max_chunks
# self.last_chunk = prms.Reader.last_chunk
self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles
# self.load_until_error = prms.Reader.load_until_error
self.ensure_step_table = prms.Reader.ensure_step_table
self.daniel_number = prms.Reader.daniel_number
# self.raw_datadir = prms.Reader.raw_datadir
self.raw_datadir = prms.Paths.rawdatadir
# self.cellpy_datadir = prms.Reader.cellpy_datadir
self.cellpy_datadir = prms.Paths.cellpydatadir
# search in prm-file for res and hdf5 dirs in loadcell:
self.auto_dirs = prms.Reader.auto_dirs
# - headers and instruments
self.headers_normal = get_headers_normal()
self.headers_summary = get_headers_summary()
self.headers_step_table = get_headers_step_table()
self.table_names = None # dictionary defined in set_instruments
self.set_instrument()
# - units used by cellpy
self.cellpy_units = get_cellpy_units()
if initialize:
self.initialize()
def initialize(self):
self.logger.debug("Initializing...")
self.cells.append(Cell())
@property
def cell(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
cell = self.cells[self.selected_cell_number]
return cell
@property
def dataset(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
warnings.warn("The .dataset property is deprecated, please use .cell instead.")
cell = self.cells[self.selected_cell_number]
return cell
@property
def empty(self):
"""gives False if the CellpyData object is empty (or un-functional)"""
return not self.check()
# TODO: @jepe - merge the _set_xxinstrument methods into one method
def set_instrument(self, instrument=None):
"""Set the instrument (i.e. tell cellpy the file-type you use).
Args:
            instrument: (str) in ["arbin", "arbin_res", "arbin_sql", "pec", "biologics_mpr", "custom", ...]
Sets the instrument used for obtaining the data (i.e. sets fileformat)
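        Example:
            >>> d = CellpyData()
            >>> d.set_instrument("arbin")  # other readers: "pec", "biologics_mpr", "custom"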
"""
self.logger.debug(f"Setting instrument: {instrument}")
if instrument is None:
instrument = self.tester
if instrument in ["arbin", "arbin_res"]:
self._set_arbin()
self.tester = "arbin"
elif instrument == "arbin_sql":
self._set_arbin_sql()
self.tester = "arbin"
elif instrument == "arbin_experimental":
self._set_arbin_experimental()
self.tester = "arbin"
elif instrument in ["pec", "pec_csv"]:
self._set_pec()
self.tester = "pec"
elif instrument in ["biologics", "biologics_mpr"]:
self._set_biologic()
self.tester = "biologic"
elif instrument == "custom":
self._set_custom()
self.tester = "custom"
else:
raise Exception(f"option does not exist: '{instrument}'")
def _set_biologic(self):
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments import biologics_mpr as instr
self.loader_class = instr.MprLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _set_pec(self):
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments import pec as instr
self.loader_class = instr.PECLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _set_maccor(self):
warnings.warn("not implemented")
def _set_custom(self):
# use a custom format (csv with information lines on top)
from cellpy.readers.instruments import custom as instr
self.loader_class = instr.CustomLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
logging.debug("setting custom file-type (will be used when loading raw")
self.loader = self.loader_class.loader
def _set_arbin_sql(self):
warnings.warn("not implemented")
def _set_arbin(self):
from cellpy.readers.instruments import arbin as instr
self.loader_class = instr.ArbinLoader()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
# def _set_arbin_experimental(self):
# # Note! All these _set_instrument methods can be generalized to one
# # method. At the moment, I find it
# # more transparent to separate them into respective methods pr
# # instrument.
# from .instruments import arbin_experimental as instr
# self.loader_class = instr.ArbinLoader()
# # get information
# self.raw_units = self.loader_class.get_raw_units()
# self.raw_limits = self.loader_class.get_raw_limits()
# # send information (should improve this later)
# # loader_class.load_only_summary = self.load_only_summary
# # loader_class.select_minimal = self.select_minimal
# # loader_class.max_res_filesize = self.max_res_filesize
# # loader_class.chunk_size = self.chunk_size
# # loader_class.max_chunks = self.max_chunks
# # loader_class.last_chunk = self.last_chunk
# # loader_class.limit_loaded_cycles = self.limit_loaded_cycles
# # loader_class.load_until_error = self.load_until_error
#
# # create loader
# self.loader = self.loader_class.loader
def _create_logger(self):
from cellpy import log
self.logger = logging.getLogger(__name__)
log.setup_logging(default_level="DEBUG")
def set_cycle_mode(self, cycle_mode):
"""set the cycle mode"""
self._cycle_mode = cycle_mode
@property
def cycle_mode(self):
return self._cycle_mode
@cycle_mode.setter
def cycle_mode(self, cycle_mode):
self.logger.debug(f"-> cycle_mode: {cycle_mode}")
self._cycle_mode = cycle_mode
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("Directory does not exist")
return
self.raw_datadir = directory
def set_cellpy_datadir(self, directory=None):
"""Set the directory containing .hdf5-files.
Used for setting directory for looking for hdf5-files.
A valid directory name is required.
Args:
directory (str): path to hdf5-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/HDF5"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info("Directory does not exist")
return
self.cellpy_datadir = directory
def check_file_ids(self, rawfiles, cellpyfile):
"""Check the stats for the files (raw-data and cellpy hdf5).
This function checks if the hdf5 file and the res-files have the same
timestamps etc to find out if we need to bother to load .res -files.
Args:
cellpyfile (str): filename of the cellpy hdf5-file.
rawfiles (list of str): name(s) of raw-data file(s).
Returns:
False if the raw files are newer than the cellpy hdf5-file
(update needed).
If return_res is True it also returns list of raw-file_names as
second argument.
"""
txt = "Checking file ids - using '%s'" % self.filestatuschecker
self.logger.info(txt)
ids_cellpy_file = self._check_cellpy_file(cellpyfile)
self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
if not ids_cellpy_file:
# self.logger.debug("hdf5 file does not exist - needs updating")
return False
ids_raw = self._check_raw(rawfiles)
similar = self._compare_ids(ids_raw, ids_cellpy_file)
if not similar:
# self.logger.debug("hdf5 file needs updating")
return False
else:
# self.logger.debug("hdf5 file is updated")
return True
def _check_raw(self, file_names, abort_on_missing=False):
"""Get the file-ids for the res_files."""
strip_file_names = True
check_on = self.filestatuschecker
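        # filestatuschecker selects the stat used for comparison: "size", "modified"
        # (mtime) or, for any other value, the last-accessed time.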
if not self._is_listtype(file_names):
file_names = [file_names]
ids = dict()
for f in file_names:
self.logger.debug(f"checking res file {f}")
fid = FileID(f)
# self.logger.debug(fid)
if fid.name is None:
warnings.warn(f"file does not exist: {f}")
if abort_on_missing:
sys.exit(-1)
else:
if strip_file_names:
name = os.path.basename(f)
else:
name = f
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
def _check_cellpy_file(self, filename):
"""Get the file-ids for the cellpy_file."""
strip_filenames = True
check_on = self.filestatuschecker
self.logger.debug("checking cellpy-file")
self.logger.debug(filename)
if not os.path.isfile(filename):
self.logger.debug("cellpy-file does not exist")
return None
try:
store = pd.HDFStore(filename)
except Exception as e:
self.logger.debug(f"could not open cellpy-file ({e})")
return None
try:
fidtable = store.select("CellpyData/fidtable")
except KeyError:
self.logger.warning("no fidtable -" " you should update your hdf5-file")
fidtable = None
finally:
store.close()
if fidtable is not None:
raw_data_files, raw_data_files_length = self._convert2fid_list(fidtable)
txt = "contains %i res-files" % (len(raw_data_files))
self.logger.debug(txt)
ids = dict()
for fid in raw_data_files:
full_name = fid.full_name
size = fid.size
mod = fid.last_modified
self.logger.debug(f"fileID information for: {full_name}")
self.logger.debug(f" modified: {mod}")
self.logger.debug(f" size: {size}")
if strip_filenames:
name = os.path.basename(full_name)
else:
name = full_name
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
else:
return None
@staticmethod
def _compare_ids(ids_res, ids_cellpy_file):
similar = True
l_res = len(ids_res)
l_cellpy = len(ids_cellpy_file)
if l_res == l_cellpy and l_cellpy > 0:
for name, value in list(ids_res.items()):
if ids_cellpy_file[name] != value:
similar = False
else:
similar = False
return similar
def loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
only_summary=False,
only_first=False,
force_raw=False,
use_cellpy_stat_file=None,
):
"""Loads data for given cells.
Args:
raw_files (list): name of res-files
cellpy_file (path): name of cellpy-file
mass (float): mass of electrode or active material
summary_on_raw (bool): use raw-file for summary
summary_ir (bool): summarize ir
summary_ocv (bool): summarize ocv steps
summary_end_v (bool): summarize end voltage
only_summary (bool): get only the summary of the runs
only_first (bool): only use the first file fitting search criteria
force_raw (bool): only use raw-files
use_cellpy_stat_file (bool): use stat file if creating summary
from raw
Example:
>>> srnos = my_dbreader.select_batch("testing_new_solvent")
>>> cell_datas = []
>>> for srno in srnos:
>>> ... my_run_name = my_dbreader.get_cell_name(srno)
>>> ... mass = my_dbreader.get_mass(srno)
>>> ... rawfiles, cellpyfiles = \
>>> ... filefinder.search_for_files(my_run_name)
>>> ... cell_data = cellreader.CellpyData()
>>> ... cell_data.loadcell(raw_files=rawfiles,
>>> ... cellpy_file=cellpyfiles)
>>> ... cell_data.set_mass(mass)
>>> ... if not cell_data.summary_exists:
>>> ... cell_data.make_summary() # etc. etc.
>>> ... cell_datas.append(cell_data)
>>>
"""
# This is a part of a dramatic API change. It will not be possible to
# load more than one set of datasets (i.e. one single cellpy-file or
# several raw-files that will be automatically merged)
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None:
similar = False
elif force_raw:
similar = False
else:
similar = self.check_file_ids(raw_files, cellpy_file)
self.logger.debug("checked if the files were similar")
if only_summary:
self.load_only_summary = True
else:
self.load_only_summary = False
if not similar:
self.logger.debug("cellpy file(s) needs updating - loading raw")
self.logger.info("Loading raw-file")
self.logger.debug(raw_files)
self.from_raw(raw_files)
self.logger.debug("loaded files")
# Check if the run was loaded ([] if empty)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
)
else:
self.logger.warning("Empty run!")
else:
self.load(cellpy_file)
if mass:
self.set_mass(mass)
return self
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
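        Example (file names are illustrative):
            >>> d = CellpyData()
            >>> d.from_raw(["run_01.res", "run_02.res"])  # the two runs are merged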
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() also implements loading several
# datasets (using list of lists as input).
if file_names:
self.file_names = file_names
if not isinstance(file_names, (list, tuple)):
self.file_names = [file_names]
# file_type = self.tester
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if test is not None:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
self.logger.debug("added this test - started merging")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError(
"Too many files to merge - "
"could be a p2-p3 zip thing"
)
else:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
                self.logger.debug(
"the first dataset (or only dataset) loaded from the raw data file is empty"
)
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.cells.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_res(self, filenames=None, check_file_type=True):
"""Convenience function for loading arbin-type data into the
datastructure.
Args:
filenames: ((lists of) list of raw-file names): uses
cellpy.file_names if None.
If list-of-list, it loads each list into separate datasets.
The files in the inner list will be merged.
check_file_type (bool): check file type if True
(res-, or cellpy-format)
"""
raise DeprecatedFeature
def _validate_datasets(self, level=0):
self.logger.debug("validating test")
level = 0
# simple validation for finding empty datasets - should be expanded to
# find not-complete datasets, datasets with missing prms etc
v = []
if level == 0:
for test in self.cells:
# check that it contains all the necessary headers
# (and add missing ones)
# test = self._clean_up_normal_table(test)
# check that the test is not empty
v.append(self._is_not_empty_dataset(test))
self.logger.debug(f"validation array: {v}")
return v
def check(self):
"""Returns False if no datasets exists or if one or more of the datasets
are empty"""
if len(self.status_datasets) == 0:
return False
if all(self.status_datasets):
return True
return False
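    # Illustrative usage (added; not part of the original class). Names come from the
    # surrounding code; the raw-file name is a placeholder:
    #     c = CellpyData()
    #     c.from_raw(["run_01.res"])
    #     if c.check():
    #         first_cell = c.cells[0]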
def _is_not_empty_dataset(self, dataset):
if dataset is self._empty_dataset():
return False
else:
return True
def _clean_up_normal_table(self, test=None, dataset_number=None):
# check that test contains all the necessary headers
# (and add missing ones)
raise NotImplementedError
def _report_empty_dataset(self):
self.logger.info("Empty set")
@staticmethod
def _empty_dataset():
return None
def _invent_a_name(self, filename=None, override=False):
if filename is None:
self.name = "nameless"
return
if self.name and not override:
return
path = Path(filename)
self.name = path.with_suffix("").name
def load(self, cellpy_file, parent_level=None, return_cls=True):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level.
return_cls (bool): Return the class.
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._load_hdf5(cellpy_file, parent_level)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def _load_hdf5(self, filename, parent_level=None):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData")
Returns:
loaded datasets (DataSet-object)
"""
# TODO: option for reading version and relabelling dfsummary etc
# if the version is older
data = None
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
"Using non-default parent label for the "
"hdf-store: {}".format(parent_level)
)
if CELLPY_FILE_VERSION > 4:
raw_dir = prms._cellpyfile_raw
step_dir = prms._cellpyfile_step
summary_dir = prms._cellpyfile_summary
meta_dir = "/info" # hard-coded
fid_dir = prms._cellpyfile_fid
else:
raw_dir = "/raw"
step_dir = "/step_table"
summary_dir = "/dfsummary"
meta_dir = "/info"
fid_dir = "/fidtable"
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion
if data.cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion
if data.cellpy_file_version < CELLPY_FILE_VERSION:
if data.cellpy_file_version < 5:
self.logger.debug(f"version: {data.cellpy_file_version}")
_raw_dir = "/dfdata"
_step_dir = "/step_table"
_summary_dir = "/dfsummary"
_fid_dir = "/fidtable"
self._check_keys_in_cellpy_file(
meta_dir, parent_level, _raw_dir, store, _summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, _summary_dir
)
self._extract_raw_from_cellpy_file(
data, parent_level, _raw_dir, store
)
self._extract_steps_from_cellpy_file(
data, parent_level, _step_dir, store
)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
_fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
warnings.warn(
"Loaded old cellpy-file version (<5). "
"Please update and save again."
)
else:
self._check_keys_in_cellpy_file(
meta_dir, parent_level, raw_dir, store, summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, summary_dir
)
self._extract_raw_from_cellpy_file(data, parent_level, raw_dir, store)
self._extract_steps_from_cellpy_file(
data, parent_level, step_dir, store
)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
if fid_table_selected:
data.raw_data_files, data.raw_data_files_length = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
# this does not yet allow multiple sets
new_tests = [
data
] # but cellpy is ready when that time comes (if it ever happens)
return new_tests
def _create_initial_data_set_from_cellpy_file(self, meta_dir, parent_level, store):
# Remark that this function is run before selecting loading method
# based on version. If you change the meta_dir prm to something else than
# "/info" it will most likely fail.
data = Cell()
meta_table = None
try:
meta_table = store.select(parent_level + meta_dir)
except KeyError as e:
self.logger.info("This file is VERY old - no info given here")
self.logger.info("You should convert the files to a newer version!")
self.logger.debug(e)
try:
data.cellpy_file_version = self._extract_from_dict(
meta_table, "cellpy_file_version"
)
except Exception as e:
data.cellpy_file_version = 0
warnings.warn(f"Unhandled exception raised: {e}")
self.logger.debug(f"cellpy file version. {data.cellpy_file_version}")
return data, meta_table
def _check_keys_in_cellpy_file(
self, meta_dir, parent_level, raw_dir, store, summary_dir
):
required_keys = [raw_dir, summary_dir, meta_dir]
required_keys = ["/" + parent_level + _ for _ in required_keys]
for key in required_keys:
if key not in store.keys():
self.logger.info(
f"This cellpy-file is not good enough - "
f"at least one key is missing: {key}"
)
raise Exception(
f"OH MY GOD! At least one crucial key" f"is missing {key}!"
)
self.logger.debug(f"Keys in current cellpy-file: {store.keys()}")
def _extract_raw_from_cellpy_file(self, data, parent_level, raw_dir, store):
data.raw = store.select(parent_level + raw_dir)
def _extract_summary_from_cellpy_file(self, data, parent_level, store, summary_dir):
data.summary = store.select(parent_level + summary_dir)
def _extract_fids_from_cellpy_file(self, fid_dir, parent_level, store):
try:
fid_table = store.select(
parent_level + fid_dir
) # remark! changed spelling from
# lower letter to camel-case!
fid_table_selected = True
except Exception as e:
self.logger.debug(e)
self.logger.debug("could not get fid from cellpy-file")
fid_table = []
warnings.warn("no fid_table - you should update your cellpy-file")
fid_table_selected = False
return fid_table, fid_table_selected
def _extract_steps_from_cellpy_file(self, data, parent_level, step_dir, store):
try:
data.steps = store.select(parent_level + step_dir)
except Exception as e:
            self.logger.debug("could not get steps from cellpy-file")
data.steps = pd.DataFrame()
warnings.warn(f"Unhandled exception raised: {e}")
def _extract_meta_from_cellpy_file(self, data, meta_table, filename):
# get attributes from meta table
for attribute in ATTRS_CELLPYFILE:
value = self._extract_from_dict(meta_table, attribute)
# some fixes due to errors propagated into the cellpy-files
if attribute == "creator":
if not isinstance(value, str):
value = "no_name"
if attribute == "test_no":
if not isinstance(value, (int, float)):
value = 0
setattr(data, attribute, value)
if data.mass is None:
data.mass = 1.0
else:
data.mass_given = True
data.loaded_from = str(filename)
# hack to allow the renaming of tests to datasets
try:
name = self._extract_from_dict_hard(meta_table, "name")
if not isinstance(name, str):
name = "no_name"
data.name = name
except KeyError:
self.logger.debug(f"missing key in meta table: name")
print(meta_table)
warnings.warn("OLD-TYPE: Recommend to save in new format!")
try:
name = self._extract_from_dict(meta_table, "test_name")
except Exception as e:
name = "no_name"
self.logger.debug("name set to 'no_name")
warnings.warn(f"Unhandled exception raised: {e}")
data.name = name
# unpacking the raw data limits
for key in data.raw_limits:
try:
data.raw_limits[key] = self._extract_from_dict_hard(meta_table, key)
except KeyError:
self.logger.debug(f"missing key in meta_table: {key}")
warnings.warn("OLD-TYPE: Recommend to save in new format!")
@staticmethod
def _extract_from_dict(t, x, default_value=None):
try:
value = t[x].values
if value:
value = value[0]
except KeyError:
value = default_value
return value
@staticmethod
def _extract_from_dict_hard(t, x):
value = t[x].values
if value:
value = value[0]
return value
def _create_infotable(self, dataset_number=None):
# needed for saving class/DataSet to hdf5
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
test = self.get_cell(dataset_number)
infotable = collections.OrderedDict()
for attribute in ATTRS_CELLPYFILE:
value = getattr(test, attribute)
infotable[attribute] = [value]
infotable["cellpy_file_version"] = [CELLPY_FILE_VERSION]
limits = test.raw_limits
for key in limits:
infotable[key] = limits[key]
infotable = pd.DataFrame(infotable)
self.logger.debug("_create_infotable: fid")
fidtable = collections.OrderedDict()
fidtable["raw_data_name"] = []
fidtable["raw_data_full_name"] = []
fidtable["raw_data_size"] = []
fidtable["raw_data_last_modified"] = []
fidtable["raw_data_last_accessed"] = []
fidtable["raw_data_last_info_changed"] = []
fidtable["raw_data_location"] = []
fidtable["raw_data_files_length"] = []
fids = test.raw_data_files
fidtable["raw_data_fid"] = fids
if fids:
for fid, length in zip(fids, test.raw_data_files_length):
fidtable["raw_data_name"].append(fid.name)
fidtable["raw_data_full_name"].append(fid.full_name)
fidtable["raw_data_size"].append(fid.size)
fidtable["raw_data_last_modified"].append(fid.last_modified)
fidtable["raw_data_last_accessed"].append(fid.last_accessed)
fidtable["raw_data_last_info_changed"].append(fid.last_info_changed)
fidtable["raw_data_location"].append(fid.location)
fidtable["raw_data_files_length"].append(length)
else:
warnings.warn("seems you lost info about your raw-data")
fidtable = pd.DataFrame(fidtable)
return infotable, fidtable
def _convert2fid_list(self, tbl):
self.logger.debug("converting loaded fidtable to FileID object")
fids = []
lengths = []
counter = 0
for item in tbl["raw_data_name"]:
fid = FileID()
fid.name = item
fid.full_name = tbl["raw_data_full_name"][counter]
fid.size = tbl["raw_data_size"][counter]
fid.last_modified = tbl["raw_data_last_modified"][counter]
fid.last_accessed = tbl["raw_data_last_accessed"][counter]
fid.last_info_changed = tbl["raw_data_last_info_changed"][counter]
fid.location = tbl["raw_data_location"][counter]
length = tbl["raw_data_files_length"][counter]
counter += 1
fids.append(fid)
lengths.append(length)
if counter < 1:
self.logger.debug("info about raw files missing")
return fids, lengths
def merge(self, datasets=None, separate_datasets=False):
"""This function merges datasets into one set."""
self.logger.info("Merging")
if separate_datasets:
warnings.warn(
"The option seperate_datasets=True is"
"not implemented yet. Performing merging, but"
"neglecting the option."
)
else:
if datasets is None:
datasets = list(range(len(self.cells)))
first = True
for dataset_number in datasets:
if first:
dataset = self.cells[dataset_number]
first = False
else:
dataset = self._append(dataset, self.cells[dataset_number])
for raw_data_file, file_size in zip(
self.cells[dataset_number].raw_data_files,
self.cells[dataset_number].raw_data_files_length,
):
dataset.raw_data_files.append(raw_data_file)
dataset.raw_data_files_length.append(file_size)
self.cells = [dataset]
self.number_of_datasets = 1
return self
def _append(self, t1, t2, merge_summary=True, merge_step_table=True):
self.logger.debug(
f"merging two datasets (merge summary = {merge_summary}) "
f"(merge step table = {merge_step_table})"
)
if t1.raw.empty:
self.logger.debug("OBS! the first dataset is empty")
if t2.raw.empty:
t1.merged = True
self.logger.debug("the second dataset was empty")
self.logger.debug(" -> merged contains only first")
return t1
test = t1
# finding diff of time
start_time_1 = t1.start_datetime
start_time_2 = t2.start_datetime
diff_time = xldate_as_datetime(start_time_2) - xldate_as_datetime(start_time_1)
diff_time = diff_time.total_seconds()
if diff_time < 0:
self.logger.warning("Wow! your new dataset is older than the old!")
self.logger.debug(f"diff time: {diff_time}")
sort_key = self.headers_normal.datetime_txt # DateTime
# mod data points for set 2
data_point_header = self.headers_normal.data_point_txt
try:
last_data_point = max(t1.raw[data_point_header])
except ValueError:
last_data_point = 0
t2.raw[data_point_header] = t2.raw[data_point_header] + last_data_point
# mod cycle index for set 2
cycle_index_header = self.headers_normal.cycle_index_txt
try:
last_cycle = max(t1.raw[cycle_index_header])
except ValueError:
last_cycle = 0
t2.raw[cycle_index_header] = t2.raw[cycle_index_header] + last_cycle
# mod test time for set 2
test_time_header = self.headers_normal.test_time_txt
t2.raw[test_time_header] = t2.raw[test_time_header] + diff_time
# merging
if not t1.raw.empty:
            raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)
            test.raw = raw2  # keep the merged raw data on the returned dataset
# checking if we already have made a summary file of these datasets
# (to be used if merging summaries (but not properly implemented yet))
if t1.summary_made and t2.summary_made:
dfsummary_made = True
else:
dfsummary_made = False
# checking if we already have made step tables for these datasets
if t1.steps_made and t2.steps_made:
step_table_made = True
else:
step_table_made = False
if merge_summary:
# check if (self-made) summary exists.
self_made_summary = True
try:
test_it = t1.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
try:
test_it = t2.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
if self_made_summary:
# mod cycle index for set 2
last_cycle = max(t1.summary[cycle_index_header])
t2.summary[cycle_index_header] = (
t2.summary[cycle_index_header] + last_cycle
)
# mod test time for set 2
t2.summary[test_time_header] = (
t2.summary[test_time_header] + diff_time
)
# to-do: mod all the cumsum stuff in the summary (best to make
# summary after merging) merging
else:
t2.summary[data_point_header] = (
t2.summary[data_point_header] + last_data_point
)
summary2 = pd.concat([t1.summary, t2.summary], ignore_index=True)
test.summary = summary2
if merge_step_table:
if step_table_made:
cycle_index_header = self.headers_normal.cycle_index_txt
t2.steps[self.headers_step_table.cycle] = (
t2.raw[self.headers_step_table.cycle] + last_cycle
)
                steps2 = pd.concat([t1.steps, t2.steps], ignore_index=True)
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append("../")
import pandas as pd
import numpy as np
import pathlib
import pickle
import os
import itertools
import argparse
import logging
import helpers.feature_helpers as fh
from collections import Counter
OUTPUT_DF_TR = 'df_steps_tr.csv'
OUTPUT_DF_VAL = 'df_steps_val.csv'
OUTPUT_DF_TRAIN = 'df_steps_train.csv'
OUTPUT_DF_TEST = 'df_steps_test.csv'
OUTPUT_DF_SESSIONS = 'df_sessions.csv'
OUTPUT_ENCODING_DICT = 'enc_dicts_v02.pkl'
OUTPUT_CONFIG = 'config.pkl'
OUTPUT_NORMLIZATIONS_VAL = 'Dwell_normalizations_val.pkl'
OUTPUT_NORMLIZATIONS_SUBM = 'Dwell_normalizations_submission.pkl'
DEFAULT_FEATURES_DIR_NAME = 'nn_vnormal'
DEFAULT_PREPROC_DIR_NAME = 'data_processed_vnormal'
def setup_args_parser():
parser = argparse.ArgumentParser(description='Create cv features')
parser.add_argument('--processed_data_dir_name', help='path to preprocessed data', default=DEFAULT_PREPROC_DIR_NAME)
parser.add_argument('--features_dir_name', help='features directory name', default=DEFAULT_FEATURES_DIR_NAME)
#parser.add_argument('--split_option', help='split type. Options: normal, future', default=DEFAULT_SPLIT)
parser.add_argument('--debug', help='debug mode (verbose output and no saving)', action='store_true')
return parser
def setup_logger(debug):
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
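# Example invocation (added; illustrative only - directory names are the defaults above):
#     python 013_Features_Dwell.py --processed_data_dir_name data_processed_vnormal \
#         --features_dir_name nn_vnormal --debug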
def main():
parser = setup_args_parser()
args = parser.parse_args()
logger = setup_logger(args.debug)
#logger.info('split option: %s' % args.split_option)
logger.info(100*'-')
logger.info('Running 013_Features_Dwell.py')
logger.info(100*'-')
logger.info('processed data directory name: %s' % args.processed_data_dir_name)
logger.info('features directory name: %s' % args.features_dir_name)
#Set up arguments
# # split_option
# if args.split_option=='normal':
# SPLIT_OPTION = 'normal'
# elif args.split_option=='future':
# SPLIT_OPTION = 'leave_out_only_clickout_with_nans'
# processed data path
DATA_PATH = '../data/' + args.processed_data_dir_name + '/'
#os.makedirs(DATA_PATH) if not os.path.exists(DATA_PATH) else None
logger.info('processed data path: %s' % DATA_PATH)
# features data path
FEATURES_PATH = '../features/' + args.features_dir_name + '/'
#os.makedirs(FEATURES_PATH) if not os.path.exists(FEATURES_PATH) else None
logger.info('features path: %s' % FEATURES_PATH)
# End of set up arguments
config = pickle.load(open(DATA_PATH+OUTPUT_CONFIG, "rb" ))
config
# ### read data
df_steps_tr = pd.read_csv(DATA_PATH+OUTPUT_DF_TR)
df_steps_val = pd.read_csv(DATA_PATH+OUTPUT_DF_VAL)
    df_steps_train = pd.read_csv(DATA_PATH+OUTPUT_DF_TRAIN)
######################################################################
# Copyright (C) 2021 BFH
#
# Script with base routines for processing steps in the FINT-CH project.
#
# Author: <NAME>, BFH-HAFL, December 2021
######################################################################
import os
import sys
import math
from rasterstats import zonal_stats
#Path to the folder containing pyFINT
PYFINT_HOME = os.environ.get("PYFINT_HOME")
sys.path.append(PYFINT_HOME)
from pyfintcontroller import *
#Path to the folder containing the FINT-CH artifacts
FINTCH_HOME = os.environ.get("FINTCH_HOME")
sys.path.append(os.path.join(FINTCH_HOME,"Common"))
from fintch_utilities import *
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas.tools import sjoin
from osgeo import ogr, osr, gdal
from osgeo.gdalconst import *
from osgeo.gdalnumeric import *
import psycopg2
from shapely.wkt import dumps, loads
from shapely.geometry import Point, box
from datetime import datetime, date, time, timedelta
import time
from multiprocessing import Process, Pool, Queue, JoinableQueue, current_process, freeze_support
import logging
import traceback
def create_db_tables(table_schema, table_base_name, table_owner, srid, db_connection):
"""Method for creating the PostGIS database tables needed in the FINT-CH process.
Existing tables are dropped beforehand.
Args:
table_schema (string): Name of the schema to create the tables in
table_base_name (string): Base name for the created tables
        table_owner (string): Owner of the created tables
        srid (int): EPSG code of the spatial reference system used for the geometry columns
        db_connection (connection): psycopg2 connection to use for creating the tables
"""
create_table_template = """
----
-- Table: raw detected trees
----
DROP TABLE IF EXISTS {0}.{1}_tree_detected;
CREATE TABLE {0}.{1}_tree_detected
(
gid serial NOT NULL,
x double precision,
y double precision,
hoehe real,
dominanz real,
bhd real,
geom geometry(Point,{3}),
parameterset_id smallint,
perimeter_id integer,
flaeche_id integer,
hoehe_modified real,
CONSTRAINT {1}_tree_detected_pkey PRIMARY KEY (gid)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE {0}.{1}_tree_detected
OWNER to {2};
-- Index: geom
CREATE INDEX sidx_{1}_tree_detected_geom_idx
ON {0}.{1}_tree_detected USING gist
(geom)
TABLESPACE pg_default;
-- Index parameterset_id
CREATE INDEX idx_{1}_tree_detected_parameterset_id
ON {0}.{1}_tree_detected USING btree
(parameterset_id)
TABLESPACE pg_default;
-- Index parameterset_id, perimeter_id
CREATE INDEX idx_{1}_tree_detected_parameterset_id_perimeter_id
ON {0}.{1}_tree_detected USING btree
(parameterset_id, perimeter_id)
TABLESPACE pg_default;
----
-- Table: detection perimeter
----
DROP TABLE IF EXISTS {0}.{1}_perimeter;
CREATE TABLE {0}.{1}_perimeter
(
gid serial NOT NULL,
geom geometry(Polygon,{3}),
perimeter_id integer,
flaeche_id integer,
CONSTRAINT {1}_perimeter_pkey PRIMARY KEY (gid)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE {0}.{1}_perimeter
OWNER to {2};
-- Index: geom
CREATE INDEX sidx_{1}_perimeter_geom
ON {0}.{1}_perimeter USING gist
(geom)
TABLESPACE pg_default;
----
-- Table: forest structure type raster
----
DROP TABLE IF EXISTS {0}.{1}_fst_raster;
CREATE TABLE {0}.{1}_fst_raster
(
gid serial NOT NULL,
geom geometry(Polygon,{3}),
flaeche_id integer,
perimeter_id integer,
tile_id bigint,
hdom smallint,
dg smallint,
nh smallint,
fst smallint,
CONSTRAINT {1}_fst_raster_pkey PRIMARY KEY (gid)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE {0}.{1}_fst_raster
OWNER to {2};
-- Index: geom
CREATE INDEX sidx_{1}_fst_raster_geom_idx
ON {0}.{1}_fst_raster USING gist
(geom)
TABLESPACE pg_default;
-- Index flaeche_id, perimeter_id
CREATE INDEX idx_{1}_fst_raster_flaeche_id_perimeter_id
ON {0}.{1}_fst_raster USING btree
(flaeche_id, perimeter_id)
TABLESPACE pg_default;
----
-- Table: trees filtered by forest structure type
----
DROP TABLE IF EXISTS {0}.{1}_processed_tree;
CREATE TABLE {0}.{1}_processed_tree
(
gid serial NOT NULL,
x double precision,
y double precision,
hoehe real,
dominanz real,
bhd real,
geom geometry(Point,{3}),
parameterset_id smallint,
fst_raster_id integer,
flaeche_id integer,
hoehe_modified real,
fst smallint,
CONSTRAINT {1}_processed_tree_pkey PRIMARY KEY (gid)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE {0}.{1}_processed_tree
OWNER to {2};
-- Index: geom
CREATE INDEX sidx_{1}_processed_tree_geom_idx
ON {0}.{1}_processed_tree USING gist
(geom)
TABLESPACE pg_default;
"""
cursor = db_connection.cursor()
sql = create_table_template.format(table_schema, table_base_name, table_owner, srid)
cursor.execute(sql)
db_connection.commit()
cursor.close()
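# Hedged usage sketch (added for illustration): create the FINT-CH result tables in a
# PostGIS database. The connection parameters, schema and table base name below are
# placeholders/assumptions, not values from the original project; SRID 2056 is CH1903+/LV95.
if __name__ == "__main__" and os.environ.get("FINTCH_DB_DEMO"):
    _demo_connection = psycopg2.connect(
        host="localhost", dbname="fintch", user="fintch", password="fintch"  # placeholder credentials
    )
    create_db_tables("public", "demo_run", "fintch", 2056, _demo_connection)
    _demo_connection.close()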
def generate_grid(min_x, min_y, max_x, max_y, out_shape_path, crs=2056, step_x=25,step_y=25):
# create output file
srs = osr.SpatialReference()
srs.ImportFromEPSG( crs )
logger = logging.getLogger()
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(out_shape_path):
delete_shapefile(out_shape_path)
out_ds = out_driver.CreateDataSource(out_shape_path)
out_layer = None
try:
out_layer = out_ds.CreateLayer(out_shape_path,srs=srs,geom_type=ogr.wkbPolygon )
    except Exception as ex:
        logger.error("Error generating grid %s", out_shape_path)
logger.error(traceback.format_exception(*sys.exc_info()))
raise ex
out_layer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger64))
feature_defn = out_layer.GetLayerDefn()
cur_x = min_x
cur_y = min_y
col = 0
row = -1
e = len(str(int(min_x)))
f = 10**e
# create grid cells
while cur_y < max_y:
row += 1
cur_x = min_x
col = -1
while cur_x < max_x:
col += 1
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(cur_x, cur_y)
ring.AddPoint(cur_x+step_x, cur_y)
ring.AddPoint(cur_x+step_x, cur_y+step_y)
ring.AddPoint(cur_x, cur_y+step_y)
ring.AddPoint(cur_x, cur_y)
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
# add new geom to layer
out_feature = ogr.Feature(feature_defn)
out_feature.SetGeometry(poly)
out_feature.SetField('id', cur_x*f+cur_y )
out_layer.CreateFeature(out_feature)
out_feature.Destroy
cur_x += step_x
cur_y += step_y
# Close DataSources
out_ds.Destroy()
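# Hedged usage sketch (added): build a 25 m analysis grid over a small, made-up LV95
# bounding box and write it to the working directory. The coordinates and the output
# file name are assumptions for illustration only.
if __name__ == "__main__" and os.environ.get("FINTCH_GRID_DEMO"):
    generate_grid(
        2600000, 1200000, 2600100, 1200100,
        os.path.join(os.getcwd(), "demo_grid.shp"),
        crs=2056, step_x=25, step_y=25,
    )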
def determine_fst(grid_path,vhm150_path,mixing_degree_path,envelope):
output_folder = os.path.dirname(grid_path)
logger = logging.getLogger()
#Clip VHM
vhm_output_file = os.path.join(output_folder,"vhm150_clip.tif")
try:
crop_image(vhm150_path, vhm_output_file, [envelope]) # Nodata Value may depend on source!
except ValueError as ve:
logger.error(grid_path)
logger.error(traceback.format_exception(*sys.exc_info()))
return -1
#Clip NH
mg_output_file = os.path.join(output_folder,"mg_clip.tif")
try:
crop_image(mixing_degree_path, mg_output_file, [envelope]) # Nodata Value may depend on source!
except ValueError as ve:
logger.error(grid_path)
logger.error(traceback.format_exception(*sys.exc_info()))
return -1
##
## Calculate hdom
##
stats = zonal_stats(grid_path, vhm_output_file, stats=['percentile_80'], all_touched=True)
# open grid polygon shapefile
driver = ogr.GetDriverByName("ESRI Shapefile")
grid_ds = driver.Open(grid_path, 1)
layer = grid_ds.GetLayer()
# add grid attribute fields
layer.CreateField(ogr.FieldDefn('hdom', ogr.OFTInteger))
layer.CreateField(ogr.FieldDefn('nh', ogr.OFTInteger))
layer.CreateField(ogr.FieldDefn('dg_min', ogr.OFTReal))
layer.CreateField(ogr.FieldDefn('dg', ogr.OFTInteger))
layer.CreateField(ogr.FieldDefn('FST', ogr.OFTInteger))
# iterate over all features and add stand attribute values
counter = 0
for feature in layer:
hp80 = 0
if stats[counter].get('percentile_80') is not None:
hp80 = stats[counter].get('percentile_80')
# set and store hdom
feature.SetField('hdom', hp80)
layer.SetFeature(feature)
counter += 1
grid_ds, layer = None, None
##
## Calculate nh
##
stats = zonal_stats(grid_path, mg_output_file, stats=['mean'], all_touched=True)
grid_ds = driver.Open(grid_path, 1)
layer = grid_ds.GetLayer()
# iterate over all features and add stand attribute values
counter = 0
for feature in layer:
nh = 0
if stats[counter].get('mean') is not None:
nh = stats[counter].get('mean')
nh = round(nh/100)
# set and store nh
feature.SetField('nh', nh)
layer.SetFeature(feature)
counter += 1
grid_ds, layer = None, None
##
## Calculate dg
##
grid_ds = driver.Open(grid_path, 1)
layer = grid_ds.GetLayer()
# tmp files
dg_classified_path = os.path.join(output_folder,"dg_layer.tif")
tmp_lim_dg_path = os.path.join(output_folder,"dg_lim_dg.tif")
# Layer threshold values (based on NFI definition, www.lfi.ch)
min_height_hdom_factor_ms = 1.0 / 3.0
min_height_hdom_factor_os = 2.0 / 3.0
for feature in layer:
# calculate and store dg_min
hdom = feature.GetFieldAsInteger('hdom')
if hdom < 14: #Fix small stands issue
dg_min = hdom*min_height_hdom_factor_ms
else:
dg_min = hdom*min_height_hdom_factor_os
feature.SetField('dg_min', dg_min )
layer.SetFeature(feature)
counter += 1
# Rasterize dg_min
vhm_output_file
vhm_ds = gdal.Open(vhm_output_file,GA_ReadOnly)
driver_gtiff = gdal.GetDriverByName('GTiff')
dg_min_ds = driver_gtiff.Create(tmp_lim_dg_path,vhm_ds.RasterXSize, vhm_ds.RasterYSize,1,gdal.GDT_Float32)
dg_min_ds.SetGeoTransform(vhm_ds.GetGeoTransform())
dg_min_ds.SetProjection(vhm_ds.GetProjection())
dst_options = ['ATTRIBUTE=dg_min']
gdal.RasterizeLayer(dg_min_ds, [1], layer, None, options=dst_options)
# Produce "1" / "0" raster for each layer
vhm_b1 = vhm_ds.GetRasterBand(1)
dg_min_b1 = dg_min_ds.GetRasterBand(1)
data_vhm = np.array(vhm_b1.ReadAsArray())
data_dg_min = np.array(dg_min_b1.ReadAsArray())
data_out = data_vhm>data_dg_min
zoLembda = lambda x: 1 if x else 0
vfunc = np.vectorize(zoLembda)
data_out = vfunc(data_out)
# Write the out file
dst_options = ['COMPRESS=LZW']
dg_ds = driver_gtiff.Create(dg_classified_path, vhm_ds.RasterXSize, vhm_ds.RasterYSize, 1, gdal.GDT_Byte, dst_options)
CopyDatasetInfo(vhm_ds, dg_ds)
band_out = dg_ds.GetRasterBand(1)
BandWriteArray(band_out, data_out)
vhm_ds, dg_min_ds, dg_ds = None, None, None
# Zonal stats
stats = zonal_stats(grid_path, dg_classified_path, stats=['mean'], all_touched=True)
# iterate over all features and add stand attribute values
counter = 0
for feature in layer:
dg = 0
if stats[counter].get('mean') is not None:
dg = stats[counter].get('mean')
dg = round(dg*100)
hdom = feature.GetFieldAsInteger('hdom')
nh = feature.GetFieldAsInteger('nh')
if nh <= 30:
digit1 = 1
elif 30 < nh <= 70:
digit1 = 2
else:
digit1 = 3
if dg <= 80:
digit2 = 1
else:
digit2 = 2
if hdom <= 22:
digit3 = 1
else:
digit3 = 2
fst = int(str(digit1) + str(digit2) + str(digit3))
# set and store dg and FST
feature.SetField('dg', dg)
feature.SetField('FST', fst)
layer.SetFeature(feature)
counter += 1
grid_ds, layer = None, None
# Cleanup
delete_raster(dg_classified_path)
delete_raster(tmp_lim_dg_path)
delete_raster(vhm_output_file)
delete_raster(mg_output_file)
return 0
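# Hedged usage sketch (added): derive forest structure types (FST) for an existing grid
# shapefile. All file paths are placeholders; the envelope is the LV95 extent of the grid.
if __name__ == "__main__" and os.environ.get("FINTCH_FST_DEMO"):
    _demo_envelope = box(2600000, 1200000, 2600100, 1200100)
    determine_fst("demo_grid.shp", "vhm150.tif", "mixing_degree.tif", _demo_envelope)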
def process_detection(record, db_connection):
cursor = db_connection.cursor()
fint_controller = pyFintController()
logger = logging.getLogger()
table_schema = record["table_schema"]
table_base_name = record["table_base_name"]
perimeter_insert_template = "INSERT INTO "+table_schema+"."+table_base_name+"_perimeter(geom, perimeter_id, flaeche_id) VALUES (ST_SetSRID(ST_GeomFromText('{0}'),{1}), {2}, {3});"
tree_insert_template = "INSERT INTO "+table_schema+"."+table_base_name+"_tree_detected(x, y, hoehe, bhd, dominanz, geom, parameterset_id, perimeter_id, flaeche_id, hoehe_modified) VALUES ({0}, {1}, {2}, {3}, {4}, ST_SetSRID(ST_GeomFromText('{5}'),{6}), {7}, {8},{9},{10});"
result_base_path = record["result_base_path"]
perimeter_buffer = record["perimeter_buffer"]
r_max = record["r_max"]
epsg = record["epsg"]
crs = record["crs"]
perimeter_id = record["perimeter_id"]
flaeche_id = record["flaeche_id"]
folder_name = "{0}_{1}".format(flaeche_id,perimeter_id)
output_folder = os.path.join(result_base_path,folder_name)
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
geom = record["geometry"]
(minx, miny, maxx, maxy) = geom.bounds
#Envelope by group
bx = box(minx, miny, maxx, maxy)
envelope = bx.buffer(perimeter_buffer, resolution=1).envelope
sql = perimeter_insert_template.format(geom.wkt,epsg,perimeter_id,flaeche_id)
cursor.execute(sql)
db_connection.commit()
parameter_sets = record["parameter_sets"]
vhm_input_file = record["vhm_input_file"]
fint_tree_dataframes = []
for paramterset_id in parameter_sets:
parameter_set = parameter_sets[paramterset_id]
parameter_set["id"] = paramterset_id
detection_result = detect_trees(parameter_set, output_folder, vhm_input_file, envelope, crs, fint_controller)
if type(detection_result) == type(None):
continue
else:
detection_result["parameterset_id"] = paramterset_id
fint_tree_dataframes.append(detection_result)
if len(fint_tree_dataframes)==0:
cursor.close()
return
    fint_trees_df = gpd.GeoDataFrame(pd.concat(fint_tree_dataframes, ignore_index=True))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : dataset.py
# Author : <NAME> <<EMAIL>>
# Date : 01.11.2020
# Last Modified Date: 09.11.2021
# Last Modified By : <NAME> <<EMAIL>>
#
# Copyright (c) 2020, Imperial College, London
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Imperial College nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# this library provides support for the primary dataset class
import random
import numpy as np
import pandas as pd
import torch
import cv2
import pytesseract
from PIL import Image
from transformers import AutoTokenizer, AutoConfig
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
class ToxDataset(Dataset):
def __init__(
self,
dataframes,
transforms=None,
tokenizer="bert-base-uncased",
text_only=0,
selection_probs=[],
max_sequence_length=None,
test=False,
language_model=False,
read_text=True,
read_title=True,
read_img=True,
read_img_text=True,
):
"""
Dataset, supports mono and multitask learning
Args:
dataframes: a list of dataframes to use
transforms: transforms to apply to the images
tokenizer: name of the huggingface tokenizer to use
text_only: deprecated, ignored
selection_probs: list of probabilities of selecting each task
max_sequence_length: maximum number of tokens in sequence, will default to the max
number allowed in the specified transformer
test: whether the dataset is in test mode or train mode
language_model: whether the model to be trained is a LM, if true and max_seq_len None,
will pad title and text to half max len
read_text: whether to read the text from the dataframe
read_title: whether to read the title from the dataframe
read_img: whether to read the image (and any text in the image) from the dataframe
"""
if isinstance(dataframes, pd.DataFrame):
# then there is a single data source
self.dataframes = [dataframes]
self.selection_probs = [1.0]
else:
self.dataframes = dataframes
# if the dataframes and selection probs aren't the same size, then default to equal weighting
self.selection_probs = (
selection_probs
if len(selection_probs) == len(dataframes)
else [1.0 / len(dataframes) for _ in range(len(dataframes))]
)
self.max_sequence_length = (
AutoConfig.from_pretrained(tokenizer).max_position_embeddings
if max_sequence_length is None and not language_model
else AutoConfig.from_pretrained(tokenizer).max_position_embeddings // 2 - 1
if max_sequence_length is None and language_model
else max_sequence_length
)
if self.max_sequence_length == 514:
# this fixes distil roberta
self.max_sequence_length = 512
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
if self.tokenizer.pad_token is None:
# if using gpt or something
self.tokenizer.pad_token = self.tokenizer.eos_token
self.pad_token_id = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.pad_token
)
self.transforms = transforms
self.test = test
if test:
# if testing we want to check all of the examples, and not massage etc
examples_per_class = [len(df) for df in self.dataframes]
self.dataframes = pd.concat(self.dataframes)
self.dataframes["task_index"] = sum(
[[task] * num for task, num in enumerate(examples_per_class)], []
)
self.read_text = read_text
self.read_title = read_title
self.read_img = read_img
self.read_img_text = read_img_text
def __len__(self):
return len(self.dataframes) if self.test else max(map(len, self.dataframes))
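    # Illustrative usage (added; not part of the original file). A typical setup,
    # assuming a dataframe with "text", "title", "img" and "label" columns:
    #     df = pd.read_csv("train.csv")
    #     ds = ToxDataset(df, transforms=transforms.ToTensor(),
    #                     tokenizer="bert-base-uncased")
    #     loader = DataLoader(ds, batch_size=8, shuffle=True)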
def __getitem__(self, idx):
if self.test:
data = self.dataframes.iloc[idx]
task_indices = data.task_index
else:
task_indices = random.choices(
list(range(len(self.selection_probs))), weights=self.selection_probs
)[0]
data = self.dataframes[task_indices].iloc[
idx % len(self.dataframes[task_indices])
]
# some datasets might not have some modes for some points or the whole dataset,
# so we keep track of what it does have
# elements represent text, title, image, label
modalities = [1, 1, 1, 1]
if self.read_text and "text" in data and not pd.isna(data.text) and data.text:
encoding = torch.cat(
(
self.tokenizer.encode(
data.text,
max_length=self.max_sequence_length,
padding=False,
truncation=True,
return_tensors="pt",
).flatten(),
torch.tensor([self.tokenizer.pad_token_id]),
)
)
if encoding.size() == torch.Size([1, 0]):
encoding = torch.full((1,), self.pad_token_id, dtype=torch.long)
mask = torch.zeros(encoding.size(), dtype=torch.float)
mask[encoding != self.pad_token_id] = 1
else:
encoding = torch.zeros(1, dtype=torch.long)
mask = encoding.clone().type(torch.float)
modalities[0] = 0
if (
self.read_title
and "title" in data
and not pd.isna(data.title)
and data.title
):
title = torch.cat(
(
self.tokenizer.encode(
data.title,
max_length=self.max_sequence_length,
padding=False,
truncation=True,
return_tensors="pt",
).flatten(),
torch.tensor([self.tokenizer.pad_token_id]),
)
)
if title.size() == torch.Size([1, 0]):
title = torch.full((1,), self.pad_token_id, dtype=torch.long)
title_mask = torch.zeros(title.size(), dtype=torch.float)
title_mask[title != self.pad_token_id] = 1
else:
title = torch.zeros(1, dtype=torch.long)
title_mask = title.clone().type(torch.float)
modalities[1] = 0
if self.read_img and "img" in data and not pd.isna(data.img) and data.img:
img = cv2.imread(data.img)
if self.read_img_text:
img_text_string = pytesseract.image_to_string(data.img)
try:
img_text = torch.cat(
(
self.tokenizer.encode(
img_text_string,
max_length=self.max_sequence_length,
padding=False,
truncation=True,
return_tensors="pt",
).flatten(),
torch.tensor([self.tokenizer.pad_token_id]),
)
)
if img_text.size() == torch.Size([1, 0]):
img_text = torch.full((1,), self.pad_token_id, dtype=torch.long)
except pytesseract.pytesseract.TesseractError:
# if the image doesn't have dimensions in it's metadata, will throw
# an error, this catches
img_text = torch.full((1,), self.pad_token_id, dtype=torch.long)
img_text_mask = torch.zeros(img_text.size(), dtype=torch.float)
img_text_mask[img_text != self.pad_token_id] = 1
else:
img_text = torch.full((1,), self.pad_token_id, dtype=torch.long)
img_text_mask = torch.zeros(img_text.size(), dtype=torch.float)
img_text_mask[img_text != self.pad_token_id] = 1
if self.transforms:
img = self.transforms(img)
else:
img = torch.zeros(3, 1, 1)
img_text = torch.full((1,), self.pad_token_id, dtype=torch.long)
img_text_mask = torch.zeros(img_text.size(), dtype=torch.float)
img_text_mask[img_text != self.pad_token_id] = 1
if self.transforms:
img = self.transforms(img)
modalities[2] = 0
if "label" in data and not | pd.isna(data.label) | pandas.isna |
import pandas as pd
from datetime import date, datetime
from functools import wraps
import importlib_resources
import requests
import time
import os
import io
token = "<KEY>"
#<PASSWORD>
#8a0ff681501b0bac557bf90fe6a036f7
def counter(func):
"""
    A decorator that counts how many times we executed a function.
    In our case we use it to track how many times we executed request()
    so that we do not exceed the API limit of 1000 requests/hour. When we approach
    the limit, the function automatically sleeps.
"""
@wraps(func)
def wrapper(*args, **kwargs):
wrapper.count += 1
if wrapper.count == 2:
global start #we like to live dangerously ;) (it is neccessary here to have global var; otherwise it would not be recognized at line 32)
start = time.time()
if wrapper.count == 998:
end = time.time()
wait_time = end - start + 120
print("You aproached the limit of requests per hour. The download will automatically continue after " + str(int(wait_time)) + " seconds.")
time.sleep(wait_time)
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
wrapper.count = 1
return wrapper
@counter
def request(token, page = 1, items_per_page = 5000, start_date = "1.1.2020", end_date = "24.12.2021", pause = 0.1):
"""
    Request data from the API and return the response as json. The data in the API are bound to pages;
    one request obtains the data for one page (5000 rows).
Parameters
----------
token : str
input token for the API - can be obatained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
page : int
specifies page which will be downloaded (default 1)
items_per_page : int
number of rows per page (defualt 5000)
start_date = str
begining date of the dataset - datum in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
pause : int
to not overload the API (default 0.1)
Raises
------
Exception
if response of API is not 200 or 429.
Returns
-------
r
response of the API, in json
"""
url = "https://onemocneni-aktualne.mzcr.cz/api/v3/osoby?page={a}&itemsPerPage={b}&datum%5Bbefore%5D={d}&datum%5Bafter%5D={c}".format(a = page, b = items_per_page, c = start_date, d = end_date)
r = requests.get(url, {"apiToken": token})
if r.status_code == 200:
None
elif r.status_code == 429: #API limit per request reached
msg = r.json()["message"] #shows message with info about when next request can be made
t = "".join(a for a in msg if a.isdigit())
print("Holy Moly! You exceeded the requests limit per hour, now you need to wait " + t + " seconds...")
time.sleep(int(t)+60)
request.count = 1
start = time.time()
r = request(token, page)
else: #In case of different errors
raise Exception("Status code: " + r.status_code, "Error message: " + r.text, "Stopped on page: " + str(page))
time.sleep(pause)
return r
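# Hedged usage sketch (added): fetch a small first page of the dataset. The environment
# variable names are assumptions; a real API token can be obtained at
# https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet.
if __name__ == "__main__" and os.environ.get("COVID_API_DEMO"):
    _demo_response = request(os.environ["COVID_API_TOKEN"], page=1, items_per_page=10)
    print(_demo_response.json()["hydra:totalItems"])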
def get_total_pages(token, start_date = "1.1.2020", end_date = "24.12.2021"):
"""
    Identify how many pages need to be downloaded to obtain the whole dataset for the given
    start date and end date.
Parameters
----------
token : str
input token for the API - can be obatained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
start_date : str
begining date of the dataset - datum in format "dd.mm.YYYY" (default is "1.1.2020")
end_date : str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
Returns
-------
total_pages : int
total number of pages for given period
"""
r = request(token, start_date = start_date, end_date = end_date)
total_pages = int(r.json()["hydra:view"]["hydra:last"].split("=")[-1])
return total_pages
def get_total_items(token, start_date = "1.1.2020", end_date = "24.12.2021"):
"""
    Identify how many rows are in the dataset for the given start date and end date
Parameters
----------
token : str
input token for the API - can be obatained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
start_date = str
begining date of the dataset - datum in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
Returns
-------
total_items : int
total number of rows in dataset for given time period
"""
r = request(token, start_date = start_date, end_date = end_date)
total_items = int(r.json()['hydra:totalItems'])
return total_items
def get_vacination():
r = requests.get("https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/ockovani-pozitivni-hospitalizovani.csv-metadata.json")
url = "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/" + r.json()["url"]
csv = requests.get("url")
csv = csv.content.decode('utf8')
csv_df = pd.read_csv(io.StringIO(csv))
csv_df.to_parquet('dataz.gzip',compression='gzip')
return csv_df
def duplicates_handling(df, i, P, pdf, total_len, start_date = "1.1.2020", end_date = "24.12.2021"):
"""Search for values that were not downloaded due to duplicates.
The API provides data based on pages - each pages can contain only certain amount of rows (in our case 5000). But we are
downloading dataset with more than 2mil. rows, hence we need to download about 500 pages and merge them together.
Unforunatelly, the data on each page are not exactly ordered and it may happend that same value is on page 1 and page 2,
for example. In other words, the obervations are not entirely fixed to specific row, thus when we request for page 1 many time
we do not get exactly the same results for each request. We solved it by indetifying if there was any duplicates and if yes,
then we iterate for multiple times in neighbourhood of the page untill we get the missed values.
Parameters
----------
df : dataframe
dataframe with covid data
i : int
a page where we curretly are
P : dic
dictionary that stores duplicates P = {page:duplicates}
start_date = str
begining date of the dataset - datum in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end date of the dataset - datum in format "dd.mm.YYYY" (default is "24.12.2021")
Returns
-------
df
returns the dataframe hopefully with more rows
"""
duplicates = pdf + 10000 - len(df) #if len(df), after download of two pages, did not increse by 10000 -> duplicates
#print("duplicates: ", duplicates)
if duplicates > 0:
print("Handling duplicates...")
m = 1 # defiend to prevent infinite while loop
while duplicates > 0: #should handle missing values due to duplicates
if m == 8: #stops, if it does not find it can still happen that whole dataset will be downloaded (sometimes it finds more than is the actual number of duplicates)
if total_len + 10000 - duplicates > i * 5000:
print("succesful")
P[i] = duplicates
else:
print("unsucceseful")
P[i] = duplicates
break
elif m % 2 == 0: #softer force, 5000 rows per page
for j in range(max(i - 2, 1), i + 1):
e = request(token, j, start_date = start_date, end_date = end_date)
df = df.merge(pd.DataFrame.from_dict(e.json()["hydra:member"]), how = "outer").drop_duplicates()
duplicates = pdf + 10000 - len(df)
#print("small", duplicates)
else: #harder force 10000 rows per page
for n in range(max(int(i/2) - 1, 1), int(i/2) + 1):
e = request(token, n, 10000, start_date = start_date, end_date = end_date)
df = df.merge(pd.DataFrame.from_dict(e.json()["hydra:member"]), how = "outer").drop_duplicates()
duplicates = pdf + 10000 - len(df)
#print("big", duplicates)
m += 1
if m < 5:
print("Solved!")
P[i] = duplicates
return df
def saving_interim_results(df, i):
"""
Saves partial downloads of the dataframe to your folder. The saving happens every 50 pages.
It enables the code to run faster as when the part of the dataset is saved it is also drop. The data are
saved as parquet with snappy compression, b/c it is fast to load. So we maximally
work with df of length 280 000. And if your download is interapted you then do not need to start over again and
can begin close to where you stoped.
Parameters
----------
df : dataframe
dataframe with covid data
i : int
a page on which the download is
Returns
-------
df
last 30000 rows of your dataframe (the dataframe is not drop entirely, b/c there might
be duplicaes between pages, so 30000 rows are left)
"""
df.to_parquet('data{a}.parquet'.format(a = int(i/50)), compression = 'snappy')
df = pd.read_parquet('data{a}.parquet'.format(a = int(i/50)), engine = 'fastparquet').iloc[-30000:]
return df
def merging_interim_results(pages_total, intial_df = "1"):
"""
Merges all the interim results created by function saving_interim_results(df, i) into final data set. And attemps
to delete the interim results from your folder. We save the fianl dataset
with .gzip compressino, which should be the best for space limition in parquet.
Parameters
----------
pages_total : int
total number of pages for given period
intial_df : str
a first interim result
Returns
-------
data
the final downloaded dataset
"""
L = list(range(2, int(pages_total/50) + 2)) #list of numbers of saved interim datasets
data = pd.read_parquet('data{a}.parquet'.format(a = intial_df), engine = 'fastparquet')
cwd = os.getcwd()
os.remove(cwd + "/data{a}.parquet".format(a = 1))
for j in L:
data = data.merge(pd.read_parquet('data{a}.parquet'.format(a = j), engine = 'fastparquet'), how = "outer")
try:
cwd = os.getcwd()
os.remove(cwd + "/data{a}.parquet".format(a = j)) #removes saved interim dataset
except:
None
return data
class Covid_Data:
"""A class used to manage covid data - storing, downloading and upadating
...
Attributes
----------
data : pandas data frame
data frame of the covid data
info : dic
dictionary of information regarding covid data in attribute data (total cases(rows), start_date, end_date)
total_pages : int
information regarding how pages needs to be requested from API (loads by calling method get_page(token, items_per_page = 5000))
my_page : int
states on what page is your data set, helpful when only fraction of data were donwloaded
(loads by calling method get_page(token, items_per_page = 5000))
Methods
-------
get_info()
loads info about the covid data in attribute data
get_page(token, items_per_page = 5000)
obtain info about how many pages were downloaded out of total pages (API send the data in pages)
downloader(token, start_page = 1, start_date = "1.1.2020", end_date = "24.12.2021", upd = "N")
downloads covid data from API
updater(token, end_date = date.today())
updates covid data stored in attribute data
"""
def __init__(self):
"""
Parameters
----------
data : int, dataframe
if you already downloaded covid that then input the dataframe, otherwise input 0 - the data can be donwloaded by method download
"""
print("Class initialize, if you want to load data provided by this package - use method load_data() or you can download it on your own using method download(*args, *kwargs) You can access documentation at: "+str(importlib_resources.files("app"))+"/docs/_build/html/index.html")
self.data = 0
self.info = {"total cases": [],
"start_date": [],
"end_date": []}
if isinstance(self.data, int):
print("No data loaded or downloaded.")
else:
print("The provided data were loaded.")
self.get_info()
def load_data(self):
"""
loads data stored in package (from 1.3.2020 - 24.12.2021)
"""
my_resources = importlib_resources.files("app")
path = (str(my_resources) + "/data/datacovid.bz2")
self.data = pd.read_pickle(path, compression='bz2')
self.get_info()
print("Data loaded")
def get_info(self):
"""
loads info about the covid data in attribute data
if no data frame is loaded/downloaded yet it returns empty dictionary
"""
self.info["total cases"] = len(self.data)
self.data.datum = pd.to_datetime(self.data.datum)
self.data = self.data.sort_values(by = ["datum"])
self.info["start_date"] = str(self.data.iloc[1].datum.strftime("%d.%m.%Y"))
self.info["end_date"] = str(self.data.iloc[-1].datum.strftime("%d.%m.%Y"))
def get_page(self, token):
"""
obtain info about how many pages were downloaded out of total pages (API send the data in pages)
Parameters
----------
token : str
input token for the API - can be obatained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
"""
self.total_pages = get_total_pages(token, start_date = self.info["start_date"], end_date = self.info["end_date"])
self.my_page = int(len(self.data)/5000) + 1
self.to_update = get_total_pages(token, start_date = self.info["start_date"], end_date = date.today())
print("You downloaded " + str(self.my_page) + " pages out of total " + str(self.total_pages) + " pages. \nTo upadte your dataset to today date you need to get total: " + str(self.to_update)+ " pages")
def downloader(self, token, start_page = 1, start_date = "1.1.2020", end_date = "24.12.2021", upd = "N"):
"""
downloads covid data from API
Parameters
----------
token : str
input token for the API - can be obatained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
start_page : int
declare on page you want to start the download - if you begin then 1, if you already downloaded some part you can resume
but page where you stoped needs to be specialzed, it can be found out througt method get_page() (default is 1)
start_date = str
begining of the covid data - datum in format "dd.mm.YYYY" (default is "1.1.2020")
end_date = str
end of the covid data - datum in format "dd.mm.YYYY" (default is "24.12.2021")
upd = str
only used by updater, irelevant for you (default is "N")
"""
if start_page == 1: #if you begin your download
r_0 = request(token, start_page, start_date = start_date, end_date = end_date)
df = pd.DataFrame.from_dict(r_0.json()["hydra:member"])
total_len = len(df)
pdf = 0
pr_total_len = 0
else: #if you continue from specified page
start_page = int(start_page/50) * 50
df = pd.read_parquet('data1.parquet', engine='fastparquet') #estabilis df, on which will be the merge preformed
for k in range(1, int(start_page/50) + 1): #loads saved interim results
df = df.merge(pd.read_parquet('data{a}.parquet'.format(a = k), engine = 'fastparquet'), how = "outer").drop_duplicates()
pr_total_len = len(df) - int(start_page/50) * 50 * 5000
df = df.iloc[-30000:]
pdf = len(df)
pages_total = get_total_pages(token, start_date = start_date, end_date = end_date)
items_total = get_total_items(token, start_date = start_date, end_date = end_date)
P = {1 : 0} #dict where numbers of dupliates are saved
for i in range(start_page + 1, pages_total + 1): #loop for requesting and actual downloading
r = request(token, i, start_date = start_date, end_date = end_date)
df = df.merge(pd.DataFrame.from_dict(r.json()["hydra:member"]), how = "outer").drop_duplicates()
if i % 2 == 0 or i == pages_total: #every second page we check for duplicates and display progress
df = duplicates_handling(df, i, P, pdf, total_len, start_date = start_date, end_date = end_date)
total_len = i * 5000 - sum(P.values()) + pr_total_len
print("Currently on page " + str(i) + ". Progress: " + str(round((total_len/items_total) * 100, 1)) + " %.")
#print(total_len, i*5000, len(df))
pdf = len(df)
if i % 50 == 0: #every fifthy pages we save current dataset and drop it
#(not whole, keep last 30000 row to check for possible duplicates between page 50 a 51, 100 101 and so on)
df = saving_interim_results(df, i)
pdf = 30000 #we load last 30000 rows to check for duplicates to previous pages
#print(len(df), i * 5000, "ahead/behind: " + str(total_len - i*5000))
#print(P)
df.to_parquet('data{a}.parquet'.format(a = int(i/50)+1), compression = 'snappy')
data = merging_interim_results(pages_total)
if upd == "N": #save with diff name if download
data.to_parquet('datafinal{a}.gzip'.format(a = str("update"+upd)), compression = 'gzip')
self.data = data
else:
data.to_parquet('dataupdate.gzip', compression = 'gzip')
return data
def updater(self, token, end_date = datetime.today()):
"""
updates covid data from API
Parameters
----------
token : str
input token for the API - can be obatained here: https://onemocneni-aktualne.mzcr.cz/vytvorit-ucet
end_date : str, datetime
until what date you want to update the date (default date.today())
"""
if isinstance(end_date, str): #we need correct format of date in string
try:
end_date_dtformat = datetime.strptime(end_date, "%d.%m.%Y")
print("updating...")
except:
print("Incorrect date type, should be DD.MM.YYYY")
else: #if end_date is not filled then we need to transform datetype to str
end_date_dtformat = end_date
end_date = end_date.strftime("%d.%m.%Y")
print("updating...")
if end_date_dtformat < datetime.strptime(self.info["end_date"], "%d.%m.%Y"):
raise ValueError("End date before start date!")
data_new = self.downloader(token, start_page = 1, start_date = self.info["end_date"], end_date = end_date, upd = "Y")
        data_new.datum = pd.to_datetime(data_new.datum)
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import tqdm
def load_data(index=0):
""" 0: C7
1: C8
2: C9
3: C11
4: C13
5: C14
6: C15
7: C16
Note that C7 and C13 included a short break
(for about 100 timestamps long)
between the two procedure.
"""
fp = os.path.dirname(__file__)
if index == 0:
df = pd.read_csv(fp + '/C7-1.csv.gz')
df = pd.concat([df, pd.read_csv(fp + '/C7-2.csv.gz')])
df = df.reset_index(drop=True)
df.Timestamp = df.index.values
return df
elif index == 1:
return pd.read_csv(fp + '/C8.csv.gz')
elif index == 2:
return pd.read_csv(fp + '/C9.csv.gz')
elif index == 3:
return pd.read_csv(fp + '/C11.csv.gz')
elif index == 4:
df = pd.read_csv(fp + '/C13-1.csv.gz')
df = pd.concat([df, pd.read_csv(fp + '/C13-2.csv.gz')])
df = df.reset_index(drop=True)
df.Timestamp = df.index.values
return df
elif index == 5:
return pd.read_csv(fp + '/C14.csv.gz')
elif index == 6:
return pd.read_csv(fp + '/C15.csv.gz')
elif index == 7:
return pd.read_csv(fp + '/C16.csv.gz')
else:
raise ValueError
def rename_components(df):
""" current and speed
"""
# Rename L
L_curr = ['L_1', 'L_3', 'L_4', 'L_7', 'L_9']
L_speed = ['L_2', 'L_6', 'L_5', 'L_8', 'L_10']
df = df.rename(columns={k: f'c{i}_curr' for i, k in enumerate(L_curr)})
df = df.rename(columns={k: f'c{i}_speed' for i, k in enumerate(L_speed)})
# Rename A, B, and C
df = df.rename(columns={f'A_{i}': f'c5_val{i}' for i in range(1, 6)})
df = df.rename(columns={f'B_{i}': f'c6_val{i}' for i in range(1, 6)})
df = df.rename(columns={f'C_{i}': f'c7_val{i}' for i in range(1, 6)})
return df[df.columns.sort_values()]
def load_clean_data(index=0):
return rename_components(load_data(index=index))
def set_broken_labels(df, size):
labels = np.zeros(df.shape[0])
labels[-size:] = 1
df['broken'] = labels
return df
def run_to_failure_aux(df, n_sample, desc=''):
seq_len = df.shape[0]
samples = []
pbar = tqdm.tqdm(total=n_sample, desc=desc)
while len(samples) < n_sample:
# random censoring
t = np.random.randint(2, seq_len)
sample = {'lifetime': t, 'broken': df.loc[t, 'broken']}
sample = pd.DataFrame(sample, index=[0])
features = df.iloc[:t].mean(axis=0)[:-1]
sample[features.keys()] = features.values
samples.append(sample)
# break
pbar.update(1)
return | pd.concat(samples, axis=0) | pandas.concat |
import os
import pandas as pd
from typing import Dict, List, Any, Optional, Union
def write_metadata_emb(cats_d: Dict[str, List[Any]],
log_dir: str,
names_d: Optional[
Dict[str, Union[str, pd.DataFrame]]] = None,
) -> Dict[str, str]:
"""Book-keeping and writing of human-readable metadata for embeddings
Args:
cats_d: Dictionary of categories
log_dir: Directory to write metadata to
(should be the same as the log directory of checkpoints etc)
names_d: Dictionary of human-readable labels per element in vocab.
The values can either be a path to a csv file or a dataframe. The
index should be in the same units as stored in `cats_d`. The other
columns will be used as label names (can have multiple columns
for multiple labels).
If `None`, the embedding projector will just use the raw id of the
vocab
Returns:
Dictionary of written metadata paths
"""
metas_written_d = {}
for feat_name, cats in cats_d.items():
path_out = os.path.join(log_dir, f'metadata-{feat_name}.tsv')
if names_d and (feat_name in names_d):
name_path_or_df = names_d[feat_name]
            if isinstance(name_path_or_df, str):
                names_df = pd.read_csv(
                    name_path_or_df, index_col=feat_name)
elif isinstance(name_path_or_df, pd.DataFrame):
names_df = name_path_or_df
else:
raise ValueError('Name mapping must be path or dataframe')
lbls_embs = names_df.reindex(cats).reset_index(drop=True)
lbls_embs.index.name = 'index'
lbls_embs.to_csv(path_out, sep='\t')
else:
# Write single column with just the raw vocab id
            lbls_embs = pd.Series(cats)
            lbls_embs.to_csv(path_out, sep='\t', index=False, header=False)
        metas_written_d[feat_name] = path_out
    return metas_written_d
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the dataframe editor.
"""
from __future__ import division
# Standard library imports
import os
import sys
from datetime import datetime
from unittest.mock import Mock, ANY
# Third party imports
from pandas import (DataFrame, date_range, read_csv, concat, Index, RangeIndex,
MultiIndex, CategoricalIndex, Series)
from qtpy.QtGui import QColor
from qtpy.QtCore import Qt, QTimer
import numpy
import pytest
from flaky import flaky
# Local imports
from spyder.utils.programs import is_module_installed
from spyder.utils.test import close_message_box
from spyder.plugins.variableexplorer.widgets import dataframeeditor
from spyder.plugins.variableexplorer.widgets.dataframeeditor import (
DataFrameEditor, DataFrameModel)
# =============================================================================
# Constants
# =============================================================================
FILES_PATH = os.path.dirname(os.path.realpath(__file__))
# =============================================================================
# Utility functions
# =============================================================================
def colorclose(color, hsva_expected):
"""
Compares HSV values which are stored as 16-bit integers.
"""
hsva_actual = color.getHsvF()
return all(abs(a-b) <= 2**(-16) for (a,b) in zip(hsva_actual, hsva_expected))
def data(dfm, i, j):
return dfm.data(dfm.createIndex(i, j))
def bgcolor(dfm, i, j):
return dfm.get_bgcolor(dfm.createIndex(i, j))
def data_header(dfh, i, j, role=Qt.DisplayRole):
return dfh.data(dfh.createIndex(i, j), role)
def data_index(dfi, i, j, role=Qt.DisplayRole):
return dfi.data(dfi.createIndex(i, j), role)
def generate_pandas_indexes():
""" Creates a dictionary of many possible pandas indexes """
return {
'Index': Index(list('ABCDEFGHIJKLMNOPQRST')),
'RangeIndex': RangeIndex(0, 20),
'Float64Index': Index([i/10 for i in range(20)]),
'DatetimeIndex': date_range(start='2017-01-01', periods=20, freq='D'),
'MultiIndex': MultiIndex.from_product(
[list('ABCDEFGHIJ'), ('foo', 'bar')], names=['first', 'second']),
'CategoricalIndex': CategoricalIndex(list('abcaadaccbbacabacccb'),
categories=['a', 'b', 'c']),
}
# =============================================================================
# Tests
# =============================================================================
def test_dataframemodel_index_sort(qtbot):
"""Validate the data in the model for index when sorting."""
ds = Series(numpy.arange(10))
editor = DataFrameEditor(None)
editor.setup_and_check(ds)
index = editor.table_index.model()
index.sort(-1, order=Qt.AscendingOrder)
assert data_index(index, 0, 0, Qt.DisplayRole) == '0'
assert data_index(index, 9, 0, Qt.DisplayRole) == '9'
index.sort(-1, order=Qt.DescendingOrder)
assert data_index(index, 0, 0, Qt.DisplayRole) == '9'
assert data_index(index, 9, 0, Qt.DisplayRole) == '0'
def test_dataframe_to_type(qtbot):
"""Regression test for spyder-ide/spyder#12296"""
# Setup editor
d = {'col1': [1, 2], 'col2': [3, 4]}
df = DataFrame(data=d)
editor = DataFrameEditor()
assert editor.setup_and_check(df, 'Test DataFrame To action')
with qtbot.waitExposed(editor):
editor.show()
# Check editor doesn't have changes to save and select an initial element
assert not editor.btn_save_and_close.isEnabled()
view = editor.dataTable
view.setCurrentIndex(view.model().index(0, 0))
# Show context menu and select option `To bool`
view.menu.show()
qtbot.keyPress(view.menu, Qt.Key_Down)
qtbot.keyPress(view.menu, Qt.Key_Down)
qtbot.keyPress(view.menu, Qt.Key_Return)
# Check that changes where made from the editor
assert editor.btn_save_and_close.isEnabled()
def test_dataframe_datetimeindex(qtbot):
"""Regression test for spyder-ide/spyder#11129 ."""
ds = Series(
numpy.arange(10),
index=date_range('2019-01-01', periods=10))
editor = DataFrameEditor(None)
editor.setup_and_check(ds)
index = editor.table_index.model()
assert data_index(index, 0, 0) == '2019-01-01 00:00:00'
assert data_index(index, 9, 0) == '2019-01-10 00:00:00'
def test_dataframe_simpleindex(qtbot):
"""Test to validate proper creation and handling of a simpleindex."""
df = DataFrame(numpy.random.randn(6, 6))
editor = DataFrameEditor(None)
editor.setup_and_check(df)
header = editor.table_header.model()
assert header.headerData(0, Qt.Horizontal,
Qt.DisplayRole) == "0"
assert header.headerData(1, Qt.Horizontal,
Qt.DisplayRole) == "1"
assert header.headerData(5, Qt.Horizontal,
Qt.DisplayRole) == "5"
def test_dataframe_simpleindex_custom_columns():
"""Test to validate proper creation and handling of custom simpleindex."""
df = DataFrame(numpy.random.randn(10, 5),
columns=['a', 'b', 'c', 'd', 'e'])
editor = DataFrameEditor(None)
editor.setup_and_check(df)
header = editor.table_header.model()
assert header.headerData(0, Qt.Horizontal,
Qt.DisplayRole) == "a"
assert header.headerData(1, Qt.Horizontal,
Qt.DisplayRole) == "b"
assert header.headerData(4, Qt.Horizontal,
Qt.DisplayRole) == "e"
def test_dataframe_multiindex():
"""Test to validate proper creation and handling of a multiindex."""
arrays = [numpy.array(['bar', 'bar', 'baz', 'baz',
'foo', 'foo', 'qux', 'qux']),
numpy.array(['one', 'two', 'one', 'two',
'one', 'two', 'one', 'two'])]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(numpy.random.randn(6, 6), index=index[:6],
columns=index[:6])
editor = DataFrameEditor(None)
editor.setup_and_check(df)
header = editor.table_header.model()
assert header.headerData(0, Qt.Horizontal,
Qt.DisplayRole) == 0
assert data_header(header, 0, 0) == 'bar'
assert data_header(header, 1, 0) == 'one'
assert data_header(header, 0, 1) == 'bar'
assert data_header(header, 1, 1) == 'two'
assert data_header(header, 0, 2) == 'baz'
assert data_header(header, 1, 2) == 'one'
assert data_header(header, 0, 3) == 'baz'
assert data_header(header, 1, 3) == 'two'
assert data_header(header, 0, 4) == 'foo'
assert data_header(header, 1, 4) == 'one'
assert data_header(header, 0, 5) == 'foo'
assert data_header(header, 1, 5) == 'two'
def test_header_bom():
"""Test for BOM data in the headers."""
df = read_csv(os.path.join(FILES_PATH, 'issue_2514.csv'))
editor = DataFrameEditor(None)
editor.setup_and_check(df)
header = editor.table_header.model()
assert header.headerData(0, Qt.Horizontal,
Qt.DisplayRole) == "Date (MMM-YY)"
@pytest.mark.skipif(is_module_installed('pandas', '<0.19'),
reason="It doesn't work for Pandas 0.19-")
def test_header_encoding():
"""Test for header encoding handling."""
df = read_csv(os.path.join(FILES_PATH, 'issue_3896.csv'))
editor = DataFrameEditor(None)
editor.setup_and_check(df)
header = editor.table_header.model()
assert header.headerData(0, Qt.Horizontal,
Qt.DisplayRole) == "Unnamed: 0"
assert "Unieke_Idcode" in header.headerData(1, Qt.Horizontal,
Qt.DisplayRole)
assert header.headerData(2, Qt.Horizontal,
Qt.DisplayRole) == "a"
assert header.headerData(3, Qt.Horizontal,
Qt.DisplayRole) == "b"
assert header.headerData(4, Qt.Horizontal,
Qt.DisplayRole) == "c"
assert header.headerData(5, Qt.Horizontal,
Qt.DisplayRole) == "d"
def test_dataframemodel_basic():
df = DataFrame({'colA': [1, 3], 'colB': ['c', 'a']})
dfm = DataFrameModel(df)
assert dfm.rowCount() == 2
assert dfm.columnCount() == 2
assert data(dfm, 0, 0) == '1'
assert data(dfm, 0, 1) == 'c'
assert data(dfm, 1, 0) == '3'
assert data(dfm, 1, 1) == 'a'
def test_dataframemodel_sort():
"""Validate the data in the model."""
df = DataFrame({'colA': [1, 3], 'colB': ['c', 'a']})
dfm = DataFrameModel(df)
dfm.sort(1)
assert data(dfm, 0, 0) == '3'
assert data(dfm, 1, 0) == '1'
assert data(dfm, 0, 1) == 'a'
assert data(dfm, 1, 1) == 'c'
def test_dataframemodel_sort_is_stable(): # cf. spyder-ide/spyder#3010.
"""Validate the sort function."""
df = DataFrame([[2,14], [2,13], [2,16], [1,3], [2,9], [1,15], [1,17],
[2,2], [2,10], [1,6], [2,5], [2,8], [1,11], [1,1],
[1,12], [1,4], [2,7]])
dfm = DataFrameModel(df)
dfm.sort(1)
dfm.sort(0)
col2 = [data(dfm, i, 1) for i in range(len(df))]
assert col2 == [str(x) for x in [1, 3, 4, 6, 11, 12, 15, 17,
2, 5, 7, 8, 9, 10, 13, 14, 16]]
def test_dataframemodel_max_min_col_update():
df = DataFrame([[1, 2.0], [2, 2.5], [3, 9.0]])
dfm = DataFrameModel(df)
assert dfm.max_min_col == [[3, 1], [9.0, 2.0]]
def test_dataframemodel_max_min_col_update_constant():
    df = DataFrame([[1, 2.0], [1, 2.0], [1, 2.0]])
    dfm = DataFrameModel(df)
    assert dfm.max_min_col == [[1, 0], [2.0, 1.0]]
#!/usr/bin/env python
import pandas as pd
import numpy as np
import multiprocessing
import argparse
import operator
import os
import random
import sys
import time
import random
import subprocess
import pysam
import collections
import warnings
import math
import re
from Bio import SeqIO
base_path = os.path.split(__file__)[0]
def fragment_distribution(samfile):
    """
    count fragment size frequencies of read pairs mapped to the same contig
    """
all_reads = samfile.fetch()
size_freq = collections.defaultdict(int)
for read in all_reads:
if read.rnext == read.tid and read.is_paired:
size = abs(read.isize)
size_freq[size] += 1
return size_freq
def FragMAD(freq):
"""
calculate median and median absolute deviation fragment size distribution
"""
all_size = []
for key, value in freq.items():
all_size.extend([key] * int(value))
median_size = np.median(all_size)
residuals = abs(np.array(all_size) - median_size)
mad_size = 1.4826 * np.median(residuals)
return median_size, mad_size
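# Illustrative sketch (not part of the original script): FragMAD() expects a
# {fragment_size: count} mapping such as the one fragment_distribution() builds
# from a BAM file. The toy frequencies below are made up.
def _example_frag_mad():
    size_freq = {290: 10, 300: 50, 310: 12, 500: 1}
    median_size, mad_size = FragMAD(size_freq)
    # median_size is the median insert size; mad_size is 1.4826 * median absolute deviation
    return median_size, mad_size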
def split_sam(args):
split_command = ' '.join(['sh',
os.path.join(base_path, "split_sam.sh"),
args.assemblies,
args.bamfile,
args.output,
args.samtools])
os.system(split_command)
def seq_parse(args):
input = SeqIO.parse(args.assemblies, "fasta")
contig_seqs = {}
for record in input:
if len(record.seq) >= args.min_length:
contig_seqs[record.id] = str(record.seq)
return contig_seqs
def kmer_parse(seq, pool):
seq_kmer = {"position": [], "KAD": []}
for i in range(len(seq)):
if seq[i:(i + 25)] in pool:
seq_kmer["KAD"].append(pool[seq[i:(i + 25)]])
seq_kmer["position"].append(i + 1)
if (i + 25) >= len(seq):
break
return seq_kmer
def KAD_window_cal(seq_kmer):
KAD_window_dict = {"start_pos": [],
"mean_KAD": [],
"abnormal_KAD_ratio": [],
"dev_KAD": []}
for i in range(300, len(seq_kmer['position']), 100):
KAD_window_dict["start_pos"].append(i)
mean_KAD = np.mean(np.abs(seq_kmer['KAD'][i:i + 100]))
KAD_window_dict["mean_KAD"].append(mean_KAD)
KAD_window_dict["abnormal_KAD_ratio"].append(
np.sum(np.abs(seq_kmer['KAD'][i:i + 100]) > 0.5) / 100)
KAD_window_dict["dev_KAD"].append(
np.sqrt(np.var(np.abs(seq_kmer['KAD'][i:i + 100]))))
return KAD_window_dict
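# Illustrative sketch (not part of the original script): KAD_window_cal() consumes
# the {'position': [...], 'KAD': [...]} structure produced by kmer_parse(). The
# per-k-mer KAD values here are synthetic; real ones come from the jellyfish-based
# KAD() routine further below.
def _example_kad_windows(n_kmers=1000):
    seq_kmer = {"position": list(range(1, n_kmers + 1)),
                "KAD": list(np.random.normal(0, 0.3, n_kmers))}
    windows = KAD_window_cal(seq_kmer)
    # One row per 100-position window, starting at position 300.
    return pd.DataFrame(windows)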
def KAD_feature(args):
seq_data = seq_parse(args)
KAD_dict = {"contig": [],
'start_pos': [],
'mean_KAD': [],
'abnormal_KAD_ratio': [],
'dev_KAD': []}
for contig, seq in seq_data.items():
if len(seq) < args.min_length:
continue
if os.path.exists(os.path.join(args.output, "temp/KAD/KAD_data/",
"{}.KAD".format(str(contig)))):
try:
KAD_data = pd.read_csv(os.path.join(args.output, "temp/KAD/KAD_data/",
"{}.KAD".format(str(contig))), index_col=0, sep="\t")
KAD_data = KAD_data.drop_duplicates(['k-mer'])
except BaseException:
continue
KAD_data.index = KAD_data['k-mer']
KAD_pool = KAD_data.loc[:, 'KAD'].to_dict()
seq_kmer = kmer_parse(seq, KAD_pool)
KAD_window = KAD_window_cal(seq_kmer)
KAD_dict["contig"].extend([contig] * len(KAD_window['start_pos']))
KAD_dict["start_pos"].extend(KAD_window['start_pos'])
KAD_dict["mean_KAD"].extend(KAD_window["mean_KAD"])
KAD_dict["abnormal_KAD_ratio"].extend(
KAD_window["abnormal_KAD_ratio"])
KAD_dict["dev_KAD"].extend(KAD_window["dev_KAD"])
return KAD_dict
def KAD(args, contig, file):
    if os.path.exists(os.path.join(args.output, "temp/KAD/KAD_data/",
                                   "{}.KAD".format(str(contig)))):
return 0
contig_file = os.path.join(args.output, "temp/split/contigs/", "{}.fa".format(file))
read_file = os.path.join(args.output,
"temp/split/reads/{}.read.fa".format(str(contig)))
# kmer count
outputdir = os.path.join(args.output, "temp/KAD/temp")
contig_command1 = ' '.join([args.jellyfish,
"count -m 25 -o",
os.path.join(outputdir, '{}.jf'.format(str(contig))),
"-s 100M -t 8",
contig_file])
contig_command2 = ' '.join([args.jellyfish,
"dump -c -t -o",
os.path.join(outputdir, '{}_count.txt'.format(str(contig))),
os.path.join(outputdir, '{}.jf'.format(str(contig)))])
os.system(contig_command1)
os.system(contig_command2)
read_command1 = ' '.join([args.jellyfish,
"count -m 25 -o",
os.path.join(outputdir, '{}.read.jf'.format(str(contig))),
"-s 100M -t 8",
read_file])
read_command2 = ' '.join([args.jellyfish,
"dump -c -t -o",
os.path.join(outputdir, '{}_count.read.txt'.format(str(contig))),
os.path.join(outputdir, '{}.read.jf'.format(str(contig)))])
os.system(read_command1)
os.system(read_command2)
assembly_kmer = pd.read_csv(os.path.join(args.output, "temp/KAD/temp/",
"{}_count.txt".format(str(contig))), sep="\t", header=None)
assembly_kmer.index = assembly_kmer[0]
try:
read_kmer = pd.read_csv(os.path.join(args.output, "temp/KAD/temp/",
"{}_count.read.txt".format(str(contig))),
sep="\t", header=None)
read_kmer.index = read_kmer[0]
except BaseException:
# zero reads mapped to contig
return 0
shared_kmer = set(assembly_kmer.loc[assembly_kmer[1] == 1, 0]).intersection(read_kmer.index)
if len(shared_kmer) == 0:
kmer_depth = pd.value_counts(read_kmer.loc[read_kmer[1] > 5, 1]).index[0]
else:
kmer_depth = pd.value_counts(read_kmer.loc[shared_kmer, ][1]).index[0]
assembly_kmer.columns = ['k-mer', 'assembly_count']
read_kmer.columns = ['k-mer', 'read_count']
assembly_kmer.index = range(assembly_kmer.shape[0])
read_kmer.index = range(read_kmer.shape[0])
kmer_result = pd.merge(assembly_kmer, read_kmer, how='outer')
kmer_result = kmer_result.fillna(0)
kmer_result['KAD'] = np.log2((kmer_result['read_count'] + kmer_depth)
/ (kmer_depth * (kmer_result['assembly_count'] + 1)))
kmer_result.loc[(kmer_result['read_count'] == 1) *
(kmer_result['assembly_count'] == 0), 'KAD'] = np.nan
kmer_result = kmer_result.loc[kmer_result['KAD'] == kmer_result['KAD'], ]
kmer_result.loc[:, ['k-mer', 'KAD']].to_csv(
os.path.join(args.output, "temp/KAD/KAD_data/", "{}.KAD".format(str(contig))), sep="\t")
def fragment_coverage_cal(reads, mu, dev, length):
"""
calculate fragment coverage per contig
"""
frag_coverage = np.array([0] * length)
for read in reads:
if read.rnext == read.tid and read.is_proper_pair:
size = abs(read.isize)
if (mu - 3 * dev <= size <= mu + 3 * dev):
if read.next_reference_start < read.reference_start:
start = min(read.next_reference_start,
read.reference_start,
read.reference_end)
end = start + size
frag_coverage[start:end] += 1
return frag_coverage
def window_read_cal(reads, mu, dev):
read_dict = {"start_pos": [], "read_count": [], "proper_read_count": [], "inversion_read_count": [], "clipped_read_count": [],
"supplementary_read_count": [], "discordant_size_count": [], "discordant_loc_count": []}
read_temp = {"num_read": 0, "num_proper": 0, "num_inversion": 0, "num_clipped": 0, "num_supplementary": 0, "num_discordant_size": 0,
"num_discordant_loc": 0}
pos = 0
for read in reads:
new_pos = math.floor((read.reference_start - 300) / 100) * 100 + 300
if read.reference_start < 300:
continue
if pos == 0:
pos = new_pos
elif new_pos != pos:
read_dict["start_pos"].append(pos)
read_dict["read_count"].append(read_temp["num_read"])
read_dict["proper_read_count"].append(read_temp["num_proper"])
read_dict["inversion_read_count"].append(
read_temp["num_inversion"])
read_dict["clipped_read_count"].append(read_temp["num_clipped"])
read_dict["supplementary_read_count"].append(
read_temp["num_supplementary"])
read_dict["discordant_size_count"].append(
read_temp["num_discordant_size"])
read_dict["discordant_loc_count"].append(
read_temp["num_discordant_loc"])
read_temp = {"num_read": 0,
"num_proper": 0,
"num_inversion": 0,
"num_clipped": 0,
"num_supplementary": 0,
"num_discordant_size": 0,
"num_discordant_loc": 0}
pos = new_pos
read_temp["num_read"] += 1
if read.is_paired:
if read.rnext == read.tid:
if read.is_proper_pair:
read_temp["num_proper"] += 1
if (read.is_reverse + read.mate_is_reverse) != 1:
read_temp["num_inversion"] += 1
if not mu - 3 * dev <= abs(read.isize) <= mu + 3 * dev:
read_temp["num_discordant_size"] += 1
else:
read_temp["num_discordant_loc"] += 1
if read.get_cigar_stats()[0][4] > 20:
read_temp["num_clipped"] += 1
if (read.is_supplementary and read.get_cigar_stats()[0][5] > 20):
read_temp["num_supplementary"] += 1
return read_dict
def window_frag_cal(coverage):
"""
Using sliding window approach to smooth out features
"""
coverage = np.array(coverage)
cov = {"pos": [], "coverage": [], "deviation": []}
for i in range(300, len(coverage), 100):
start = i
end = i + 100
cov["coverage"].append(np.mean(coverage[start:end]))
cov["deviation"].append(
np.sqrt(np.var(coverage[start:end])) / np.mean(coverage[start:end]))
cov["pos"].append(start)
if len(coverage) - end <= 300:
break
return cov
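# Illustrative sketch (not part of the original script): window_frag_cal() smooths
# any per-base coverage vector, e.g. the array returned by fragment_coverage_cal().
# The Poisson-distributed toy coverage below is synthetic.
def _example_window_coverage(length=2000, depth=30):
    coverage = np.random.poisson(depth, length)
    cov = window_frag_cal(coverage)
    return pd.DataFrame(cov)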
def contig_pool(samfile):
contig_len = {}
for (ref, lens) in zip(samfile.references, samfile.lengths):
contig_len[ref] = lens
return contig_len
def pileup_window_cal(pileup_dict):
window_dict = {"contig": [], "start_pos": [], "correct_portion": [], "ambiguous_portion": [], "disagree_portion": [],
"deletion_portion": [], "insert_portion": [], "coverage": [], "deviation": []}
for i in range(300, len(pileup_dict['correct']), 100):
start = i
end = i + 100
total = np.sum(pileup_dict['depth'][start:end])
window_dict["contig"].append(pileup_dict["contig"][0])
window_dict["start_pos"].append(start)
window_dict["correct_portion"].append(
np.sum(pileup_dict['correct'][start:end]) / total)
window_dict["ambiguous_portion"].append(
np.sum(pileup_dict["ambiguous"][start:end]) / total)
window_dict["insert_portion"].append(
np.sum(pileup_dict['insert'][start:end]) / total)
window_dict["deletion_portion"].append(
np.sum(pileup_dict['deletion'][start:end]) / total)
window_dict["disagree_portion"].append(
np.sum(pileup_dict['disagree'][start:end]) / total)
window_dict["coverage"].append(
np.mean(pileup_dict["depth"][start:end]))
window_dict["deviation"].append(np.sqrt(np.var(
pileup_dict["depth"][start:end])) / np.mean(pileup_dict["depth"][start:end]))
if len(pileup_dict['correct']) - (i + 100) <= 300:
break
return window_dict
def read_breakpoint_per_contig(samfile, ref, lens):
reads = samfile.fetch(contig=ref)
break_count = {"breakcount": np.array([0] * lens),
"readcount": np.array( [0] * lens)}
for read in reads:
ref_end = read.reference_end
ref_start = read.reference_start
read_start = read.query_alignment_start
read_end = read.query_alignment_end
break_count["readcount"][ref_start:ref_end] += 1
if read.is_supplementary:
if re.match('^([0-9]+H)', read.cigarstring):
break_count["breakcount"][read.get_blocks()[0][0]] += 1
else:
if len(read.get_blocks()) == 1:
break_count["breakcount"][read.get_blocks()[0][1] - 1] += 1
else:
break_count["breakcount"][read.get_blocks()[-1][1] - 1] += 1
if read.get_cigar_stats()[0][4] > 0:
if re.match('^([0-9]+S)', read.cigarstring):
break_count["breakcount"][read.get_blocks()[0][0]] += 1
if (read.cigarstring).endswith('S'):
if len(read.get_blocks()) == 1:
break_count["breakcount"][read.get_blocks()[0][1] - 1] += 1
else:
break_count["breakcount"][read.get_blocks()[-1][1] - 1] += 1
data = | pd.DataFrame(break_count) | pandas.DataFrame |
from data_fetchers.remc_fetchers import fetchFcaDayAheadDf, fetchFcaForeVsActDf, fetchFcaForeVsActPrevDf, fetchIftDayAheadDf, fetchIftForeVsActDf, fetchAleaDayAheadDf, fetchAleaForeVsActDf, fetchEnerDayAheadDf, fetchEnerForeVsActDf, fetchResDayAheadDf, fetchResForeVsActDf
import pandas as pd
from operator import add
# constants for data store names
FCA_DAY_AHEAD_STORE_NAME = 'fcaDayAheadStore'
FCA_FORECAST_VS_ACTUAL_STORE_NAME = 'fcaForecastVsActualStore'
FCA_FORECAST_VS_ACTUAL_PREV_STORE_NAME = 'fcaForecastVsActualPrevStore'
IFT_DAY_AHEAD_STORE_NAME = 'iftDayAheadStore'
IFT_FORECAST_VS_ACTUAL_STORE_NAME = 'iftForecastVsActualStore'
ALEA_DAY_AHEAD_STORE_NAME = 'aleaDayAheadStore'
ALEA_FORECAST_VS_ACTUAL_STORE_NAME = 'aleaForecastVsActualStore'
ENER_DAY_AHEAD_STORE_NAME = 'enerDayAheadStore'
ENER_FORECAST_VS_ACTUAL_STORE_NAME = 'enerForecastVsActualStore'
RES_DAY_AHEAD_STORE_NAME = 'resDayAheadStore'
RES_FORECAST_VS_ACTUAL_STORE_NAME = 'resForecastVsActualStore'
def loadRemcDataStore(storeName):
global g_fcaDayAheadDf
global g_fcaForecastVsActual
global g_fcaForecastVsActualPrev
global g_iftDayAheadDf
global g_iftForecastVsActual
global g_aleaDayAheadDf
global g_aleaForecastVsActual
global g_enerDayAheadDf
global g_enerForecastVsActual
global g_resDayAheadDf
global g_resForecastVsActual
if storeName == FCA_DAY_AHEAD_STORE_NAME:
g_fcaDayAheadDf = fetchFcaDayAheadDf()
elif storeName == FCA_FORECAST_VS_ACTUAL_STORE_NAME:
g_fcaForecastVsActual = fetchFcaForeVsActDf()
elif storeName == FCA_FORECAST_VS_ACTUAL_PREV_STORE_NAME:
g_fcaForecastVsActualPrev = fetchFcaForeVsActPrevDf()
elif storeName == IFT_DAY_AHEAD_STORE_NAME:
g_iftDayAheadDf = fetchIftDayAheadDf()
elif storeName == IFT_FORECAST_VS_ACTUAL_STORE_NAME:
g_iftForecastVsActual = fetchIftForeVsActDf()
elif storeName == ALEA_DAY_AHEAD_STORE_NAME:
g_aleaDayAheadDf = fetchAleaDayAheadDf()
elif storeName == ALEA_FORECAST_VS_ACTUAL_STORE_NAME:
g_aleaForecastVsActual = fetchAleaForeVsActDf()
elif storeName == ENER_DAY_AHEAD_STORE_NAME:
g_enerDayAheadDf = fetchEnerDayAheadDf()
elif storeName == ENER_FORECAST_VS_ACTUAL_STORE_NAME:
g_enerForecastVsActual = fetchEnerForeVsActDf()
elif storeName == RES_DAY_AHEAD_STORE_NAME:
g_resDayAheadDf = fetchResDayAheadDf()
elif storeName == RES_FORECAST_VS_ACTUAL_STORE_NAME:
g_resForecastVsActual = fetchResForeVsActDf()
def deleteRemcDataStore(storeName):
global g_fcaDayAheadDf
global g_fcaForecastVsActual
global g_fcaForecastVsActualPrev
global g_iftDayAheadDf
global g_iftForecastVsActual
global g_aleaDayAheadDf
global g_aleaForecastVsActual
global g_enerDayAheadDf
global g_enerForecastVsActual
global g_resDayAheadDf
global g_resForecastVsActual
if storeName == FCA_DAY_AHEAD_STORE_NAME:
g_fcaDayAheadDf = pd.DataFrame()
elif storeName == FCA_FORECAST_VS_ACTUAL_STORE_NAME:
g_fcaForecastVsActual = pd.DataFrame()
elif storeName == FCA_FORECAST_VS_ACTUAL_PREV_STORE_NAME:
g_fcaForecastVsActualPrev = pd.DataFrame()
elif storeName == IFT_DAY_AHEAD_STORE_NAME:
g_iftDayAheadDf = pd.DataFrame()
elif storeName == IFT_FORECAST_VS_ACTUAL_STORE_NAME:
g_iftForecastVsActual = pd.DataFrame()
elif storeName == ALEA_DAY_AHEAD_STORE_NAME:
g_aleaDayAheadDf = pd.DataFrame()
    elif storeName == ALEA_FORECAST_VS_ACTUAL_STORE_NAME:
        g_aleaForecastVsActual = pd.DataFrame()
    elif storeName == ENER_DAY_AHEAD_STORE_NAME:
        g_enerDayAheadDf = pd.DataFrame()
    elif storeName == ENER_FORECAST_VS_ACTUAL_STORE_NAME:
        g_enerForecastVsActual = pd.DataFrame()
    elif storeName == RES_DAY_AHEAD_STORE_NAME:
        g_resDayAheadDf = pd.DataFrame()
    elif storeName == RES_FORECAST_VS_ACTUAL_STORE_NAME:
        g_resForecastVsActual = pd.DataFrame()
#!/usr/bin/env python
# coding=utf-8
# vim: set filetype=python:
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import posixpath
import sys
import math
import datetime
import string
from functools import wraps
import traceback
import xlrd3 as xlrd
import openpyxl
import unicodecsv as csv
from math import log10, floor
from pandas.api.types import is_string_dtype
import pandas as pd
import numpy as np
import six
import six.moves
import orjson as json
from plaidcloud.rpc import utc
from plaidcloud.rpc.connection.jsonrpc import SimpleRPC
from plaidcloud.rpc.rpc_connect import Connect
from plaidcloud.utilities.query import Connection, Table
from plaidcloud.utilities import data_helpers as dh
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2013-2021, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
CSV_TYPE_DELIMITER = '::'
class ContainerLogger(object):
def info(self, msg):
print(msg, file=sys.stderr)
def debug(self, msg):
self.info(msg)
def exception(self, msg=None):
print(traceback.format_exc(), file=sys.stderr)
if msg is not None:
print(msg, file=sys.stderr)
logger = ContainerLogger()
def sql_from_dtype(dtype):
"""Returns a sql datatype given a pandas datatype
Args:
dtype (str): The pandas datatype to convert
Returns:
str: the equivalent SQL datatype
Examples:
>>> sql_from_dtype('bool')
'boolean'
>>> sql_from_dtype('float64')
'numeric'
>>> sql_from_dtype('number')
'numeric'
>>> sql_from_dtype('varchar(123)')
'text'
>>> sql_from_dtype('char(3)')
'text'
>>> sql_from_dtype('xml')
'text'
>>> sql_from_dtype('bytea')
'largebinary'
"""
mapping = {
'bool': 'boolean',
'boolean': 'boolean',
's8': 'text',
's16': 'text',
's32': 'text',
's64': 'text',
's128': 'text',
's256': 'text',
'object': 'text',
's512': 'text',
's1024': 'text',
'text': 'text',
'string': 'text',
'int8': 'smallint', # 2 bytes
'int16': 'integer',
'smallint': 'smallint',
'int32': 'integer', # 4 bytes
'integer': 'integer',
'int64': 'bigint', # 8 bytes
'bigint': 'bigint',
'float8': 'numeric',
'float16': 'numeric', # variable but ensures precision
'float32': 'numeric', # variable but ensures precision
'float64': 'numeric', # variable but ensures precision
'numeric': 'numeric',
'serial': 'serial',
'bigserial': 'bigserial',
'datetime64[s]': 'timestamp', # This may have to cover all datettimes
'datetime64[d]': 'timestamp',
'datetime64[ns]': 'timestamp',
'timestamp': 'timestamp',
'timestamp without time zone': 'timestamp',
'timedelta64[s]': 'interval', # This may have to cover all timedeltas
'timedelta64[d]': 'interval',
'timedelta64[ns]': 'interval',
'interval': 'interval',
'date': 'date',
'time': 'time',
'binary': 'largebinary',
'bytea': 'largebinary',
'largebinary': 'largebinary',
'xml': 'text',
'uuid': 'text',
'money': 'numeric',
'real': 'numeric',
'json': 'text',
'cidr': 'text',
'inet': 'text',
'macaddr': 'text',
}
dtype = str(dtype).lower()
if dtype.startswith('num'):
dtype = 'numeric'
elif 'char' in dtype:
dtype = 'text'
return mapping[dtype]
def save_typed_psv(df, outfile, sep='|', **kwargs):
"""Saves a typed psv, from a pandas dataframe. Types are analyze compatible
sql types, written in the header, like {column_name}::{column_type}, ...
Args:
df (`pandas.DataFrame`): The dataframe to create the psv from
outfile (file object or str): The path to save the output file to
sep (str, optional): The separator to use in the output file
"""
# ADT2017: _write_copy_from did something special with datetimes, but I'm
# not sure it's necessary, so I'm leaving it out.
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
#should probably pass as many of them as possible on to to_csv/read_ccsv -
#the only issue is we have to make sure the header stays consistent.
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
column_names = [cleaned(n) for n in list(df)]
column_types = [sql_from_dtype(d) for d in df.dtypes]
header = [
CSV_TYPE_DELIMITER.join((name, sqltype))
for name, sqltype in six.moves.zip(column_names, column_types)
]
df.to_csv(outfile, header=header, index=False, sep=sep)
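# Illustrative sketch (an assumption, not part of the original module): the typed
# header written by save_typed_psv() looks like "id::bigint|name::text|amount::numeric".
# The column names and the /tmp path below are made up.
def _example_save_typed_psv(path='/tmp/example_typed.psv'):
    df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b'], 'amount': [1.5, 2.5]})
    save_typed_psv(df, path)
    return path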
def list_of_dicts_to_typed_psv(lod, outfile, types, fieldnames=None, sep='|'):
""" Saves a list of dicts as a typed psv. Needs a dict of sql types. If
provided, fieldnames will specify the column order.
Args:
lod (:type:`list` of :type:`dict`): The list of dicts containing the data
to use to create the psv
outfile (str): The path to save the output file to, including file name
types (dict): a dict with column names as the keys and column datatypes as
the values
fieldnames (:type:`list` of :type:`str`, optional): A list of the field names.
If none is provided, defaults to the keys in `types`
sep (str): The separator to use in the output file
"""
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
header = {
name: CSV_TYPE_DELIMITER.join((cleaned(name), sqltype))
for name, sqltype in types.items()
}
if fieldnames is None:
# Caller doesn't care about the order
fieldnames = list(types.keys())
if isinstance(outfile, six.string_types):
buf = open(outfile, 'wb')
else:
buf = outfile
try:
writer = csv.DictWriter(buf, fieldnames=fieldnames, delimiter=sep)
writer.writerow(header) # It's not just the keys, so we're not using writeheader
for row in lod:
writer.writerow(row)
finally:
if isinstance(outfile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
def get_project_variables(token, uri, project_id):
"""It opens a connection to Analyze and then
gets vars for a given project
Args:
token (str): oAuth token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
project_id (str): Id of the Project for which to grab the variables
Returns:
dict: Variables as key/values
"""
rpc = SimpleRPC(token, uri, verify_ssl=True)
try:
project_vars = rpc.analyze.project.variables(project_id=project_id)
except:
project_vars = rpc.analyze.project.variables(project=project_id)
return {pv['id']: pv['value'] for pv in project_vars}
def download(tables, configuration=None, retries=5, conn=None, clean=False, **kwargs):
"""This replaces the old get_tables() that was client-specific.
It opens a connection to Analyze and then
accepts a set of tables and saves them off to a local location.
For now, tables are understood to be typed psv's, but that can expand to
suit the need of the application (for instance, Excel.)
Args:
tables (set or list): table paths to retrieve (for backwards compatibility, you can leave off the initial '/')
token (str): token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
local_storage_path (str): local path where files should be saved. Would typically originate
from a local config.
**kwargs:
config (dict) contains a dict of config settings
token (str) simpleRFC authorization token
uri (str): uri e.g. 'https://ci.plaidcloud.com/json-rpc/'
local_storage_path (str) Target for files being saved
Returns:
The return value of function. If retries are exhausted, raises the
final Exception.
Examples:
"""
# TODO: if configuration is None, revert to **kwargs for the params we need.
if not conn:
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
try:
return_df = configuration['return_df']
except:
return_df = True
try:
project_id = configuration['project_id']
except:
project_id = conn.project_id
dfs = []
for table in tables:
table_path = table.get('table_name')
query = table.get('query')
table_obj = table.get('table_object')
df = None # Initial value
# wipe this out each time through
clean_df = pd.DataFrame()
logger.debug("Attempting to download {0}...".format(table_path))
tries = 1
if table_obj is not None:
# RPC table object exists; proceed to use it to fetch data
while tries <= retries:
if query is None:
# no query passed. fetch whole table
df = conn.get_dataframe(table_obj, clean=clean)
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
elif isinstance(query, six.string_types):
# query object passed in. execute it
try:
df = conn.get_dataframe_by_querystring(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
else:
# query object passed in. execute it
try:
df = conn.get_dataframe_by_query(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
tries += 1
columns = table_obj.cols()
if columns:
if isinstance(df, pd.core.frame.DataFrame):
cols = [c['id'] for c in columns if c['id'] in df.columns.tolist()]
df = df[cols] # this ensures that the column order is as expected
else:
cols = [c['id'] for c in columns]
df = pd.DataFrame(columns=cols) # create empty dataframe with expected metadata/shape
else:
if not table_path.startswith('/'):
table_path = '/{}'.format(table_path)
table_result = None
while not table_result and tries <= retries:
tries += 1
try:
table_result = conn.analyze.table.table(project_id=project_id, table_path=table_path)
logger.debug("Downloaded {0}...".format(table_path))
break
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
df = table_result_to_df(table_result or pd.DataFrame())
if not isinstance(df, pd.core.frame.DataFrame):
logger.exception('Table {0} failed to download!'.format(table_path))
elif len(df.columns) == 0:
logger.exception('Table {0} downloaded 0 records!'.format(table_path))
else:
if clean and query:
# Use the old cleaning process for things other than the full query.
clean_df = dh.clean_frame(df)
else:
clean_df = df
dfs.append({'df': clean_df, 'name': table_path})
return dfs
def load(source_tables, fetch=True, cache_locally=False, configuration=None, conn=None, clean=False):
"""Load frame(s) from requested source, returning a list of dicts
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
"""
return_type = None
if type(source_tables) == list:
return_type = 'list'
elif type(source_tables) == str:
# single table (as string) passed... expecting to return full table
source_tables = [source_tables]
return_type = 'dataframe'
elif type(source_tables) == dict:
# single table (as dict) passed... likely with subsetting query, but not req'd
source_tables = [source_tables]
return_type = 'dataframe'
source_tables_proper = []
reassign = False
for s in source_tables:
if type(s) == str:
# convert list of strings into a list of dicts
reassign = True
d = {}
d['table_name'] = s
source_tables_proper.append(d)
if reassign:
# replace source_tables with reformatted version
source_tables = source_tables_proper
dfs = []
if fetch is True:
if not conn:
# create connection object
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
for s in source_tables:
# create table objects if they don't exist
if s.get('table_object') == None:
s['table_object'] = Table(conn, s.get('table_name'))
downloads = download(source_tables, configuration=configuration, conn=conn, clean=clean)
for d in downloads:
df = d.get('df')
name_of_df = '{0}.psv'.format(d.get('name'))
if name_of_df.startswith('/'):
name_of_df = name_of_df[1:]
if cache_locally is True:
with open(os.path.join(configuration['LOCAL_STORAGE'], name_of_df), 'w') as f:
save_typed_psv(df, f)
dfs.append(df)
else:
for s in source_tables:
source_table = '{0}.psv'.format(s.get('table_name'))
source_path = os.path.join(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
if return_type == 'dataframe':
return dfs[0]
else:
return dfs
def load_new(source_tables, sep='|', fetch=True, cache_locally=False, configuration=None, connection=None):
"""Load frame(s) from requested source
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
TODO: Make it fetch from analyze table....really this should be assimilated with dwim once dwim works again.
TODO: Make it go to analyze and cache locally, if requested to do so.
"""
if connection:
configuration['project_id'] = connection.project_id
if fetch is True:
download(source_tables, configuration)
dfs = []
for source_table in source_tables:
_, table_name = posixpath.split(source_table)
source_path = '{}/{}.psv'.format(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
return dfs
def dtype_from_sql(sql):
"""Gets a pandas dtype from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas dtype equivalent of `sql`
"""
mapping = {
'boolean': 'bool',
'text': 'object',
'smallint': 'int16',
'integer': 'int32',
'bigint': 'int64',
'numeric': 'float64',
'timestamp': 'datetime64[s]',
'interval': 'timedelta64[s]',
'date': 'datetime64[s]',
'time': 'datetime64[s]',
}
return mapping.get(str(sql).lower(), None)
def sturdy_cast_as_float(input_val):
"""
Force a value to be of type 'float'. Sturdy and unbreakeable.
Works like data_helpers.cast_as_float except it returns NaN and None
in cases where such seems appropriate, whereas the former forces to 0.0.
"""
if input_val is None:
return 0.0
try:
        if np.isnan(input_val):
            return float('nan')
else:
try:
return float(input_val)
except ValueError:
return None
except:
try:
return float(input_val)
except ValueError:
return None
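# Illustrative sketch (not part of the original module) of the intended behaviour:
# numeric strings become floats, unparseable text becomes None, None is forced to
# 0.0 and NaN stays NaN.
def _example_sturdy_cast():
    # -> [1.5, None, 0.0, nan]
    return [sturdy_cast_as_float(v) for v in ('1.5', 'abc', None, float('nan'))]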
def converter_from_sql(sql):
"""Gets a pandas converter from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas converter
"""
mapping = {
'boolean': bool,
'text': str,
'smallint': int,
'integer': int,
'bigint': int,
#'numeric': float, #dh.cast_as_float,
#'numeric': dh.cast_as_float,
'numeric': sturdy_cast_as_float,
'timestamp': pd.datetime,
'interval': pd.datetime,
'date': pd.datetime,
'time': pd.datetime,
}
return mapping.get(str(sql).lower(), str(sql).lower())
def load_typed_psv(infile, sep='|', **kwargs):
""" Loads a typed psv into a pandas dataframe. If the psv isn't typed,
loads it anyway.
Args:
infile (str): The path to the input file
sep (str, optional): The separator used in the input file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
#should probably pass as many of them as possible on to to_csv/read_ccsv -
#the only issue is we have to make sure the header stays consistent.
if isinstance(infile, six.string_types):
if os.path.exists(infile):
buf = open(infile, 'rb')
else:
logger.exception('File does not exist: {0}'.format(infile))
return False
else:
buf = infile
try:
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
names_and_types = [h.split(CSV_TYPE_DELIMITER) for h in header]
column_names = [n[0] for n in names_and_types]
try:
dtypes = {
name: dtype_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
dtypes = None
converters={}
#for name, sqltype in names_and_types:
#converter = converter_from_sql(sqltype)
#if converter:
#converters[name] = converter
try:
converters = {
name: converter_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
converters = None
# This will start on the second line, since we already read the first line.
#return pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep)
na_values = [
#'', # This was here, and then commented out, and I'm putting it back in 20180824. ***
# # If it isn't here, we fail when attempting to import a delimited file of type 'numeric'
# # it is coming in as null/empty (e.g. the last record in the following set:)
# # LE::text|PERIOD::text|RC::text|MGCOA::text|VT::text|TP::text|FRB::text|FUNCTION::text|DCOV::numeric|LOCAL_CURRENCY::text|CURRENCY_RATE::numeric|DCOV_LC::numeric
# # LE_0585|2018_01|6019999|6120_NA|VT_0585|TP_NA|FRB_AP74358|OM|0.00031|EUR|0.8198|0.000254138
# # LE_0003|2018_07|CA10991|5380_EBITX|VT_9988|TP_NA|FRB_APKRA15|OM|-0.00115|INR|68.7297|-0.079039155
# # LE_2380|2017_08|AP92099|Q_5010_EBITX|VT_0585|TP_NA|FRB_AP92099|RE|99|||
'#N/A',
'#N/A N/A',
'#NA',
'-1.#IND',
'-1.#QNAN',
'-NaN',
'-nan',
'1.#IND',
'1.#QNAN',
'N/A',
'NA',
'NULL',
'NaN',
'n/a',
'nan',
'null'
]
parse_dates = []
if dtypes is not None:
for k, v in six.iteritems(dtypes):
dtypes[k] = v.lower()
#Handle inbound dates
#https://stackoverflow.com/questions/21269399/datetime-dtypes-in-pandas-read-csv
if 'datetime' in dtypes[k]:
dtypes[k] = 'object'
parse_dates.append(k)
try:
        df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep,
                         na_values=na_values, keep_default_na=False,
                         parse_dates=parse_dates, encoding='utf-8')
except ValueError:
#remove dtypes if we have converters instead:
for k in six.iterkeys(converters):
if k in list(dtypes.keys()):
dtypes.pop(k, None)
na_values.append('')
buf = open(infile, 'rb')
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
        df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep,
                         na_values=na_values, keep_default_na=False, parse_dates=parse_dates,
                         converters=converters, encoding='utf-8')
finally:
# A final note:
        # SURELY there's a more efficient and native pandas way of doing this, but I'll be damned if I could figure it out.
# Pandas used to have an error='coerce' method to force data type. It's no longer an option, it seems.
# Forcing data type is NOT easy, when incoming text data is sequential delimiters with no values or whitespace.
# What We're doing now is still not w/o risk. There are use cases for setting empty to zero, which is what we're doing, and use cases to set
# empty to null, which is probably what we SHOULD do, but for now, we do it this way because we already have a battle hardened dh.cast_as_float that
# works this way. We should probably just call a different homegrown float that returns a NaN or None (None being preferred) rather than 0.0 on exception.
# Mercy. This has been a pain.
# I guess if it was easy, Pandas wouldn't support the ability to send in your own converters.
pass
return df
finally:
if isinstance(infile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
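# Illustrative sketch (not part of the original module): a round trip through
# save_typed_psv()/load_typed_psv() should preserve column names and the
# analyze-compatible SQL types encoded in the header. The /tmp path is made up.
def _example_typed_psv_round_trip(path='/tmp/example_round_trip.psv'):
    df_out = pd.DataFrame({'id': [1, 2], 'amount': [1.5, 2.5], 'label': ['a', 'b']})
    save_typed_psv(df_out, path)
    df_in = load_typed_psv(path)
    return df_in.dtypes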
def table_result_to_df(result):
"""Converts a SQL result to a pandas dataframe
Args:
result (dict): The result of a database query
Returns:
`pandas.DataFrame`: A dataframe representation of `result`
"""
meta = result['meta']
data = result['data']
columns = [m['id'] for m in meta]
dtypes = {
m['id']: dtype_from_sql(m['dtype'].lower())
for m in meta
}
df = pd.DataFrame.from_records(data, columns=columns)
try:
typed_df = df.astype(dtype=dtypes)
except:
"""
This is heavy-handed, but it had to be.
Something was tripping up the standard behavior, presumably relating to
handling of nulls in floats. We're forcing them to 0.0 for now, which is possibly
sketchy, depending on the use case, but usually preferred behavior.
Buyer beware.
"""
typed_df = df
for col in typed_df.columns:
if dtypes[col] == u'object':
typed_df[col] = list(map(dh.cast_as_str, typed_df[col]))
elif dtypes[col].startswith(u'float'):
typed_df[col] = list(map(dh.cast_as_float, typed_df[col]))
elif dtypes[col].startswith(u'int'): #detect any flavor of int and cast it as int.
typed_df[col] = list(map(dh.cast_as_int, typed_df[col]))
return typed_df
def dwim_save(df, name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, saves a dataframe as an analyze table.
Otherwise saves it as a typed psv in localdir.
Args:
df (`pandas.DataFrame`): The dataframe to save
name (str): The name to save this dataframe as
localdir (str, optional): The local path to save the typed psv
lvl (str, optional): What level (project/model) the table should be
extension (str, optional): What file extension to give the output file
sep (str, optional): The separator to use in the output file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
#should probably pass as many of them as possible on to to_csv/read_ccsv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import save, save_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
save_fn = {
'model': save,
'project': save_project,
}[lvl]
save_fn(df, name)
except ImportError:
# We must not be on an app server, so save as typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
save_typed_psv(df, path, sep)
def dwim_load(name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, loads an analyze table.
Otherwise loads a typed psv from localdir.
Args:
name (str): The name of the table or file to load
localdir (str, optional): The path to the directory where the local file is stored
lvl (str, optional): The level (model/project) of the table to load
extension (str, optional): The flie extension of the local file
sep (str, optional): The separator used in the local file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
#should probably pass as many of them as possible on to to_csv/read_ccsv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import load, load_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
load_fn = {
'model': load,
'project': load_project,
}[lvl]
return load_fn(name)
except ImportError:
# We must not be on an app server, so load from typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
return load_typed_psv(path, sep)
def clean_uuid(id):
"""Removes any invalid characters from a UUID and ensures it is 32 or 36 characters
Args:
id (str): The ID to clean
Returns:
str: `id` with any invalid characters removed
"""
# !! WARNING: If you're calling this in new code, make sure it's really what you
# !! want. It used to remove dashes. That turned out to be a bad idea. Now
# !! it leaves dashes in.
#
# !! If you've found a bug related to dashes being left in, and this is
# !! being called on lookup, you should probably just remove the call to
# !! clean_uuid. Going forward, we don't remove dashes.
if id is None:
return None
name = six.text_type(id).lower()
valid_chars = '0123456789abcdef-'
cleaned_id = u''.join(n for n in name if n in valid_chars)
if '-' in cleaned_id:
if len(cleaned_id) != 36:
raise Exception("Could not clean id {}. Not 36 characters long.".format(id))
else:
if len(cleaned_id) != 32:
raise Exception("Could not clean id {}. Not 32 characters long.".format(id))
return cleaned_id
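# Illustrative sketch (not part of the original module): dashes are preserved,
# everything is lowercased, and anything that does not end up 32 or 36 characters
# long raises an Exception.
def _example_clean_uuid():
    # -> '123e4567-e89b-12d3-a456-426614174000'
    return clean_uuid('123E4567-E89B-12D3-A456-426614174000')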
def clean_name(name):
"""
DEPRECATED: does nothing
Removes any invalid characters from a name and limits it to 63 characters
Args:
name (str): The name to clean
Returns:
str: The cleaned version of `name`
"""
return name
def clean_filename(name):
"""Remove '/' from a name
Args:
name (str): the filename to clean
Returns:
str: the cleaned version of `name`
"""
if name is None:
return None
# everything's fine except /
return six.text_type(name).translate({'/': None})
def describe(df):
"""Shorthand for df.describe()
Args:
df (`pandas.DataFrame`): The dataframe to describe
Returns:
summary: Series/DataFrame of summary statistics
"""
return df.describe()
def unique_values(df, column):
"""Returns unique values in the provided column
Args:
df (`pandas.DataFrame`): The DataFrame containing data
column (str): The column to find unique values in
Returns:
list: The unique values in the column
"""
return df[column].unique()
def count_unique(group_by, count_column, df):
"""Returns a count of unique items in a dataframe
Args:
group_by (str): The group by statement to apply to the dataframe
count_column (str): The column to count unique records in
df (`pandas.DataFrame`): The DataFrame containing the data
Returns:
int: The count of unique items in the specified column after grouping
"""
return df.groupby(group_by)[count_column].apply(lambda x: len(x.unique()))
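# Illustrative sketch (not part of the original module): counting distinct
# customers per region with made-up data.
def _example_count_unique():
    df = pd.DataFrame({'region': ['east', 'east', 'west'],
                       'customer': ['a', 'b', 'b']})
    # east -> 2, west -> 1
    return count_unique('region', 'customer', df)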
def sum(group_by, df):
    """Returns the grouped sum of all columns in `df`"""
    return df.groupby(group_by).sum()
def std(group_by, df):
    """Returns the grouped standard deviation of all columns in `df`"""
    return df.groupby(group_by).std()
def mean(group_by, df):
    """Returns the grouped mean of all columns in `df`"""
    return df.groupby(group_by).mean()
def count(group_by, df):
    """Returns the grouped count of all columns in `df`"""
    return df.groupby(group_by).count()
def inner_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps only matches
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='inner')
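# Illustrative sketch (not part of the original module): keep_columns limits which
# right-hand columns survive the join. All data here is made up.
def _example_inner_join():
    left = pd.DataFrame({'id': [1, 2, 3], 'amount': [10, 20, 30]})
    right = pd.DataFrame({'id': [2, 3, 4], 'name': ['b', 'c', 'd'], 'extra': [0, 0, 0]})
    # Only rows with id 2 and 3 survive, and 'extra' is dropped.
    return inner_join(left, right, left_on='id', keep_columns=['name'])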
def outer_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps data from both frames and matches up using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='outer')
def left_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from left frame and any matches in right using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='left')
def right_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from right frame and any matches in left using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='right')
def anti_join(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from left frame that is not found in right frame
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
indicator_status = False
indicator_name = '_merge'
left_cols = left_frame.columns
# avoid collision with pd generated indicator name
while not indicator_status:
if indicator_name in left_cols:
indicator_name = '_' + indicator_name
else:
indicator_status = True
df = pd.merge(left_frame, right_frame[right_on], how='left', left_on=left_on, right_on=right_on, indicator=indicator_name)
df = df[df[indicator_name] == 'left_only']
del df[indicator_name]
return df
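# Illustrative sketch (not part of the original module): rows in the left frame
# with no match on the right are kept. All data here is made up.
def _example_anti_join():
    left = pd.DataFrame({'id': [1, 2, 3]})
    right = pd.DataFrame({'id': [2, 3]})
    # Only the row with id == 1 remains.
    return anti_join(left, right, left_on='id')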
def compare(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from right frame and any matches in left using the on_columns"""
#20180420 PBB Is "compare" a good name for this, it's basically a right-join in SQL terms?
#20180420 MWR It's quite old legacy. Not sure this one has ever been used for anything. Perhaps
# we can just do away with it.
if right_on is None:
right_on = left_on
return pd.merge(left_frame, right_frame, left_on=left_on, right_on=right_on, how='outer')
def apply_rule(df, rules, target_columns=None, include_once=True, show_rules=False):
"""
If include_once is True, then condition n+1 only applied to records left after condition n.
Adding target column(s), plural, because we'd want to only run this operation once, even
if we needed to set multiple columns.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
rules (list): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
Returns:
pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_final = pd.DataFrame()
df['temp_index'] = df.index
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
# Establish new column(s) as blank columns.
for column in target_columns:
df[column] = ''
def exclude_matched(include, match):
"""
Exclude if matched, or if previously excluded
Please do not change the 'if match is True:' line to 'if match:'. It matters here.
"""
return False if match is True else include
rule_num = 0
for rule in rules:
rule_num = rule_num + 1
rule_condition = rule.get('condition')
# Find subset based on condition
if rule_condition is not None and rule_condition != '' and str(rule_condition) != 'nan':
try:
df_subset = df[df['include'] == True].query(rule_condition, engine='python')
print('subset length: {}'.format(len(df[df['include'] == True])))
if show_rules:
df_subset['rule_number'] = str(rule_num)
df_subset['rule'] = str(rule_condition)
except Exception as e:
# TODO update this. We should capture all exceptions in an exception table.
df_subset = pd.DataFrame()
def add_message(log):
return '<{} ::: {}>'.format(e, log) # removed redundant rule_condition format param from here
if show_rules:
df['log'] = list(map(add_message, df['log']))
error_msg = ' (rule_num {0}) {1} error: {2}'.format(rule_num, rule_condition, e)
logger.exception('EXCEPTION {}'.format(error_msg))
else:
df_subset = df[df['include'] == True]
# Populate target columns as specified in split
for column in target_columns:
df_subset[column] = rule[column]
# need to find a way to flip the flag once data has been selected
if include_once:
# Exclude the records of the current split from exposure to
# subsequent filters.
#if statement handles edge case where df is empty and has no columns.
if 'temp_index' in df_subset.columns:
#refactor to be m*1 not m*n.
df_subset['match'] = True
df = lookup(
df,
df_subset,
left_on=['temp_index'],
right_on=['temp_index'],
keep_columns=['match']
)
df['include'] = list(map(exclude_matched, df['include'], df['match']))
del df['match']
# The way we're doing this allows multiple matches
# if include_once is false.
# Future: MAY be a reason to allow first-in wins or last-in wins, or ALL win.
df_final = pd.concat([df_final, df_subset])
print('length:{}'.format(len(df_subset)))
df_final.drop(columns=['temp_index', 'include'], inplace=True, errors='ignore')
return df_final
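# --- Illustrative usage (added sketch, not part of the original module) ---
# Shows the rule format apply_rule() expects: each rule is a dict with a
# 'condition' (a DataFrame.query expression) plus one key per target column.
# The data, conditions and 'bucket' values are invented for the example.
def _example_apply_rule():
    df = pd.DataFrame({'amount': [5, 50, 500]})
    rules = [
        {'condition': 'amount >= 100', 'bucket': 'large'},
        {'condition': 'amount >= 10', 'bucket': 'medium'},
        {'condition': '', 'bucket': 'small'},  # blank condition catches whatever is left
    ]
    # with include_once=True (the default) each row is claimed by the first rule it matches
    return apply_rule(df, rules, target_columns=['bucket'])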
def apply_rules(df, df_rules, target_columns=None, include_once=True, show_rules=False,
verbose=True, unmatched_rule='UNMATCHED', condition_column='condition', iteration_column='iteration',
rule_id_column=None, logger=logger):
"""
If include_once is True, then condition n+1 only applied to records left after condition n.
Adding target column(s), plural, because we'd want to only run this operation once, even
if we needed to set multiple columns.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
df_rules (pandas.DataFrame): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
verbose (bool, optional): Display the rules in the log messages? Defaults
to `True`. This is not overly heavier than leaving it off, so we probably should
always leave it on unless logging is off altogether.
unmatched_rule (str, optional): Default rule to write in cases of records not matching any rule
condition_column (str, optional): Column name containing the rule condition, defaults to 'condition'
rule_id_column (str, optional): Column name containing the rule id, just set to index if not provided
logger (object, optional): Logger to record any output
Returns:
list of pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_rules = df_rules.reset_index(drop=True)
if iteration_column not in df_rules.columns:
df_rules[iteration_column] = 1
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
df['rule_id'] = '' if rule_id_column else df.index
# Establish new column(s) as blank columns <i>if they do not already exist.</i>
for column in target_columns:
if column not in df.columns:
df[column] = ''
summary = []
iterations = list(set(df_rules[iteration_column]))
iterations.sort()
for iteration in iterations:
df['include'] = True
def write_rule_numbers(rule_num):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if rule_num == '':
return str(index)
else:
return '{}, {}'.format(rule_num, str(index))
def write_rule_conditions(condition):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if condition == '':
return str(rule[condition_column])
else:
return '{}, {}'.format(condition, str(rule[condition_column]))
def write_rule_id(rule_id):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if rule_id == '':
return str(rule[rule_id_column])
else:
return '{}, {}'.format(rule_id, str(rule[rule_id_column]))
matches = [] # for use when include_once is False
index = 0
for index, rule in df_rules[df_rules[iteration_column] == iteration].iterrows():
# Find subset based on condition
df_subset = df[df['include'] == True]
input_length = len(df_subset)
if include_once is True and input_length == 0:
break
if verbose:
logger.debug('')
logger.debug('iteration:{} - rule:{} - {}'.format(iteration, index, rule[condition_column]))
if rule[condition_column] is not None and rule[condition_column] != '' and str(rule[condition_column]) != 'nan':
try:
df_subset = df_subset.query(rule[condition_column])#, engine='python')
if verbose:
logger.debug('{} - input length'.format(input_length))
if show_rules is True:
if include_once is True:
df.loc[list(df_subset.index), 'rule_number'] = list(map(write_rule_numbers, df.loc[list(df_subset.index), 'rule_number']))
df.loc[list(df_subset.index), 'rule'] = list(map(write_rule_conditions, df.loc[list(df_subset.index), 'rule']))
if rule_id_column:
df.loc[list(df_subset.index), 'rule_id'] = list(map(write_rule_id, df.loc[list(df_subset.index), 'rule_id']))
else:
df_subset['rule_number'] = df_subset.index
df_subset['rule'] = rule[condition_column]
if rule_id_column:
df_subset['rule_id'] = rule[rule_id_column]
except Exception as e:
df_subset = pd.DataFrame()
def add_message(log):
return '<{} ::: {}>'.format(e, log) # removed redundant rule[condition_column] param from format string
if show_rules is True:
df['log'] = list(map(add_message, df['log']))
error_msg = ' (rule_num {0}) {1} error: {2}'.format(index, rule[condition_column], e)
logger.exception('EXCEPTION {}'.format(error_msg))
# Populate target columns as specified in split
for column in target_columns:
if rule[column] not in ['nan', '', 'None', None]:
if include_once is True:
df.loc[list(df_subset.index), column] = rule[column]
else:
df_subset[column] = rule[column]
# The way we're doing this allows multiple matches if include_once is False.
# Future: MAY be a reason to allow first-in wins or last-in wins, or ALL win.
# MIKE look here.
# df_final = pd.concat([df_final, df_subset])
matched_length = len(df_subset)
if verbose:
logger.debug('{} - matched length'.format(matched_length))
if include_once:
# Exclude the records of the current split from exposure to subsequent filters.
df.loc[list(df_subset.index), 'include'] = False
else:
if matched_length > 0:
matches.append(df_subset)
summary_record = {
'row_num': index,
iteration_column: iteration,
'input_records': input_length,
'matched_records': matched_length,
}
summary_record.update(rule)
summary.append(summary_record)
if include_once is False:
if len(matches) > 0:
df = pd.concat(matches)
else:
df = pd.DataFrame().reindex(columns=df.columns)
# unmatched record:
unmatched_length = len(df[df['include'] == True])
summary.append({
'row_num': index+1,
iteration_column: iteration,
'input_records': unmatched_length,
'matched_records': unmatched_length,
'rule': unmatched_rule
})
df_summary = pd.DataFrame.from_records(summary)
if show_rules is True:
df.drop(columns=['include'], inplace=True, errors='ignore')
else:
df.drop(columns=['include', 'rule_number', 'rule', 'rule_id'], inplace=True, errors='ignore')
return [df, df_summary]
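# --- Illustrative usage (added sketch, not part of the original module) ---
# Same idea as apply_rule() above, but the rules arrive as a DataFrame with a
# 'condition' column plus one column per target, and the call returns
# [result_df, summary_df]. Data and rule values are invented; the module-level
# logger is assumed to be configured since verbose defaults to True.
def _example_apply_rules():
    df = pd.DataFrame({'amount': [5, 50, 500]})
    df_rules = pd.DataFrame([
        {'condition': 'amount >= 100', 'bucket': 'large'},
        {'condition': 'amount >= 10', 'bucket': 'medium'},
        {'condition': '', 'bucket': 'small'},
    ])
    result, summary = apply_rules(df, df_rules, target_columns=['bucket'])
    return result, summary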
def memoize(fn):
cache = fn.cache = {}
@wraps(fn)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return memoizer
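# --- Illustrative usage (added sketch, not part of the original module) ---
# memoize() keys the cache on the string form of the call arguments, so repeated
# calls with the same inputs skip the function body. _example_slow_square is a
# hypothetical function used only to illustrate the decorator.
@memoize
def _example_slow_square(x):
    return x * x  # first call computes; later calls with the same x hit fn.cache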
def trailing_negative(value, default=0):
"""Attempts to handle the trailing negative issue in a more performant way
Args:
value (str, int, or float): The value to clean
default (float or int, optional): A default value to return if `value`
cannot be cleaned
Returns:
float: The cleaned version of `value`, or `default`
"""
try:
return float(value)
except:
try:
return -float(value[:-1])
except:
return default
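# --- Illustrative usage (added sketch, not part of the original module) ---
# trailing_negative() turns accounting-style values like '125.00-' into -125.0,
# passes plain numbers through, and falls back to `default` for anything else.
def _example_trailing_negative():
    assert trailing_negative('125.00-') == -125.0
    assert trailing_negative(3.5) == 3.5
    assert trailing_negative('n/a', default=0) == 0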
def now(offset=0):
"""Returns the current date/time, with an optional offset
Args:
offset (int, optional): The offset to apply to the current time
Returns:
`utc.timestamp`: The current date/time, with `offset` applied
"""
dt = utc.timestamp()
if offset != 0:
off = datetime.timedelta(hours=offset)
dt = dt + off
return dt
def concat(left_frame, right_frame):
    """Concatenate two DataFrames
    Args:
        left_frame (`pandas.DataFrame`): The first frame to concat
        right_frame (`pandas.DataFrame`): The second frame to concat
    Returns:
        `pandas.DataFrame`: The concatenation of `left_frame` and `right_frame`
    """
    return pd.concat([left_frame, right_frame])
def covariance(df, columns, min_observations=0):
"""Compute pairwise covariances among the series in the DataFrame, also excluding NA/null values
Args:
df (`pandas.DataFrame`): The dataframe to compute on
        columns (None): DEPRECATED - Columns are now determined from `df`
min_observations (int, optional): Minimum observations, defaults to `0`
"""
if min_observations is not None and min_observations > 0:
return df.cov(min_periods=min_observations)
else:
return df.cov()
def correlation(first_series, second_series, method='pearson'):
    """Compute the correlation between two series
    Args:
        first_series (`pandas.Series`): The first series
        second_series (`pandas.Series`): The series to correlate against
        method (str, optional): 'pearson', 'kendall' or 'spearman', defaults to 'pearson'
    Returns:
        float: The correlation between the two series
    """
    return first_series.corr(second_series, method=method)
def apply_agg(df, group_by, column_operations):
"""Pass in a dict of key values for columns and operations
{'A': 'sum', 'B': 'std', 'C': 'mean'}
Args:
df (`pandas.DataFrame`): The dataframe to apply aggregation to
group_by (str): The group by operation to apply to `df`
column_operations (dict): The operations to apply and on which columns
Returns:
`pandas.DataFrame`: `df` with aggregation applied
"""
# Taken from operations that can be performed on columns as described here:
# http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.html
valid_operations = (
'mean', 'median', 'mode',
'sum', 'std', 'var', 'size', 'first', 'last', 'prod', 'product',
'min', 'max', 'abs', 'quantile',
'skew', # Return unbiased skew over requested axis
'kurtosis', # Return unbiased kurtosis over requested axis
'mad', # Return the mean absolute deviation of the values for the requested axis
'cumsum', # Return cumulative sum over requested axis
'cummin', # Return cumulative min over requested axis.
'cummax', # Return cumulative max over requested axis.
'cumprod' # Return cumulative prod over requested axis.
)
final = {}
for co in column_operations.keys():
if column_operations[co] in valid_operations:
final[co] = column_operations[co]
return df.groupby(group_by).agg(final).reset_index()
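# --- Illustrative usage (added sketch, not part of the original module) ---
# apply_agg() takes a group-by key and a {column: operation} dict and silently
# drops any operation not on its whitelist. The frame and column names are invented.
def _example_apply_agg():
    df = pd.DataFrame({'region': ['N', 'N', 'S'], 'sales': [10, 20, 5], 'units': [1, 2, 1]})
    # one row per region, with summed sales and mean units
    return apply_agg(df, 'region', {'sales': 'sum', 'units': 'mean'})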
def distinct(df, columns=None, keep='first', inplace=False):
"""Removes duplicate items from columns
Args:
df (`pandas.DataFrame`): The DataFrame to operate on
columns (list, optional): Specific columns to operate on
keep (str, optional): Which row containing duplicate values to keep.
defaults to `'first'`
inplace (bool, optional): Should duplicate values be removed from the
source `df`? Defaults to `False`
Returns:
`pandas.DataFrame`: The `df` with duplicate values removed
"""
return df.drop_duplicates(subset=columns,
keep=keep,
inplace=inplace)
def find_duplicates(df, columns=None, take_last=False):
"""Locates duplicate values in a dataframe
Args:
df (`pandas.DataFrame`): The DataFrame to find duplicates in
        columns (`list`, optional): Specific columns to find duplicates in
take_last (bool, optional): Should the last duplicate not be marked as a duplicate?
Defaults to `False`
Returns:
`pandas.DataFrame`: A frame containing duplicates
"""
    mask = df.duplicated(subset=columns, keep='last' if take_last else 'first')
return df.loc[mask]
def sort(df, sort_columns, sort_directions=True):
"""Sorts a dataframe
wraps `pandas.DataFrame.sort_values()`
Args:
df (`pandas.DataFrame`): The dataframe to sort
sort_columns (list): A list of the columns to sort on
sort_directions (bool, optional): `True` to sort ascending, `False`
to sort descending
Returns:
`pandas.DataFrame`: `df` sorted by the provided columns
"""
return df.sort_values(sort_columns, ascending=sort_directions)
def replace_column(df, column, replace_dict):
"""cdystonia2.treat.replace({'a':{'Placebo': 0, '5000U': 1, '10000U': 2}})
Args:
df (`pandas.DataFrame`): The dataframe to replace in
column (str): Which column to replace values in
replace_dict (dict): What values to replace, and what to replace them with
Returns:
`pandas.dataframe`: `df` with the values replaced
"""
return df.replace({column: replace_dict})
def replace(df, replace_dict):
"""Replaces values in columns based on a dict
Args:
df (`pandas.DataFrame`): The dataframe to replace values in
        replace_dict (dict): A dict containing columns, the values to replace, and what to replace them with.
Should be formatted like:
{
'column_a': {'$': '', ',':''},
'column_b': {'bought': 1, 'sold': -1}
}
"""
return df.replace(replace_dict)
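# --- Illustrative usage (added sketch, not part of the original module) ---
# replace() applies a nested {column: {old: new}} mapping in one pass; note that
# the matching is on whole cell values. The frame and codes below are invented.
def _example_replace():
    df = pd.DataFrame({'side': ['bought', 'sold'], 'status': ['open', 'closed']})
    return replace(df, {'side': {'bought': 1, 'sold': -1}, 'status': {'open': 'O', 'closed': 'C'}})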
def reindex(df, columns):
"""Reindexes a dataframe
wraps `pandas.DataFrame.reindex()`
Args:
df (`pandas.DataFrame`): The dataframe to reindex
columns (:type:`list`): A list of the columns to reindex
Returns:
`pandas.DataFrame`: A reindexed version of `df`
"""
    return df.reindex(columns=columns)
def rename_columns(df, rename_dict):
"""Renames columns based on `rename_dict`
Args:
df (`pandas.DataFrame`): The dataframe to rename columns in
rename_dict (:type:`dict`): A dict in the format `{'old_name': 'new_name'}`
to use to rename the columns
Returns:
`pandas.DataFrame`: `df` with renamed columns
"""
return df.rename(columns=rename_dict)
def column_info(df):
"""Returns a list of columns and their datatypes
Args:
df (`pandas.DataFrame`): The dataframe to get info for
Returns:
:type:`list` of :type:`dict`: A list of dicts containing column ids and their
Dtypes
"""
column_info = []
for column_name, data_type in six.iteritems(df.dtypes):
temp = {'id': column_name, 'dtype': str(data_type)}
column_info.append(temp)
return column_info
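# --- Illustrative usage (added sketch, not part of the original module) ---
# column_info() reports each column with its pandas dtype; the example frame is
# invented, and the module-level `six` import used above is assumed.
def _example_column_info():
    df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    # e.g. [{'id': 'a', 'dtype': 'int64'}, {'id': 'b', 'dtype': 'object'}]
    return column_info(df)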
def set_column_types(df, type_dict):
"""Sets the datatypes of columns in a DataFrame
df3.astype('float32').dtypes
Here are the date units:
Code Meaning Time span (relative) Time span (absolute)
Y year +/- 9.2e18 years [9.2e18 BC, 9.2e18 AD]
M month +/- 7.6e17 years [7.6e17 BC, 7.6e17 AD]
W week +/- 1.7e17 years [1.7e17 BC, 1.7e17 AD]
D day +/- 2.5e16 years [2.5e16 BC, 2.5e16 AD]
And here are the time units:
Code Meaning Time span (relative) Time span (absolute)
h hour +/- 1.0e15 years [1.0e15 BC, 1.0e15 AD]
m minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
s second +/- 2.9e12 years [ 2.9e9 BC, 2.9e9 AD]
ms millisecond +/- 2.9e9 years [ 2.9e6 BC, 2.9e6 AD]
us microsecond +/- 2.9e6 years [290301 BC, 294241 AD]
ns nanosecond +/- 292 years [ 1678 AD, 2262 AD]
ps picosecond +/- 106 days [ 1969 AD, 1970 AD]
fs femtosecond +/- 2.6 hours [ 1969 AD, 1970 AD]
as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD]
Args:
df (`pandas.DataFrame`): The dataframe to set column types on
type_dict (dict): A dict to set columns based on. In the format
{'column': 'dtype'}
Returns:
`pandas.DataFrame`: `df` with the column types set
"""
float_column_types = (
'float16', 'float32', 'float64', 'numeric',
)
    #Can't call this int_column_types, because there are a few other non-float numeric types listed
    #that we'll just send through to get the default Pandas.to_numeric() treatment for now.
numeric_non_float_column_types = (
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'complex64', 'complex128',
'bigint', 'smallint',
)
date_column_types = (
'datetime64[as]', 'datetime64[fs]', 'datetime64[ps]', 'datetime64[ns]',
'datetime64[us]', 'datetime64[ms]', 'datetime64[s]', 'datetime64[m]',
'datetime64[h]', 'datetime64[D]', 'datetime64[W]',
'datetime64[M]', 'datetime64[Y]',
'timestamp', 'date', 'time'
)
timedelta_column_types = (
'timedelta64[as]', 'timedelta64[fs]', 'timedelta64[ps]', 'timedelta64[ns]',
'timedelta64[us]', 'timedelta64[ms]', 'timedelta64[s]', 'timedelta64[m]',
'timedelta64[h]', 'timedelta64[D]', 'timedelta64[W]',
        'timedelta64[M]', 'timedelta64[Y]',
        'interval',
)
bool_column_types = (
'bool',
'Boolean',
)
string_column_types = {
'object': 256,
's8': 8,
's16': 16,
's32': 32,
's64': 64,
's128': 128,
's256': 256,
's512': 512,
's1024': 1024,
'text': 256,
}
category_column_types = (
'category',
)
for td in type_dict.keys():
dtype = dtype_from_sql(type_dict[td])
try:
if dtype in float_column_types:
df[td] = pd.to_numeric(df[td], downcast='float')
elif dtype in numeric_non_float_column_types:
df[td] = pd.to_numeric(df[td])
elif dtype in date_column_types:
df[td] = pd.to_datetime(df[td])
elif dtype in timedelta_column_types:
df[td] = pd.to_timedelta(df[td])
elif dtype in list(string_column_types.keys()):
# Keep whatever text is there
pass
elif dtype in bool_column_types:
df[td] = df[td].astype('bool')
elif dtype in category_column_types:
df[td] = df[td].astype('category')
else:
raise Exception('Unknown dtype specified')
except:
logger.exception('EXCEPTION')
err_msg = 'dtype conversion of {0} to {1} FAILED'.format(td, dtype)
raise Exception(err_msg)
return df
def drop_column(df, columns_to_drop):
""" Removes columns from a DataFrame
del df[name]
Args:
df (`pandas.DataFrame`): The dataframe to drop columns on
columns_to_drop (:type:`list` of :type:`str`): A list of the columns
to remove
Returns:
`pandas.DataFrame`: `df` with the provided columns removed
"""
for ctd in columns_to_drop:
del df[ctd]
return df
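# --- Illustrative usage (added sketch, not part of the original module) ---
# drop_column() deletes the named columns in place and returns the same frame.
def _example_drop_column():
    df = pd.DataFrame({'keep': [1, 2], 'scrap': [3, 4]})
    return drop_column(df, ['scrap'])  # only the 'keep' column remains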
def has_data(df):
"""Determines if a DataFrame has any data
Args:
df (`pandas.DataFrame`): A DataFrame to test
Returns:
bool: If `df` has any data
"""
row_max = 0
try:
for k, v in six.iteritems(df.count()):
row_max = max(row_max, int(v))
except:
pass
if row_max > 0:
return True
else:
return False
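# --- Illustrative usage (added sketch, not part of the original module) ---
# has_data() is a defensive emptiness check: it returns True only if at least one
# column has a non-null count above zero.
def _example_has_data():
    assert has_data(pd.DataFrame({'a': [1]}))
    assert not has_data(pd.DataFrame())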
def convert_currency(
df_data,
df_rates,
source_amount_column=None,
target_amount_column=None,
rate_field='RATE',
source_frame_source_currency='CURRENCY_SOURCE',
source_frame_target_currency='CURRENCY_TARGET',
rates_frame_source_currency='CURRENCY_SOURCE',
rates_frame_target_currency='CURRENCY_TARGET',
intermediate_currency='USD',
):
"""
Convert currency from unconverted amount to amount of target currency
    This function requires four parameters: df_data, df_rates, source_amount_column, and target_amount_column.
    df_data and df_rates are pandas DataFrames.
Remaining parameters are all strings representing required or optional column names
We are going to try to do a point-to-point lookup first. If that fails, we will attempt to lookup
from source to USD and then from USD to target, applying correct math along the way.
TODO 1: We should optionally consider period, but for now we'll assume records of both tables pertain to the same period.
TODO 2: Build test cases
TODO 3: Build appropriate warnings for things like:
Sending in source tables with columns that do not line up with expected source columns
Args:
df_data (`pandas.DataFrame`): The frame containing source data
df_rates (`pandas.DataFrame`): A frame containing currency rates
source_amount_column (str): Column of the `source_frame` containing pre-converted amount
target_amount_column (str): Column name of the final converted amount
rate_field (str, optional): Column name of column of the `rates_frame` containing the amount to convert
source_frame_source_currency (str, optional): Column of source frame indicating currency of pre-converted amount
source_frame_target_currency (str, optional): Column of source frame indicating currency of intended post-converted amount
        rates_frame_source_currency (str, optional): Column of the rates frame indicating the currency of the pre-converted amount
        rates_frame_target_currency (str, optional): Column of the rates frame indicating the currency of the intended post-converted amount
        intermediate_currency (str, optional): Currency used as the bridge when no direct rate exists, defaults to 'USD'
Returns:
`pandas.DataFrame`: The results of the lookup
"""
periods_in_use = set(df_data['PERIOD'])
# Set intermediate currency for 2-step currency conversion
df_data['CURRENCY_INTERMEDIATE'] = intermediate_currency
if source_frame_source_currency != 'CURRENCY_SOURCE':
df_data['CURRENCY_SOURCE'] = df_data[source_frame_source_currency]
delete_curr_src_col = True #Clean up after ourselves so we don't send extra columns back that wouldn't be expected.
else:
delete_curr_src_col = False
if source_frame_target_currency != 'CURRENCY_TARGET':
df_data['CURRENCY_TARGET'] = df_data[source_frame_target_currency]
delete_curr_tgt_col = True #Clean up after ourselves so we don't send extra columns back that wouldn't be expected.
else:
delete_curr_tgt_col = False
df_rates = df_rates[df_rates['PERIOD'].isin(periods_in_use)]
df_rates['CURRENCY_SOURCE'] = df_rates[rates_frame_source_currency]
df_rates['CURRENCY_TARGET'] = df_rates[rates_frame_target_currency]
    df_rates['RATE_TYPE'] = '1' #Loaded rates are preferred
    df_rates = df_rates[[
        'PERIOD', #object
        rate_field, #float64
        'CURRENCY_SOURCE', #object
        'CURRENCY_TARGET', #object
        'RATE_TYPE', #object
    ]]
df_rates_flipped = df_rates.copy(deep=True)
df_rates_flipped['CURRENCY_SOURCE_old'] = df_rates_flipped['CURRENCY_SOURCE']
df_rates_flipped['CURRENCY_TARGET_old'] = df_rates_flipped['CURRENCY_TARGET']
df_rates_flipped['CURRENCY_SOURCE'] = df_rates_flipped['CURRENCY_TARGET_old']
df_rates_flipped['CURRENCY_TARGET'] = df_rates_flipped['CURRENCY_SOURCE_old']
df_rates_flipped[rate_field] = 1.0 / df_rates_flipped[rate_field]
del df_rates_flipped['CURRENCY_SOURCE_old']
del df_rates_flipped['CURRENCY_TARGET_old']
df_rates_flipped['RATE_TYPE'] = '2' #Flipped rates are to be used only if loaded rates do not exist
currencies_in_use = list(set(list(set(df_rates['CURRENCY_SOURCE'])) + list(set(df_rates['CURRENCY_TARGET']))))
currencies_in_use.sort()
df_rates_passthrough_wip = pd.DataFrame({'CURRENCY_SOURCE' : currencies_in_use})
df_rates_passthrough_wip['CURRENCY_TARGET'] = df_rates_passthrough_wip['CURRENCY_SOURCE']
df_rates_passthrough_wip[rate_field]=1.0
df_rates_passthrough_wip['RATE_TYPE'] = '3'
df_rates_passthrough = pd.DataFrame()
#Create passthrough rates for each period.
for period in periods_in_use:
df_rates_passthrough_chunk = df_rates_passthrough_wip.copy(deep=True)
df_rates_passthrough_chunk['PERIOD'] = period
df_rates_passthrough = pd.concat([df_rates_passthrough, df_rates_passthrough_chunk])
df_rates = | pd.concat([df_rates, df_rates_flipped, df_rates_passthrough]) | pandas.concat |
# Run from here at the start to load the required functions and dataframes
import Functions
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date']).date
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfPrices = dfWMR
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date']).date
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket = dfMarket[1:]
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = | pd.DataFrame() | pandas.DataFrame |
from tradester.feeds.static import SecuritiesTS
from matplotlib.gridspec import GridSpec
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
import calendar
class Metrics():
def __init__(
self,
portfolio,
oms,
start_date,
end_date,
):
self.portfolio = portfolio
self.oms = oms
self.start_date = start_date
self.end_date = end_date
self.holdings = None
self.values = None
self.trading_log = None
self.statistics = None
self.monthly_returns_pct = None
self.monthly_returns_usd = None
self.yearly_returns = None
self.ts_yearly_returns_usd = None
self.ts_yearly_returns_pct = None
def _calculate(self):
self.holdings = self.portfolio.holdings_df
self.values = self.portfolio.values_df.set_index('date').sort_index()
self.trading_log = self.portfolio.trading_log_df
#if not self.start_date is None:
# self.holdings = self.holdings.loc[self.holdings.date >= pd.to_datetime(self.trade_start_date)]
# self.values = self.values.loc[self.values.index >= pd.to_datetime(self.trade_start_date)]
# self.trading_log = self.trading_log.loc[self.trading_log.date >= pd.to_datetime(self.trade_start_date)]
self.values['expanding_max'] = self.values['value'].expanding().max()
self.values['dd_%'] = (self.values['value'] / self.values['expanding_max'] - 1).apply(lambda x: 0 if x > 0 else x)
self.values['dd_$'] = (self.values['value'] - self.values['expanding_max']).apply(lambda x: 0 if x > 0 else x)
self.values['Long Market Value'] = self.values['long_equity'] / self.values['value']
self.values['Short Market Value'] = self.values['short_equity'] / self.values['value']
self.values['Net Market Value'] = self.values['Long Market Value'] + self.values['Short Market Value']
self.values['Gross Market Value'] = self.values['Long Market Value'] - self.values['Short Market Value']
self.values['cash%'] = self.values['cash']/self.values['value'] - 1
self.values['%'] = self.values['value'].pct_change().fillna(0)
self.values['$'] = self.values['value'].diff().fillna(0)
self.values['cumulative'] = (1+self.values['%']).cumprod().fillna(1)
self.values['date'] = self.values.index
self.values['date'] = self.values['date'].apply(lambda x: x.strftime('%Y-%m-%d'))
self.values['year-month'] = self.values['date'].apply(lambda x: pd.to_datetime(f'{x.split("-")[0]}' +'-'+ f'{x.split("-")[1]}'+'-01'))
self.values['year'] = self.values['date'].apply(lambda x: pd.to_datetime(f'{x.split("-")[0]}'+'-01-01'))
self.values['day_of_year'] = self.values['date'].apply(lambda x: | pd.to_datetime(x) | pandas.to_datetime |
import logging
import requests
import json
import pandas as pd
import timeago
from datetime import datetime
def get_data():
"""
:return: tuple of 2 dataframes
:info:
actuals: Dataset containing the most recent measurements from the weather stations.
forecast: Dataset contaning the 5-day forecast
"""
data = request_data()
actuals = get_actuals(data)
logging.info(f"dataset updated {timeago.format(actuals.timestamp[0], datetime.now() )}")
logging.info(f"pulled measurements from {len(actuals)} weather stations")
forecast = get_forecast(data)
logging.info(f"pulled {len(forecast)} records for 5-day forecast")
return actuals, forecast
def request_data():
"""
:return: json object.
dataset containing information of weatherstation measurements of Buienradar.
"""
r = requests.get("https://data.buienradar.nl/2.0/feed/json")
data = json.loads(r.text)
return data
def get_actuals(data):
"""
:param data: json object retrieved from the buienradar API.
:return: dataframe object containing the current station measurements.
"""
data = data["actual"]
stations = data["stationmeasurements"]
df = | pd.DataFrame(stations) | pandas.DataFrame |
# Import standard python libraries.
import pandas as pd
import numpy as np
import pathlib
import warnings
import sys
# Import the functions used throughout this project from the function dictionary library file
fileDir = pathlib.Path(__file__).parents[2]
code_library_folder = fileDir / 'Code' / 'function_dictionary_library'
sys.path.append(str(code_library_folder))
from coal_data_processing_functions import state_abbreviations, generic_coal_rank, lower_case_data_keys
from statistical_functions import ecdf, weighted_ecdf
from statistics import mean
def weighted_coal_ecdf(coal):
warnings
# Read in (1) COALQUAL Data (2) and the amount of coal mining done in each county. We use skipfooter to not read in the
# search criteria rows.
coalqual_filename = fileDir / 'Data' / 'COALQUAL Data' / 'CQ_upper_level.csv'
COALQUAL = pd.read_csv(coalqual_filename, header=0,
names=['Sample_ID', 'State', 'County', 'Province', 'Region', 'Field', 'Formation', 'Bed',
'Apparent_Rank', 'Sulfur', 'Heat', 'Arsenic', 'Boron', 'Bromine', 'Chlorides',
'Mercury',
'Lead', 'Selenium'], usecols=[0, 1, 2, 5, 6, 7, 9, 11, 28, 84, 87, 147, 151, 159, 165,
191, 219, 239])
mining_volume_filename = fileDir / 'Intermediate' / 'Coal Mining By Counties.csv'
Mining_Volume = pd.read_csv(mining_volume_filename, header=0, names=['Coal_Sales', 'FIPS_Code_State',
'County_Name_State_Normal_Capitalization'],
usecols=[1, 2, 8])
# Drop COALQUAL anthracite and samples with blank apparent rank.
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Anthracite']
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Semianthracite']
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Rock']
COALQUAL = COALQUAL.dropna(subset=['Apparent_Rank'])
# Classify apparent ranks into broad categories.
COALQUAL['Rank'] = generic_coal_rank(COALQUAL.Apparent_Rank)
# Process the columns that will serve as keys for the data merging.
COALQUAL['State_Abbreviation'] = state_abbreviations(COALQUAL.State)
County_Name_State_Normal_Capitalization = COALQUAL['County'] + ' County, ' + COALQUAL['State_Abbreviation']
COALQUAL['County_Name_State'] = lower_case_data_keys(County_Name_State_Normal_Capitalization)
Mining_Volume['County_Name_State'] = lower_case_data_keys(Mining_Volume['County_Name_State_Normal_Capitalization'])
# mask = pd.Series(np.isfinite(COALQUAL['Chlorides']))
COALQUAL_all_samples_Cl = COALQUAL.loc[pd.Series(np.isfinite(COALQUAL['Chlorides']))]
COALQUAL_all_samples_Br = COALQUAL.loc[pd.Series(np.isfinite(COALQUAL['Bromine']))]
COALQUAL_all_samples_Cl = COALQUAL_all_samples_Cl.groupby(['County_Name_State']).mean()
COALQUAL_all_samples_Cl['County_Name_State'] = COALQUAL_all_samples_Cl.index
COALQUAL_all_samples_Cl = | pd.merge(COALQUAL_all_samples_Cl, Mining_Volume, on='County_Name_State') | pandas.merge |
import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objects as go
import pandas as pd
from dash.dependencies import Input, Output
from fbprophet import Prophet
from datetime import datetime, timedelta
# globals
step_num = 100
# Load data
df = pd.read_csv('data/stockdata2.csv', index_col=0, parse_dates=True)
df.index = pd.to_datetime(df['Date'])
# Initialize the app
app = dash.Dash(__name__,
assets_folder = 'assets')
app.config.suppress_callback_exceptions = True
# Do hard coded data prep - #TODO move to separate file
df_sub = df.copy()
stock = "AAPL"
x = df_sub[df_sub['stock'] == stock].index
y=df_sub[df_sub['stock'] == stock][['Date','value']]
y.columns = ["ds","y"]
y["ds"] = pd.to_datetime(y["ds"])
def simulate_outlier_detection(
data = None,
delta_test = 3,
delta_train = 28,
curr_date = pd.to_datetime("2010-01-01")
):
y = data.copy()
# generate dates
maxcut_date = curr_date - timedelta(days=delta_test)
mincut_date = curr_date - timedelta(days=delta_train)
# generate train data
ys = y[(y.ds <= maxcut_date) & (y.ds >= mincut_date)]
# generate pred dates
future_dates = pd.DataFrame({"ds":[curr_date - timedelta(days=i) for i in range(delta_test)]})
future_dates.sort_values("ds",inplace=True)
future_dates.index = future_dates.ds
pred_dates = | pd.concat([ys[["ds"]],future_dates],axis=0) | pandas.concat |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = | pd.DataFrame(expect_collection_noExpand['remove']) | pandas.DataFrame |
import pendulum as pdl
import sys
sys.path.append(".")
# the memoization-related library
import loguru
import itertools
import portion
import klepto.keymaps
import CacheIntervals as ci
from CacheIntervals.utils import flatten
from CacheIntervals.utils import pdl2pd, pd2pdl
from CacheIntervals.utils import Timer
from CacheIntervals.Intervals import pd2po, po2pd
from CacheIntervals.RecordInterval import RecordIntervals, RecordIntervalsPandas
class QueryRecorder:
'''
A helper class
'''
pass
class MemoizationWithIntervals(object):
'''
The purpose of this class is to optimise
the number of call to a function retrieving
possibly disjoint intervals:
- do standard caching for a given function
- additively call for a date posterior to one
already cached is supposed to yield a pandas
Frame which can be obtained by concatenating
the cached result and a -- hopefully much --
smaller query
Maintains a list of intervals that have been
called.
With a new interval:
-
'''
keymapper = klepto.keymaps.stringmap(typed=False, flat=False)
def __init__(self,
pos_args=None,
names_kwarg=None,
classrecorder=RecordIntervalsPandas,
aggregation=lambda listdfs: pd.concat(listdfs, axis=0),
debug=False,
# memoization=klepto.lru_cache(
# cache=klepto.archives.hdf_archive(
# f'{pdl.today().to_date_string()}_memoization.hdf5'),
# keymap=keymapper),
memoization=klepto.lru_cache(
cache=klepto.archives.dict_archive(),
keymap=keymapper),
**kwargs):
'''
:param pos_args: the indices of the positional
arguments that will be handled as intervals
:param names_kwarg: the name of the named parameters
that will be handled as intervals
:param classrecorder: the interval recorder type
we want to use
:param memoization: a memoization algorithm
'''
# A dictionary of positional arguments indices
# that are intervals
self.argsi = {}
self.kwargsi = {}
# if pos_args is not None:
# for posarg in pos_args:
# self.argsi[posarg] = classrecorder(**kwargs)
self.pos_args_itvl = pos_args if pos_args is not None else []
#print(self.args)
# if names_kwarg is not None:
# for namedarg in names_kwarg:
# self.kwargsi[namedarg] = classrecorder(**kwargs)
self.names_kwargs_itvl = names_kwarg if names_kwarg is not None else {}
#print(self.kwargs)
self.memoization = memoization
self.aggregation = aggregation
self.debugQ = debug
self.argsdflt = None
self.kwargsdflt = None
self.time_last_call = pdl.today()
self.classrecorder = classrecorder
self.kwargsrecorder = kwargs
self.argssolver = None
self.query_recorder = QueryRecorder()
def __call__(self, f):
'''
The interval memoization leads to several calls to the
standard memoised function and generates a list of return values.
        The aggregation is needed for the doubly lazy
        function to have the same signature as the original function.
        To access the underlying memoized function, pass
        get_function_cachedQ=True in the kwargs of the
        overloaded call (not of this function).
:param f: the function to memoize
:return: the wrapper to the memoized function
'''
if self.argssolver is None:
self.argssolver = ci.Functions.ArgsSolver(f, split_args_kwargsQ=True)
@self.memoization
def f_cached(*args, **kwargs):
'''
The cached function is used for a double purpose:
1. for standard calls, will act as the memoised function in a traditional way
2. Additively when pass parameters of type QueryRecorder, it will create
or retrieve the interval recorders associated with the values of
non-interval parameters.
In this context, we use the cached function as we would a dictionary.
'''
QueryRecorderQ = False
args_new = []
kwargs_new = {}
'''
check whether this is a standard call to the user function
or a request for the interval recorders
'''
for i,arg in enumerate(args):
if isinstance(arg, QueryRecorder):
args_new.append(self.classrecorder(**self.kwargsrecorder))
QueryRecorderQ = True
else:
args_new.append(args[i])
for name in kwargs:
if isinstance(kwargs[name], QueryRecorder):
kwargs_new[name] = self.classrecorder(**self.kwargsrecorder)
QueryRecorderQ = True
else:
kwargs_new[name] = kwargs[name]
if QueryRecorderQ:
return args_new, kwargs_new
return f(*args, **kwargs)
def wrapper(*args, **kwargs):
if kwargs.get('get_function_cachedQ', False):
return f_cached
#loguru.logger.debug(f'function passed: {f_cached}')
loguru.logger.debug(f'args passed: {args}')
loguru.logger.debug(f'kwargs passed: {kwargs}')
# First pass: resolve the recorders
dargs_exp, kwargs_exp = self.argssolver(*args, **kwargs)
# Intervals are identified by position and keyword name
# 1. First get the interval recorders
args_exp = list(dargs_exp.values())
args_exp_copy = args_exp.copy()
kwargs_exp_copy = kwargs_exp.copy()
for i in self.pos_args_itvl:
args_exp_copy[i] = self.query_recorder
for name in self.names_kwargs_itvl:
kwargs_exp_copy[name] = self.query_recorder
args_with_ri, kwargs_with_ri = f_cached(*args_exp_copy, **kwargs_exp_copy)
# 2. Now get the the actual list of intervals
for i in self.pos_args_itvl:
# reuse args_exp_copy to store the list
args_exp_copy[i] = args_with_ri[i](args_exp[i])
for name in self.names_kwargs_itvl:
# reuse kwargs_exp_copy to store the list
kwargs_exp_copy[name] = kwargs_with_ri[name](kwargs_exp[name])
'''3. Then generate all combination of parameters
3.a - args'''
ns_args = range(len(args_exp))
lists_possible_args = [[args_exp[i]] if i not in self.pos_args_itvl else args_exp_copy[i] for i in ns_args]
# Take the cartesian product of these
calls_args = list( map(list,itertools.product(*lists_possible_args)))
'''3.b kwargs'''
#kwargs_exp_vals = kwargs_exp_copy.values()
names_kwargs = list(kwargs_exp_copy.keys())
lists_possible_kwargs = [[kwargs_exp[name]] if name not in self.names_kwargs_itvl
else kwargs_exp_copy[name] for name in names_kwargs]
calls_kwargs = list(map(lambda l: dict(zip(names_kwargs,l)), itertools.product(*lists_possible_kwargs)))
calls = list(itertools.product(calls_args, calls_kwargs))
if self.debugQ:
results = []
for call in calls:
with Timer() as timer:
results.append(f_cached(*call[0], **call[1]) )
print('Timer to demonstrate caching:')
timer.display(printQ=True)
else:
results = [f_cached(*call[0], **call[1]) for call in calls]
result = self.aggregation(results)
return result
return wrapper
if __name__ == "__main__":
import logging
import daiquiri
import pandas as pd
import time
daiquiri.setup(logging.DEBUG)
logging.getLogger('OneTick64').setLevel(logging.WARNING)
logging.getLogger('databnpp.ODCB').setLevel(logging.WARNING)
logging.getLogger('requests_kerberos').setLevel(logging.WARNING)
pd.set_option('display.max_rows', 200)
| pd.set_option('display.width', 600) | pandas.set_option |
from robust_rcf import robust_rcf
import numpy as np
import pandas as pd
from evaluate import evaluate, anomaly_classification_percentile
from sklearn.metrics import accuracy_score
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def test_rrcf_simon(data, sample = 0.1):
# load / prepare data
df = pd.DataFrame(data, columns = ['Timestamp','Year', 'Month', 'Day of Month', 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])
# shuffle values for SIMON training / testing
df_shuffle = df.sample(frac = 1)
simon_features = np.array(df_shuffle['Simon Features'].values.tolist())
labels = (df_shuffle['file'] != 'enron.jsonl').astype(int)
# break into train / test by oldest / newest
train_split = int(0.6 * df.shape[0])
val_split = int(0.3 * df.shape[0] * sample)
simon_train, y_train = simon_features[:train_split], labels[:train_split]
simon_val, y_val = simon_features[train_split:train_split + val_split], labels[train_split:train_split + val_split]
simon_test, y_test = simon_features[train_split + val_split:], labels[train_split + val_split:]
# print anomalous percentage in train / val / test
print('There are {} ({} %) anomalous examples in the train set'.format(sum(y_train), 100 * sum(y_train) / len(y_train)))
print('There are {} ({} %) anomalous examples in the sampled validation set'.format(sum(y_val), 100 * sum(y_val) / len(y_val)))
print('There are {} ({} %) anomalous examples in the test set'.format(sum(y_test), 100 * sum(y_test) / len(y_test)))
# test batch anomaly detection on SIMON features
# initially set num_samples_per_tree based on ratio of anomalies
tree_size = int((df.shape[0] - sum(labels)) / sum(labels) * 2)
num_trees = 200
start_time = time.time()
print('Fitting batch anomaly detection on training set...')
clf = robust_rcf(num_trees, tree_size)
clf.fit_batch(simon_train)
print('Fitting batch anomaly detection took {} seconds'.format(time.time() - start_time))
'''
print('Scoring training set')
start_time = time.time()
anom_score = clf.batch_anomaly_scores()
print('Scoring batch anomaly detection took {} seconds'.format(time.time() - start_time))
# set threshold as % of anomalies in sample
# TODO = add function that can do percentile or z-score
anom_thresh = (len(labels) - sum(labels)) / len(labels) * 100
anom_pred = anomaly_classification_percentile(anom_score, anom_thresh)
print("Training Set Evaluation")
print(evaluate(y_train, anom_pred))
    '''
    # set threshold as % of anomalies in the sample (hoisted out of the commented-out
    # block above so the validation scoring below still has a threshold to use)
    anom_thresh = (len(labels) - sum(labels)) / len(labels) * 100
# eval on validation set
print('Scoring validation set')
start_time = time.time()
val_anom_score = clf.anomaly_score(simon_val)
print('Scoring batch anomaly detection took {} seconds on ({} %) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(val_anom_score, anom_thresh)
print("Validation Set Evaluation")
print(evaluate(y_val, val_anom_pred))
# test streaming anomaly detection on SIMON features (just validation set)
print('Fitting / scoring streaming anomaly detection on validation set...')
start_time = time.time()
stream_anom_scores = clf.stream_anomaly_scores(simon_val, window_size = 1, new_forest=True)
print('Fitting / Scoring streaming anomaly detection took {} seconds on ({}%) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(stream_anom_scores, anom_thresh)
print("Validation Set Evaluation")
print(evaluate(y_val, val_anom_pred))
def test_rrcf_enron_times(data, sample = 0.1, anom_thresh = 95):
'''
Test batch and streaming anomaly detection on just Enron email time features
'''
# sort Enron emails by timestamp
df = pd.DataFrame(data, columns = ['Timestamp','Year', 'Month', 'Day of Month', 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file'])
df = df.loc[df['file'] == 'enron.jsonl'].sort_values(by = 'Timestamp')
# convert timestamp column to timestamp difference
df['Timestamp Difference'] = df['Timestamp'].diff()
# drop non-time columns
df.drop(['Timestamp', 'Simon Features', 'file'], axis=1, inplace=True)
#df = df[['Timestamp Difference']]
# cast to np array of float values and remove initial timestamp (nan time difference)
df = df.values.astype(float)[1:]
# test on sample of training / validation data
train_split = int(0.6 * df.shape[0] * sample)
val_split = int(0.3 * df.shape[0] * sample)
enron_train = df[:train_split]
enron_val = df[train_split:train_split + val_split]
plt.hist(enron_train)
plt.show()
plt.hist(enron_val)
plt.show()
# test batch anomaly detection
tree_size = 100
num_trees = 100
start_time = time.time()
print('Fitting batch anomaly detection on training set...')
clf = robust_rcf(num_trees, tree_size)
clf.fit_batch(enron_train)
print('Fitting batch anomaly detection took {} seconds'.format(time.time() - start_time))
print('Scoring training set')
start_time = time.time()
anom_score = clf.anomaly_score(enron_train)
print('Scoring batch anomaly detection took {} seconds'.format(time.time() - start_time))
# set "true" anomalies just based on frequency
anom_pred = anomaly_classification_percentile(anom_score, anom_thresh)
anom_true = (enron_train[:,-1] < np.percentile(enron_train[:,-1], 100 - anom_thresh)).astype(int)
print("Training Set Evaluation")
print(evaluate(anom_true, anom_pred))
# eval on validation set
print('Scoring validation set')
start_time = time.time()
val_anom_score = clf.anomaly_score(enron_val)
print('Scoring batch anomaly detection took {} seconds on ({} %) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(val_anom_score, anom_thresh)
anom_true = (enron_val[:,-1] < np.percentile(enron_val[:,-1],100 - anom_thresh)).astype(int)
print("Validation Set Evaluation")
print(evaluate(anom_true, val_anom_pred))
# graph results
colors = ('blue', 'red')
targets = ('non-anomalous', 'anomalous')
enron_scaled = MinMaxScaler().fit_transform(enron_train[:,-1].reshape(-1,1)).reshape(-1,)
pred_indices = (np.where(val_anom_pred == 0), np.where(val_anom_pred == 1))
pred_data = (enron_scaled[np.where(val_anom_pred == 0)[0]], enron_scaled[np.where(val_anom_pred == 1)[0]])
plt.subplot(2,1,1)
for index, dat, color, target in zip(pred_indices, pred_data, colors, targets):
plt.scatter(index[0], dat, c = color, label = target, s=10)
plt.legend()
plt.title('Batch Anomaly Detection on Enron Time Series Data')
plt.show()
# test streaming anomaly detection on Enron time features (just validation set)
print('Fitting / scoring streaming anomaly detection on validation set...')
start_time = time.time()
stream_anom_scores = clf.stream_anomaly_scores(enron_val, window_size = 1, new_forest=True)
print('Fitting / Scoring streaming anomaly detection took {} seconds on ({} %) of the validation set'.format(time.time() - start_time, 100 * sample))
val_anom_pred = anomaly_classification_percentile(stream_anom_scores, anom_thresh)
print("Validation Set Evaluation")
print(evaluate(anom_true, val_anom_pred))
# graph results
colors = ('blue', 'red')
targets = ('non-anomalous', 'anomalous')
pred_indices = (np.where(val_anom_pred == 0), np.where(val_anom_pred == 1))
pred_data = (enron_scaled[np.where(val_anom_pred == 0)[0]], enron_scaled[np.where(val_anom_pred == 1)[0]])
plt.subplot(2,1,2)
for index, dat, color, target in zip(pred_indices, pred_data, colors, targets):
plt.scatter(index[0], dat, c = color, label = target, s=10)
plt.legend()
plt.title('Streaming Anomaly Detection on Enron Time Series Data')
plt.show()
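# Hypothetical driver (sketch): `data` is assumed to follow the column layout used above
# (Timestamp, Year, Month, Day of Month, Day of Week, Hour, Minute, Seconds, Simon Features, file), e.g.
#   test_rrcf_enron_times(data, sample=0.1, anom_thresh=95)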
def test_rrcf_enron_jpl_times(data, sample = 0.1):
'''
Test batch and streaming anomaly detection on JPL Abuse emails superimposed on Enron
email distribution over time
'''
# graph JPL / Nigerian timestamps vs Enron timestamps
df = | pd.DataFrame(data, columns = ['Timestamp','Year', 'Month', 'Day of Month', 'Day of Week', 'Hour', 'Minute', 'Seconds', 'Simon Features', 'file']) | pandas.DataFrame |
import time
from functools import wraps
import pandas as pd
def timer(f):
@wraps(f)
def wrap(*args, **kwargs):
start = time.time()
x = f(*args, **kwargs)
print(f"time for {f.__name__}: {time.time() - start}")
return x
return wrap
def show_shape(f):
@wraps(f)
def wrap(*args, **kwargs):
print(f"\nfunction: {f.__name__}")
for key, val in kwargs.items():
if isinstance(val, pd.DataFrame):
print(f"{key}.shape: {val.shape}")
x = f(*args, **kwargs)
return x
return wrap
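# Note: in the pipeline steps below, @timer is the outermost decorator, so the reported
# time also includes the shape printing done by @show_shape.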
@timer
@show_shape
def process_data(df1, df2):
time.sleep(0.3)
return pd.concat([df1, df2])
@timer
@show_shape
def model_train(training_df):
time.sleep(0.3)
return training_df
@timer
@show_shape
def evaluate(evaluate_df, df1, df2):
time.sleep(0.3)
@timer
def pipeline(df1, df2):
df = process_data(df1=df1, df2=df2)
df = model_train(training_df=df)
evaluate(evaluate_df=df, df1=df1, df2=df2)
if __name__ == "__main__":
d1 = {"col1": [1, 2], "col2": [3, 4]}
df1 = pd.DataFrame(data=d1)
d2 = {"col1": [1, 2, 3], "col2": [3, 4, 5]}
df2 = | pd.DataFrame(data=d2) | pandas.DataFrame |
# USDA_ERS_MLU.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
USDA Economic Research Service (ERS) Major Land Uses (MLU)
https://www.ers.usda.gov/data-products/major-land-uses/
Last updated: Thursday, April 16, 2020
"""
import io
import pandas as pd
import numpy as np
from flowsa.location import get_all_state_FIPS_2, US_FIPS
from flowsa.settings import vLogDetailed
from flowsa.flowbyfunctions import assign_fips_location_system, aggregator
from flowsa.common import load_crosswalk
from flowsa.literature_values import \
get_area_of_rural_land_occupied_by_houses_2013, \
get_area_of_urban_land_occupied_by_houses_2013, \
get_transportation_sectors_based_on_FHA_fees, \
get_urban_land_use_for_airports, \
get_urban_land_use_for_railroads, get_open_space_fraction_of_urban_area
from flowsa.validation import compare_df_units
def mlu_call(*, resp, **_):
"""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param resp: df, response from url call
:return: pandas dataframe of original source data
"""
with io.StringIO(resp.text) as fp:
df = | pd.read_csv(fp, encoding="ISO-8859-1") | pandas.read_csv |
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import floris.tools as wfct
import floris.tools.cut_plane as cp
import floris.tools.wind_rose as rose
import floris.tools.power_rose as pr
import floris.tools.visualization as vis
# Instantiate the FLORIS object
file_dir = os.path.dirname(os.path.abspath(__file__))
fi = wfct.floris_interface.FlorisInterface(
os.path.join(file_dir, "../example_input.json")
)
# Define wind farm coordinates and layout
wf_coordinate = [39.8283, -98.5795]
# Below minimum wind speed, assumes power is zero.
minimum_ws = 3.0
# Set wind farm to N_row x N_row grid with constant spacing
# (2 x 2 grid, 5 D spacing)
D = fi.floris.farm.turbines[0].rotor_diameter
N_row = 2
spc = 5
layout_x = []
layout_y = []
for i in range(N_row):
for k in range(N_row):
layout_x.append(i * spc * D)
layout_y.append(k * spc * D)
N_turb = len(layout_x)
fi.reinitialize_flow_field(
layout_array=(layout_x, layout_y), wind_direction=[270.0], wind_speed=[8.0]
)
fi.calculate_wake()
# ================================================================================
print("Plotting the FLORIS flowfield...")
# ================================================================================
# Initialize the horizontal cut
hor_plane = fi.get_hor_plane(height=fi.floris.farm.turbines[0].hub_height)
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
ax.set_title("Baseline flow for U = 8 m/s, Wind Direction = 270$^\\circ$")
# ================================================================================
print("Importing wind rose data...")
# ================================================================================
# Create wind rose object and import wind rose dataframe using WIND Toolkit
# HSDS API. Alternatively, load existing file with wind rose information.
calculate_new_wind_rose = False
wind_rose = rose.WindRose()
if calculate_new_wind_rose:
wd_list = np.arange(0, 360, 5)
ws_list = np.arange(0, 26, 1)
df = wind_rose.import_from_wind_toolkit_hsds(
wf_coordinate[0],
wf_coordinate[1],
ht=100,
wd=wd_list,
ws=ws_list,
limit_month=None,
st_date=None,
en_date=None,
)
else:
df = wind_rose.load(
os.path.join(file_dir, "../optimization/scipy/windtoolkit_geo_center_us.p")
)
# plot wind rose
wind_rose.plot_wind_rose()
# =============================================================================
print("Finding power with and without wakes in FLORIS...")
# =============================================================================
# Determine baseline power with and without wakes
# Put results in dict for speed
power_dict = dict()
for i in range(len(df.wd)):
print(
"Computing wind speed, wind direction pair "
+ str(i)
+ " out of "
+ str(len(df.wd))
+ ": wind speed = "
+ str(df.ws[i])
+ " m/s, wind direction = "
+ str(df.wd[i])
+ " deg."
)
if df.ws[i] >= minimum_ws:
fi.reinitialize_flow_field(wind_direction=[df.wd[i]], wind_speed=[df.ws[i]])
# calculate baseline power
fi.calculate_wake()
power_base = fi.get_turbine_power()
# calculate power for no wake case
fi.calculate_wake(no_wake=True)
power_no_wake = fi.get_turbine_power(no_wake=True)
else:
power_base = N_turb * [0.0]
power_no_wake = N_turb * [0.0]
power_dict[i] = {
"ws": df.ws[i],
"wd": df.wd[i],
"power_baseline": np.sum(power_base),
"turbine_power_baseline": power_base,
"power_no_wake": np.sum(power_no_wake),
"turbine_power_no_wake": power_no_wake,
}
df_base = | pd.DataFrame.from_dict(power_dict, "index") | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
import dask
import scipy
import time
from functools import partial
from abc import ABCMeta, abstractmethod
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import point_in_polygon
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF, DotProduct, WhiteKernel
import factorialModel
import loadData
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d, griddata
import SSVI
import bootstrapping
#######################################################################################################
class InterpolationModel(factorialModel.FactorialModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
#Build the learner
def buildModel(self):
#raise NotImplementedError()
return
def trainWithSession(self, session, inputTrain, nbEpoch, inputTest = None):
raise NotImplementedError("Not a tensorflow model")
return super().trainWithSession(session,
inputTrain,
nbEpoch,
inputTest = inputTest)
def train(self, inputTrain, nbEpoch, inputTest = None):
#Do nothing
return np.array([0.0])
def evalModelWithSession(self, sess, inputTest):
raise NotImplementedError("Not a tensorflow model")
return super().evalModelWithSession(sess, inputTest)
def evalModel(self, inputTestList):
#No loss since we interpolate exactly
inputTest = inputTestList[0]
coordinates = inputTestList[1]
loss = pd.Series(np.zeros(inputTest.shape[0]), index = inputTest.index)
#Return the inputs as compressed values
inputs = inputTest.apply(lambda x : self.interpolate(x, coordinates.loc[x.name]), axis=1)
#We do not have any factors so we assign a dummy value of 1
factors = pd.DataFrame(np.ones((inputTest.shape[0],self.nbFactors)),
index=inputTest.index)
return loss, inputs, factors
def getWeightAndBiasFromLayer(self, layer):
raise NotImplementedError("Not a tensorflow model")
return super().getWeightAndBiasFromLayer(layer)
#Interpolate or extrapolate certain values given the knowledge of other ones
def interpolate(self, incompleteSurface, coordinates):
raise NotImplementedError()
return pd.Series()
def completeDataTensor(self,
sparseSurfaceList,
initialValueForFactors,
nbCalibrationStep):
# knownValues = sparseSurface.dropna()
# locationToInterpolate = sparseSurface[sparseSurface.isna()].index
sparseSurface = sparseSurfaceList[0]
coordinates = sparseSurfaceList[1]
interpolatedValues = self.interpolate(sparseSurface, coordinates)
#Not a factorial model, we assign a dummy value
bestFactors = np.ones(self.nbFactors)
        #Exact interpolation
        calibrationLoss = 0.0
        calibrationSerie = pd.Series([calibrationLoss])
        #Complete surface with interpolated values
bestSurface = interpolatedValues
return calibrationLoss, bestFactors, bestSurface, calibrationSerie
#Interpolation does not assume any factors but relies on some known values
def evalSingleDayWithoutCalibrationWithSensi(self, initialValueForFactors, dataSetList):
raise NotImplementedError("Not a factorial model")
return super().evalSingleDayWithoutCalibrationWithSensi(initialValueForFactors, dataSetList)
def plotInterpolatedSurface(self,valueToInterpolate, calibratedFactors,
colorMapSystem=None,
plotType=None):
raise NotImplementedError("Not a factorial model")
return
def evalInterdependancy(self, fullSurfaceList):
raise NotImplementedError("Not a Factorial model")
return
def evalSingleDayWithoutCalibration(self, initialValueForFactors, dataSetList):
raise NotImplementedError("Not a Factorial model")
return
#ToolBox
#######################################################################################################
def getMaskedPoints(incompleteSurface, coordinates):
return coordinates.loc[incompleteSurface.isna()]
def getMaskMatrix(incompleteSurface):
maskMatrix = incompleteSurface.copy().fillna(True)
maskMatrix.loc[~incompleteSurface.isna()] = False
return maskMatrix
#maskedGrid : surface precising missing value with a NaN
#Assuming indexes and columns are sorted
#Select swaption coordinates (expiry, tenor) whose value is known and are on the boundary
#This defined a polygon whose vertices are known values
def selectPolygonOuterPoints(coordinates):
outerPoints = []
#Group coordinates by first coordinate
splittedCoordinates = {}
for tple in coordinates.values :
if tple[0] not in splittedCoordinates :
splittedCoordinates[tple[0]] = []
splittedCoordinates[tple[0]].append(tple[1])
#Get maximum and minimum for the second dimension
for key in splittedCoordinates.keys():
yMin = np.nanmin(splittedCoordinates[key])
yMax = np.nanmax(splittedCoordinates[key])
outerPoints.append((key,yMin))
outerPoints.append((key,yMax))
return outerPoints
def removeNaNcooridnates(coordinatesList):
isNotNaN = [False if (np.isnan(x[0]) or np.isnan(x[1])) else True for x in coordinatesList]
return coordinatesList[isNotNaN]
#Order a list of vertices to form a polygon
def orderPolygonVertices(outerPointList):
    #Sort pairs lexicographically on the first dimension (np.sort would sort inside each
    #(x, y) pair and scramble the coordinates)
    sortedPointList = np.array(sorted(outerPointList))
    #Points are built as a pair of two points for value in the first dimension
    #Hence the polygon starts with points having the first value for the second dimension
    #(and order them along the first dimension)
    orderedListOfVertices = sortedPointList[::2]
    #We then browse the remaining points but in the reverse order for the second dimension
    orderedListOfVertices = np.concatenate((orderedListOfVertices, sortedPointList[1::2][::-1]))
return orderedListOfVertices
#Select swaption coordinates (expiry, tenor) whose value is known and are on the boundary
#This defined a polygon whose vertices are known values
def buildInnerDomainCompletion(incompleteSurface, coordinates):
coordinatesWithValues = coordinates.loc[~incompleteSurface.isna()]
outerPointsList = selectPolygonOuterPoints(coordinatesWithValues)
verticesList = orderPolygonVertices(outerPointsList)
expiryVertices, tenorVectices = zip(*verticesList)
return expiryVertices, tenorVectices
#Select swaption coordinates (expiry, tenor) whose value is known
#and their coordinate corresponds to maximum/minimum value for x axis and y axis
#This defines a quadrilateral
def buildOuterDomainCompletion(incompleteSurface, coordinates):
coordinatesWithValues = coordinates.loc[~incompleteSurface.isna()].values
firstDimValues = list(map(lambda x : x[0], coordinatesWithValues))
secondDimValues = list(map(lambda x : x[1], coordinatesWithValues))
maxExpiry = np.amax(firstDimValues)
minExpiry = np.nanmin(firstDimValues)
maxTenor = np.amax(secondDimValues)
minTenor = np.nanmin(secondDimValues)
expiryVertices = [maxExpiry, maxExpiry, minExpiry, minExpiry, maxExpiry]
tenorVectices = [maxTenor, minTenor, minTenor, maxTenor, maxTenor]
return expiryVertices, tenorVectices
#verticesList : list of vertices defining the polygon
#Points : multiIndex serie for which we want to check the coordinates belongs to the domain defined by the polygon
#Use Winding number algorithm
def areInPolygon(verticesList, points):
return pd.Series(points.map(lambda p : point_in_polygon.wn_PnPoly(p, verticesList) != 0).values,
index = points.index)
#Return the list (pandas Dataframe) of points which are located in the domain (as a closed set)
#The closure ( i.e. edge of the domain ) is also returned
#defined by points which are not masked
def areInInnerPolygon(incompleteSurface, coordinates, showDomain = False):
#Add the frontier
gridPoints = coordinates.loc[~incompleteSurface.isna()]
#Build polygon from the frontier
expiriesPolygon, tenorsPolygon = buildInnerDomainCompletion(incompleteSurface, coordinates)
polygon = list(zip(expiriesPolygon,tenorsPolygon))
#Search among masked points which ones lie inside the polygon
maskedPoints = getMaskedPoints(incompleteSurface, coordinates)
interiorPoints = areInPolygon(polygon, maskedPoints)
if not interiorPoints.empty :
gridPoints = gridPoints.append(maskedPoints[interiorPoints]).drop_duplicates()
if showDomain :
plt.plot(expiriesPolygon,tenorsPolygon)
plt.xlabel("First dimension")
plt.xlabel("Second dimension")
plt.plot(gridPoints.map(lambda x : x[0]).values,
gridPoints.map(lambda x : x[1]).values,
'ro')
plt.show()
return gridPoints
#Return the list (pandas Dataframe) of points which are located in the outer domain (as a closed set)
#Outer domain is delimited by the maximum and minimum coordinates of the known values
#inner domain is delimited by the polygon whose vertices are the known points
#showDomain plots the boundary ( i.e. edge of the domain ) and the points which are inside the quadrilateral
def areInOuterPolygon(incompleteSurface, coordinates, showDomain = False):
#Add the frontier
gridPoints = coordinates.loc[~incompleteSurface.isna()]
#Build polygon from the frontier
expiriesPolygon, tenorsPolygon = buildOuterDomainCompletion(incompleteSurface, coordinates)
polygon = list(zip(expiriesPolygon,tenorsPolygon))
#Search among masked points which ones lie inside the polygon
maskedPoints = getMaskedPoints(incompleteSurface, coordinates)
interiorPoints = areInPolygon(polygon, maskedPoints)
if not interiorPoints.empty :
gridPoints = gridPoints.append(maskedPoints[interiorPoints]).drop_duplicates()
if showDomain :
plt.plot(expiriesPolygon,tenorsPolygon)
plt.xlabel("First dimension")
plt.xlabel("Second dimension")
plt.plot(gridPoints.map(lambda x : x[0]).values,
gridPoints.map(lambda x : x[1]).values,
'ro')
plt.show()
return gridPoints
#######################################################################################################
#Linear interpolation with flat extrapolation
#Assume row are non empty
def interpolateRow(row, coordinates):
definedValues = row.dropna()
if definedValues.size == 1 :
return pd.Series(definedValues.iloc[0] * np.ones_like(row),
index = row.index)
else :
#Flat extrapolation and linear interpolation based on index (Tenor) value
filledRow = row.interpolate(method='index', limit_direction = 'both')
return filledRow
def formatCoordinatesAsArray(coordinateList):
x = np.ravel(list(map(lambda x : x[0], coordinateList)))
y = np.ravel(list(map(lambda x : x[1], coordinateList)))
return np.vstack((x, y)).T
#Linear interpolation combined with Nearest neighbor extrapolation
# drawn from https://github.com/mChataign/DupireNN
def customInterpolator(interpolatedData, formerCoordinates, NewCoordinates):
knownPositions = formatCoordinatesAsArray(formerCoordinates)
xNew = np.ravel(list(map(lambda x : x[0], NewCoordinates)))
yNew = np.ravel(list(map(lambda x : x[1], NewCoordinates)))
# print(type(xNew))
# print(type(yNew))
# print(np.array((xNew, yNew)).T.shape)
# print(type(interpolatedData))
# print(type(knownPositions))
# print()
fInterpolation = griddata(knownPositions,
np.ravel(interpolatedData),
np.array((xNew, yNew)).T,
method = 'linear',
rescale=True)
fExtrapolation = griddata(knownPositions,
np.ravel(interpolatedData),
np.array((xNew, yNew)).T,
method = 'nearest',
rescale=True)
return np.where(np.isnan(fInterpolation), fExtrapolation, fInterpolation)
def interpolate(incompleteSurface, coordinates):
knownValues = incompleteSurface.dropna()
knownLocation = coordinates.loc[knownValues.index]
locationToInterpolate = coordinates.drop(knownValues.index)
interpolatedValues = customInterpolator(knownValues.values,
knownLocation.values,
locationToInterpolate.values)
completeSurface = pd.Series(interpolatedValues,
index = locationToInterpolate.index).append(knownValues)
return completeSurface.loc[incompleteSurface.index].rename(incompleteSurface.name)
def extrapolationFlat(incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface, coordinates)
correctedSurface = interpolate(filteredSurface, filteredCoordinates)
correctedSurface = correctedSurface.append(pd.Series(incompleteSurface.drop(filteredCoordinates.index),
index = coordinates.drop(filteredCoordinates.index).index))
return correctedSurface.sort_index()
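#Example of the expected layout for these helpers (a sketch, not from the original code):
#values are indexed by point label and coordinates are (first dimension, second dimension)
#tuples sharing the same index; NaN entries are the ones to fill.
#   values = pd.Series([0.2, 0.22, 0.24, 0.26, np.nan], index=["p1", "p2", "p3", "p4", "p5"])
#   coords = pd.Series([(1.0, 0.9), (1.0, 1.1), (5.0, 0.9), (5.0, 1.1), (2.0, 1.0)],
#                      index=["p1", "p2", "p3", "p4", "p5"])
#   interpolate(values, coords)   #p5 is filled by linear interpolation inside the hull,
#                                 #nearest-neighbour extrapolation is used outside it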
#######################################################################################################
class LinearInterpolation(InterpolationModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestLinearInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
#Extrapolation is flat and interpolation is linear
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface, coordinates)
interpolatedSurface = interpolate(filteredSurface, filteredCoordinates)
nanSurface = incompleteSurface.drop(interpolatedSurface.index)
return interpolatedSurface.append(nanSurface)[coordinates.index].rename(incompleteSurface.name)
# #Build the learner
# def buildModel(self):
# raise NotImplementedError()
# return
#######################################################################################################
class SplineInterpolation(LinearInterpolation):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestSplineInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
def customInterpolator(self, interpolatedData, formerCoordinates, NewCoordinates):
knownPositions = formatCoordinatesAsArray(formerCoordinates)
xNew = np.ravel(list(map(lambda x : x[0], NewCoordinates)))
yNew = np.ravel(list(map(lambda x : x[1], NewCoordinates)))
fInterpolation = griddata(knownPositions,
np.ravel(interpolatedData),
(xNew, yNew),
method = 'cubic',
rescale=True)
fExtrapolation = griddata(knownPositions,
np.ravel(interpolatedData),
(xNew, yNew),
method = 'nearest',
rescale=True)
return np.where(np.isnan(fInterpolation), fExtrapolation, fInterpolation)
#Extrapolation is flat and interpolation is linear
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface, coordinates)
knownValues = filteredSurface.dropna()
knownLocation = filteredCoordinates.loc[knownValues.index]
locationToInterpolate = filteredCoordinates.drop(knownValues.index)
interpolatedValues = self.customInterpolator(knownValues.values,
knownLocation.values,
locationToInterpolate.values)
completeSurface = pd.Series(interpolatedValues,
index = locationToInterpolate.index).append(knownValues)
interpolatedSurface = completeSurface.loc[filteredSurface.index].rename(filteredSurface.name)
nanSurface = incompleteSurface.drop(interpolatedSurface.index)
return interpolatedSurface.append(nanSurface)[coordinates.index].rename(incompleteSurface.name)
#######################################################################################################
class GaussianProcess(InterpolationModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestGaussianModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
self.TrainGaussianHyperparameters = (self.hyperParameters["Train Interpolation"]
if ("Train Interpolation" in self.hyperParameters) else False)
self.sigmaF = self.hyperParameters["sigmaF"] if ("sigmaF" in self.hyperParameters) else 50.0
self.bandwidth = self.hyperParameters["bandwidth"] if ("bandwidth" in self.hyperParameters) else 0.5
self.sigmaBounds = self.hyperParameters["sigmaBounds"] if ("sigmaBounds" in self.hyperParameters) else (1.0, 200.0)
self.bandwidthBounds = self.hyperParameters["bandwidthBounds"] if ("bandwidthBounds" in self.hyperParameters) else (0.01, 10.0)
self.kernel = (ConstantKernel(constant_value=self.sigmaF,
constant_value_bounds=self.sigmaBounds)
* RBF(length_scale=self.bandwidth,
length_scale_bounds=self.bandwidthBounds))
def kernelRBF(self, X1, X2, sigma_f=1.0, l=1.0):
'''
Isotropic squared exponential kernel. Computes
a covariance matrix from points in X1 and X2.
Args:
X1: Array of m points (m x d).
X2: Array of n points (n x d).
Returns:
Covariance matrix (m x n).
'''
#print("sigma_f : ",sigma_f," l : ",l)
sqdist = np.sum(X1**2, 1).reshape(-1, 1) + np.sum(X2**2, 1) - 2 * np.dot(X1, X2.T)
return sigma_f**2 * np.exp(-0.5 / l**2 * sqdist)
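    #Noise-free Gaussian process regression posterior (cf. Rasmussen & Williams, ch. 2):
    #  mean(XStar) = K(XStar, X) K(X, X)^-1 Y
    #  cov(XStar)  = K(XStar, XStar) - K(XStar, X) K(X, X)^-1 K(X, XStar)
    #A small diagonal jitter is added below to keep K(X, X) invertible.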
def predictGaussianModel(self, X, XStar, Y, sigma_f, l):
KStar = self.kernelRBF(X, XStar, sigma_f, l)
KStarT = KStar.T
K = self.kernelRBF(X, X, sigma_f, l)
#Add noise to avoid singular matrix problem
noise = (1e-9) * np.eye(K.shape[0])
KInv = np.linalg.inv(K + noise)
KStarStar = self.kernelRBF(XStar, XStar, sigma_f, l)
YStar = np.dot(np.dot(KStarT,KInv),Y)
YStarUncertainty = KStarStar - np.dot(np.dot(KStarT,KInv),KStar)
return YStar, YStarUncertainty
def predictGaussianModelFormatted(self, knownValues, locationToInterpolate, coordinates):
knownLocation = coordinates.loc[knownValues.index]
#Optimize on log parameters
interpolatedValues, _ = self.predictGaussianModel(formatCoordinatesAsArray(knownLocation.values),
formatCoordinatesAsArray(locationToInterpolate.values),
knownValues.values,
np.exp(self.kernel.theta[0]),
np.exp(self.kernel.theta[1]))
return pd.Series(interpolatedValues, index = locationToInterpolate.index)
#Interpolate or extrapolate certain values given the knowledge of other ones
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface,
coordinates)
nanSurface = incompleteSurface.drop(filteredSurface.index)
extrapolationMode = self.hyperParameters["extrapolationMode"] if "extrapolationMode" in self.hyperParameters else None
#NoExtrapolation : NoExtrapolation | InnerDomain | OuterDomain
#LocationToInterpolate : Index of missing values
#knownValues : Serie of values which are known
knownValues = filteredSurface.dropna()
if knownValues.size == filteredSurface.size : #No value to interpolate
return incompleteSurface
resSurface = filteredSurface.copy()
interpolatedPoint = None
if extrapolationMode == 'InnerDomain' :
interpolatedPoint = areInInnerPolygon(filteredSurface, filteredCoordinates)
elif extrapolationMode == 'OuterDomain' :
interpolatedPoint = areInOuterPolygon(filteredSurface, filteredCoordinates)
else : #NoExtrapolation
interpolatedPoint = filteredCoordinates.drop(knownValues.index)
if self.TrainGaussianHyperparameters :
interpolatedValues = self.predictGaussianModelFormatted(knownValues,
interpolatedPoint,
filteredCoordinates)
else :
knownLocation = filteredCoordinates.loc[knownValues.index]
interpolator = GaussianProcessRegressor(kernel=self.kernel,
random_state=0,
normalize_y=True).fit(formatCoordinatesAsArray(knownLocation.values),
knownValues.values)
interpolatedValues = pd.Series(interpolator.predict(formatCoordinatesAsArray(interpolatedPoint.values), return_std=False),
index = interpolatedPoint.index)
resSurface.loc[interpolatedValues.index] = interpolatedValues
return extrapolationFlat(resSurface.append(nanSurface)[incompleteSurface.index].rename(incompleteSurface.name),
coordinates)
def nll_fn(self, X_trainSerie, Y_trainSerie, theta, noise=1e-3):
'''
Computes the negative log marginal
likelihood for training data X_train and Y_train and given
noise level.
Args:
X_train: training locations (m x d).
Y_train: training targets (m x 1).
noise: known noise level of Y_train.
theta: gaussian hyperparameters [sigma_f, l]
'''
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(Y_trainSerie,
X_trainSerie)
        Y_train = filteredSurface.dropna().values
X_train = formatCoordinatesAsArray(filteredCoordinates.loc[filteredSurface.dropna().index].values)
# Numerically more stable implementation of Eq. (7) as described
# in http://www.gaussianprocess.org/gpml/chapters/RW2.pdf, Section
# 2.2, Algorithm 2.1.
K = (self.kernelRBF(X_train, X_train, sigma_f=theta[0], l=theta[1]) +
noise**2 * np.eye(len(X_train)))
L = np.linalg.cholesky(K)
return (np.sum(np.log(np.diagonal(L))) +
0.5 * Y_train.T.dot(np.linalg.lstsq(L.T, np.linalg.lstsq(L, Y_train)[0])[0]) +
0.5 * len(X_train) * np.log(2*np.pi))
#Apply nll_fn for each day of YSerie and sum results
def computeTrainHistoryLogLikelyhood(self, kernelParams, dataSetList):
error = 0
locations = dataSetList[1] #YSerie.iloc[0].index.to_frame().values
func = lambda x : self.nll_fn(locations.loc[x.name], x, np.exp(kernelParams))
marginalLogLikelyhood = dataSetList[0].apply(func, axis = 1)
return marginalLogLikelyhood.sum()
def train(self, inputTrain, nbEpoch, inputTest = None):
if self.TrainGaussianHyperparameters :
#Calibrate globally gaussian process hyperparameters l and sigma on the training set
            objectiveFunction = lambda x : self.computeTrainHistoryLogLikelyhood(x, inputTrain)
nbRestart = 5#15
bestValue = None
bestParam = None
#As loglikelyhood function is nonconvex we try l-bfgs algorithms several times
def randomStart(bounds, nbStart):
return np.random.uniform(low=bounds[0], high=bounds[1], size=nbStart)
optimStarts = np.apply_along_axis(lambda x : randomStart(x,nbRestart),
1,
self.kernel.bounds).T
start = time.time()
for i in range(nbRestart):
print("bounds", np.exp(self.kernel.bounds))
print("random Starts", np.exp(optimStarts[i]))
                resOptim = scipy.optimize.fmin_l_bfgs_b(objectiveFunction,
optimStarts[i],
approx_grad = True,
maxiter = 20,
bounds = self.kernel.bounds)
if self.verbose :
print(resOptim)
if (bestParam is None) or (resOptim[1] < bestValue) :
bestValue = resOptim[1]
bestParam = resOptim[0]
print("Attempt : ", i, " nnLogLikelyHood : ", bestValue, " bestParam : ", np.exp(bestParam))
optimalValues = {'k1__constant_value' : np.exp(bestParam)[0],
'k2__length_scale' : np.exp(bestParam)[1]}
self.kernel.set_params(**optimalValues)
print("Time spent during optimization : ", time.time() - start)
#Else
return super().train(inputTrain, nbEpoch, inputTest = None)
def getTTMFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[0])
def getMoneynessFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[1])
#######################################################################################################
class NelsonSiegelCalibrator:
#######################################################################################################
#Construction functions
#######################################################################################################
def __init__(self,
order,
hyperparameters):
self.hyperParameters = hyperparameters
self.order = order
self.beta = []
self.alpha = []
self.verbose = False
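    #Nelson-Siegel(-Svensson) parameterisation implemented below, with t the time to maturity:
    #  NS(t)  = b0 + b1 * (1 - exp(-a0*t)) / (a0*t) + b2 * ((1 - exp(-a0*t)) / (a0*t) - exp(-a0*t))
    #  NSS(t) = NS(t) + b3 * ((1 - exp(-a1*t)) / (a1*t) - exp(-a1*t))   (only when order == 4)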
def objectiveFunction(self, ttm, beta, alpha):
slopeTime = (1 - np.exp(-alpha[0] * ttm))/(alpha[0] * ttm)
nelsonSiegel = beta[0] + slopeTime * beta[1] + (slopeTime - np.exp(-alpha[0] * ttm)) * beta[2]
if self.order == 4 :
nelsonSiegelSvensson = nelsonSiegel + ((1 - np.exp(-alpha[1] * ttm))/(alpha[1] * ttm) - np.exp(-alpha[1] * ttm)) * beta[3]
return nelsonSiegelSvensson
return nelsonSiegel
def drawnStartingPoints(self, bounds):
randPos = np.random.rand(len(bounds))
return [x[0][0] + (x[0][1] - x[0][0]) * x[1] for x in zip(bounds, randPos)]
def calibrate(self, curvesVol, ttms):
if self.order == 4 :
#Bounds taken from "Calibrating the Nelson–Siegel–Svensson model", <NAME>, <NAME>, <NAME>
#See https://comisef.eu/files/wps031.pdf
bounds = [(-10000,10000), (-10000,10000), (-10000,10000), (-10000,10000), (0,100), (0,200)]
startBounds = [(-1,1), (-1,1), (-1,1), (-1,1), (0,30), (0,30)]
func = lambda x : np.sqrt(np.nanmean(np.square(self.objectiveFunction(ttms/250, x[:4], x[4:]) - curvesVol)))
else :
bounds = [(-10000,10000), (-10000,10000), (-10000,10000), (0,200)]
startBounds = [(-1,1), (-1,1), (-1,1), (0,30)]
func = lambda x : np.sqrt(np.nanmean(np.square(self.objectiveFunction(ttms/250, x[:3], x[3:]) - curvesVol)))
bestFit = None
nbInit = 10
for k in range(nbInit) :
startingPoints = self.drawnStartingPoints(startBounds)
resOptim = scipy.optimize.minimize(func, startingPoints, bounds=bounds, method='L-BFGS-B')
if bestFit is None or resOptim.fun < bestFit :
bestFit = resOptim.fun
self.beta = resOptim.x[:4] if self.order == 4 else resOptim.x[:3]
self.alpha = resOptim.x[4:] if self.order == 4 else resOptim.x[3:]
if self.verbose :
print(resOptim.fun, " ; ", bestFit)
if self.verbose :
print("best error : ", bestFit)
return
def interpolate(self, ttm):
return self.objectiveFunction(ttm/250, self.beta, self.alpha)
#Post-treatments for calibrateModelMoneynessWiseDaily
def mergeResults(xCoordinates,
xAvgCoordinates,
xVol,
interpList,
refList,
nelsonList,
dfList):
interpVolDf = pd.concat(interpList,axis=1)
refVolDf = pd.concat(refList,axis=1)
moneynesses = np.unique(getMoneynessFromCoordinates(dfList))
nelsonIndex = pd.MultiIndex.from_product( [moneynesses, nelsonList[0].columns],
names=["Moneyness", "Nelson-Siegel Parameters"])
nelsonDf = pd.DataFrame(pd.concat(nelsonList,axis=1).values,
index = nelsonList[0].index,
columns = nelsonIndex)
coordinatesIndex = pd.MultiIndex.from_product([moneynesses, xCoordinates[0].columns],
names=["Moneyness", "Rank"])
coordinatesDf = pd.DataFrame(pd.concat(xCoordinates,axis=1).values,
index=nelsonList[0].index,
columns = coordinatesIndex)
volDf = pd.DataFrame(pd.concat(xVol,axis=1).values,
index=nelsonList[0].index,
columns = coordinatesIndex)
return interpVolDf, refVolDf, nelsonDf, coordinatesDf, volDf
#Interpolate Volatility along maturity for predefined nelson-seigel parameters
def getVolFromNelsonParameters(nelsonDf, coordinatesDf):
def getVolFromNelsonParametersApply(nelsonRow, coordinatesRow):
#iterate on moneyness
interpolatedValues = []
for m in coordinatesRow.index.get_level_values("Moneyness").unique():
coordinatesForM = coordinatesRow[coordinatesRow.index.get_level_values("Moneyness") == m]
parametersForM = nelsonRow[nelsonRow.index.get_level_values("Moneyness") == m]
interpolatorNelsonSiegel = NelsonSiegelCalibrator(3, {})
interpolatorNelsonSiegel.beta = parametersForM.head(3).values
interpolatorNelsonSiegel.alpha = parametersForM.tail(1).values
interpolatedValues.append(interpolatorNelsonSiegel.interpolate(coordinatesForM.values))
return pd.Series(np.ravel(interpolatedValues), coordinatesRow.index)
#Format with same format as ttms
nelsonList = list(map(lambda x : x[1], nelsonDf.iterrows()))
coordinatesList = list(map(lambda x : x[1], coordinatesDf.iterrows()))
interpolatedVol = list(map(lambda x : getVolFromNelsonParametersApply(x[0],x[1]),
zip(nelsonList, coordinatesList)))
return pd.DataFrame(np.reshape(interpolatedVol, coordinatesDf.shape),
index = coordinatesDf.index,
columns = coordinatesDf.columns )
#Calibrate nelson siegel interpolation for each day and each moneyness
def calibrateModelMoneynessWiseDaily(dataSet):
dfList = dataSet.getDataForModel()#(dataSet.trainVol.head(20).index)
moneynesses = np.unique(getMoneynessFromCoordinates(dfList))
moneynessDf = getMoneynessFromCoordinates(dfList)
ttmDf = getTTMFromCoordinates(dataSet.formatModelDataAsDataSet(dfList))
volDf = dataSet.formatModelDataAsDataSet(dfList)[0]
rankForMList = []
TTMForMList = []
AvgTTMForMList = []
volForMList = []
interpolationCurvesList = []
interpolatedCurveList = []
refCurveList = []
nelsonList = []
def treatCurve(curveVol, curveTTM):
ttmToInterpolate = curveTTM.dropna()
        volToInterpolate = curveVol.dropna()
        interpolatorNelsonSiegel = NelsonSiegelCalibrator(3, {})
        interpolatorNelsonSiegel.calibrate(volToInterpolate.values, ttmToInterpolate[volToInterpolate.index].values)
        interpolationCurve = interpolatorNelsonSiegel.interpolate(ttmToInterpolate.values)
        calibratedCurve = pd.Series(volToInterpolate.values,
                                    index = volToInterpolate.index).rename(curveVol.name)
nonCalibratedTTM = curveVol.index.difference(calibratedCurve.index)
calibratedCurve = calibratedCurve.append(pd.Series([np.NaN]*nonCalibratedTTM.size,
index = nonCalibratedTTM)).sort_index()
interpolatedCurve = pd.Series(interpolationCurve, index = ttmToInterpolate.index).rename(curveVol.name)
nonInterpolatedTTM = curveVol.index.difference(interpolatedCurve.index)
interpolatedCurve = interpolatedCurve.append(pd.Series([np.NaN]*nonInterpolatedTTM.size,
index = nonInterpolatedTTM)).sort_index()
return (calibratedCurve, interpolatedCurve, np.append(interpolatorNelsonSiegel.beta , interpolatorNelsonSiegel.alpha))
for m in moneynesses:#For a fixed moneyness
#Gather values for corresponding moneyness
rankForM = moneynessDf[moneynessDf == m].dropna(how="all", axis=1).columns
rankForMList.append(rankForM)
TTMForM = ttmDf[rankForM] #.dropna(how="any", axis=0)
TTMForMList.append(TTMForM)
AvgTTMForMList.append(TTMForM.mean(axis=0).round())
volForMList.append(volDf[rankForM]) #.dropna(how="any", axis=0))
#Turn dataframe as a list of series for applying operation jointly on two dataframe
volSeriesListForM = list(map(lambda x : x[1], volForMList[-1].iterrows()))
coordinatesSeriesListForM = list(map(lambda x : x[1], TTMForMList[-1].iterrows()))
#Estimate Nelson siegel paramters for every day
interpolationCurvesList.append(list(map(lambda x : treatCurve( x[0], x[1]) ,
zip(volSeriesListForM, coordinatesSeriesListForM))))
#Data used for nelson siegle calibration, should be equal to volForMList
refCurveList.append(pd.DataFrame(list(map(lambda x : x[0], interpolationCurvesList[-1])),
index = volForMList[-1].index,
columns = volForMList[-1].columns))
#Interpolated volatility
interpolatedCurveList.append(pd.DataFrame(list(map(lambda x : x[1], interpolationCurvesList[-1])),
index = volForMList[-1].index,
columns = volForMList[-1].columns))
#Parameters estimated every day
nelsonList.append(pd.DataFrame(list(map(lambda x : x[2], interpolationCurvesList[-1])),
index = volForMList[-1].index))
print(m)
return mergeResults(TTMForMList, AvgTTMForMList, volForMList, interpolatedCurveList, refCurveList, nelsonList, dfList)
#Calibrate a model that interpolates a whole surface (not a single smile) with single parameters
def calibrateModelDayWise(dataSet):
    #Same dataSet interface as in calibrateModelMoneynessWiseDaily
    dfList = dataSet.getDataForModel()
    moneynesses = np.unique(getMoneynessFromCoordinates(dfList))
    moneynessDf = getMoneynessFromCoordinates(dfList)
    ttmDf = getTTMFromCoordinates(dataSet.formatModelDataAsDataSet(dfList))
    volDf = dataSet.formatModelDataAsDataSet(dfList)[0]
rankForMList = []
TTMForMList = []
AvgTTMForMList = []
volForMList = []
interpolationCurvesList = []
interpolatedCurveList = []
refCurveList = []
nelsonList = []
def treatSurface(surfaceVol, surfaceTTM, surfaceMoneyness):
ttmToInterpolate = surfaceTTM.dropna()
moneynessToInterpolate = surfaceMoneyness.dropna()
ttmToInterpolate = ttmToInterpolate[ttmToInterpolate.index.intersection(moneynessToInterpolate.index)]
moneynessToInterpolate = moneynessToInterpolate[ttmToInterpolate.index]
volToInterpolate = surfaceVol.dropna()
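        #Assumes a project-level `gaussianProcess` module (not imported in this file) exposing a Spline surface interpolator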
interpolatorSpline = gaussianProcess.Spline(3, {})
interpolatorSpline.calibrate(volToInterpolate.values,
ttmToInterpolate[volToInterpolate.index].values,
moneynessToInterpolate[volToInterpolate.index].values)
interpolationCurve = interpolatorSpline.interpolate(ttmToInterpolate.values, moneynessToInterpolate.values)
calibratedCurve = pd.Series(volToInterpolate.values,
index = volToInterpolate.index).rename(surfaceVol.name)
nonCalibratedTTM = surfaceVol.index.difference(calibratedCurve.index)
calibratedCurve = calibratedCurve.append(pd.Series([np.NaN]*nonCalibratedTTM.size,
index = nonCalibratedTTM)).sort_index()
interpolatedCurve = pd.Series(interpolationCurve, index = ttmToInterpolate.index).rename(surfaceVol.name)
nonInterpolatedTTM = surfaceVol.index.difference(interpolatedCurve.index)
interpolatedCurve = interpolatedCurve.append(pd.Series([np.NaN]*nonInterpolatedTTM.size,
index = nonInterpolatedTTM)).sort_index()
return (calibratedCurve, interpolatedCurve, interpolatorSpline.beta)
volSeriesList = list(map(lambda x : x[1], volDf.iterrows()))
moneynessSeriesList = list(map(lambda x : x[1], moneynessDf.iterrows()))
ttmSeriesList = list(map(lambda x : x[1], ttmDf.iterrows()))
dailyData = list(map(lambda x : treatSurface( x[0], x[1], x[2]) ,
zip(volSeriesList, ttmSeriesList, moneynessSeriesList)))
    #Build one row per day from the per-day series (same pattern as calibrateModelMoneynessWiseDaily)
    interpolatedDf = pd.DataFrame(list(map(lambda x : x[1], dailyData)),
                                  index = volDf.index,
                                  columns = volDf.columns)
    refDf = pd.DataFrame(list(map(lambda x : x[0], dailyData)),
                         index = volDf.index,
                         columns = volDf.columns)
    paramDf = pd.DataFrame(list(map(lambda x : x[2], dailyData)),
                           index = volDf.index)
#paramIndex = pd.MultiIndex.from_product([moneynesses, paramDf.columns],
# names=["Moneyness", "Spline Parameters"])
volIndex = pd.MultiIndex.from_product([moneynesses, np.arange(1, int(interpolatedDf.shape[1] / moneynesses.size) + 1, 1)],
names=["Moneyness", "Rank"])
reindexedVolDf = pd.DataFrame(volDf.values,
index = volDf.index,
columns = volIndex)
    reindexedCoordinatesDf = pd.DataFrame(ttmDf.values,
                                          index = ttmDf.index,
                                          columns = volIndex)
return interpolatedDf, refDf, paramDf, reindexedCoordinatesDf, reindexedVolDf
def calibrateDataSetWithNelsonSiegel(pathTestFile, dataSet, restoreResults = True):
if restoreResults :
nelsonDf, interpVolDf = loadData.readInterpolationResult(pathTestFile)
else :
interpVolDf, refVolDf, nelsonDf, coordinatesDf, volDf = calibrateModelMoneynessWiseDaily(dataSet)
loadData.saveInterpolationResult(pathTestFile, nelsonDf, interpVolDf)
moneynesses = np.unique(getMoneynessFromCoordinates(dataSet.getDataForModel()))
volDf = dataSet.formatModelDataAsDataSet(dataSet.getDataForModel())[0]
volIndex = pd.MultiIndex.from_product([moneynesses, np.arange(1, int(volDf.shape[1] / moneynesses.size) + 1, 1)],
names=["Moneyness", "Rank"])
volDf = pd.DataFrame(volDf.values, index = volDf.index, columns = volIndex)
coordinatesDf = getTTMFromCoordinates(dataSet.formatModelDataAsDataSet(dataSet.getDataForModel()))
coordinatesDf = pd.DataFrame(coordinatesDf.values, index = coordinatesDf.index, columns = volIndex)
######################## Plot parameters
plt.plot(nelsonDf.iloc[:,0], label = "Beta1")
plt.show()
plt.plot(nelsonDf.iloc[:,1], label = "Beta2")
plt.show()
plt.plot(nelsonDf.iloc[:,2], label = "Beta3")
plt.show()
plt.plot(nelsonDf.iloc[:,3], label = "alpha1")
plt.show()
print(nelsonDf.head())
######################## Plot error
maeInterp = np.abs(np.nanmean(np.abs(interpVolDf.values - volDf.values)/volDf.values, axis=1))
plt.plot(interpVolDf.index, maeInterp)
plt.show()
rmseInterp = np.sqrt(np.nanmean(np.square(interpVolDf.values - volDf.values), axis=1))
plt.plot(interpVolDf.index, rmseInterp)
plt.show()
############################## Analyse worst estimation
moneynessPlot = 1.0
rowVol = volDf.transpose()[volDf.columns.get_level_values("Moneyness") == moneynessPlot].transpose()
rowInterpVol = interpVolDf.transpose()[volDf.columns.get_level_values("Moneyness") == moneynessPlot].transpose()
rowTTM = coordinatesDf[rowVol.columns]
rowImpliedTotalVariance = np.square(rowVol * rowTTM / 250)
rowInterpImpliedTotalVariance = np.square(pd.DataFrame(rowInterpVol.values,
index = rowVol.index,
columns = rowVol.columns) * rowTTM / 250)
dayPlot = np.argmax(rmseInterp)
plt.plot(rowTTM.dropna(how="all",axis=1).iloc[dayPlot].values,
rowInterpVol.dropna(how="all",axis=1).iloc[dayPlot].values,
"-",
label = "Nelson-Siegel")
plt.plot(rowTTM.dropna(how="all",axis=1).iloc[dayPlot].values,
rowVol.dropna(how="all",axis=1).iloc[dayPlot].values,
"+",
label = "Ref")
plt.legend()
plt.show()
plt.plot(rowTTM.dropna(how="all",axis=1).iloc[dayPlot].values,
rowInterpImpliedTotalVariance.dropna(how="all",axis=1).iloc[dayPlot].values,
"-",
label = "Nelson-Siegel")
plt.title("Implied vol")
plt.plot(rowTTM.dropna(how="all",axis=1).iloc[dayPlot].values,
rowImpliedTotalVariance.dropna(how="all",axis=1).iloc[dayPlot].values,
"+",
label = "Ref")
plt.title("Implied total variance")
plt.legend()
plt.show()
plt.plot(rowTTM.dropna(how="all",axis=1).iloc[-2].values,
(rowVol.dropna(how="all",axis=1).iloc[-2].values - rowInterpVol.dropna(how="all",axis=1).iloc[-1].values)/rowVol.dropna(how="all",axis=1).iloc[-1].values,
"+",
label = "Ref")
plt.title("Implied vol relative mae")
plt.show()
#absolute error
#interp2Df = getVolFromNelsonParameters(nelsonDf, coordinatesDf)
#interp2Df.head()
#plt.plot(interpVolDf.index, np.sqrt(np.nanmean(np.square(interpVolDf.values - volDf.values), axis=1)))
#relative error
#plt.plot(interpVolDf.index, np.abs(np.nanmean(np.abs(interpVolDf.values - interp2Df.values)/interp2Df.values, axis=1)))
#plt.show()
return
class NelsonSiegel(LinearInterpolation):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestNelsonSiegelInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface,
coordinates)
nanSurface = incompleteSurface.drop(filteredSurface.index)
knownValues = filteredSurface.dropna() #No value is interpolated
if knownValues.size == filteredSurface.size : #No value to interpolate
return incompleteSurface
knownLocation = filteredCoordinates.loc[knownValues.index]
locationToInterpolate = filteredCoordinates.drop(knownValues.index)
interpolatedValues = self.customInterpolator(knownValues,
knownLocation,
locationToInterpolate)
completeSurface = pd.Series(interpolatedValues,
index = locationToInterpolate.index).append(knownValues)
interpolatedSurface = completeSurface.loc[filteredSurface.index].rename(filteredSurface.name)
return interpolatedSurface.append(nanSurface)[incompleteSurface.index].rename(incompleteSurface.name)
def customInterpolator(self, interpolatedData, formerCoordinates, NewCoordinates):
knownPositions = formatCoordinatesAsArray(formerCoordinates)
xNew = np.ravel(list(map(lambda x : x[0], NewCoordinates))) #Maturity
yNew = np.ravel(list(map(lambda x : x[1], NewCoordinates))) #Moneyness
#Group coordinates by moneyness
curveNewDict = {}
for idx in NewCoordinates.index :
m = NewCoordinates[idx][1]
ttm = NewCoordinates[idx][0]
if m not in curveNewDict :
curveNewDict[m] = [[ttm], [idx]]
else :
curveNewDict[m][0].append(ttm)
curveNewDict[m][1].append(idx)
#Group coordinates by moneyness
curveOldDict = {}
for idx in formerCoordinates.index :
m = formerCoordinates[idx][1]
ttm = formerCoordinates[idx][0]
v = interpolatedData[idx]
if m not in curveOldDict :
curveOldDict[m] = [[ttm], [v]]
else :
curveOldDict[m][0].append(ttm)
curveOldDict[m][1].append(v)
fInterpolation = | pd.Series() | pandas.Series |
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME> <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python modules
import glob
import os
# Third party modules
import gpxpy
from gpxpy.gpx import GPXBounds
import pandas as pd
from pandas import DataFrame
from tqdm import tqdm
import geopy
# Own modules
from trackanimation import utils as trk_utils
from trackanimation.utils import TrackException
class DFTrack:
def __init__(self, df_points=None, columns=None):
if df_points is None:
self.df = DataFrame()
        elif isinstance(df_points, pd.DataFrame):
self.df = df_points
else:
if columns is None:
columns = ['CodeRoute', 'Latitude', 'Longitude', 'Altitude', 'Date',
'Speed', 'TimeDifference', 'Distance', 'FileName']
self.df = DataFrame(df_points, columns=columns)
def export(self, filename='exported_file', export_format='csv'):
"""
Export a data frame of DFTrack to JSON or CSV.
Parameters
----------
export_format: string
Format to export: JSON or CSV
filename: string
Name of the exported file
"""
if export_format.lower() == 'json':
self.df.reset_index().to_json(orient='records', path_or_buf=filename+'.json')
elif export_format.lower() == 'csv':
self.df.to_csv(path_or_buf=filename+'.csv')
else:
raise TrackException('Must specify a valid format to export', "'%s'" % export_format)
def getTracks(self):
"""
Makes a copy of the DFTrack.
Explanation:
http://stackoverflow.com/questions/27673231/why-should-i-make-a-copy-of-a-data-frame-in-pandas
Returns
-------
copy: DFTrack
The copy of DFTrack.
"""
return self.__class__(self.df.copy(), list(self.df))
def sort(self, column_name):
"""
Sorts the data frame by the specified column.
Parameters
----------
column_name: string
Column name to sort
Returns
-------
sort: DFTrack
DFTrack sorted
"""
if isinstance(column_name, list):
for column in column_name:
if column not in self.df:
raise TrackException('Column name not found', "'%s'" % column)
else:
if column_name not in self.df:
raise TrackException('Column name not found', "'%s'" % column_name)
return self.__class__(self.df.sort_values(column_name), list(self.df))
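    # Typical chained usage (sketch):
    #   track_df = DFTrack(df_points)
    #   track_df.sort('Date').export('sorted_tracks', export_format='json')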
def getTracksByPlace(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in Google's API
and, if it does not get anything, it tries with OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
track_place = self.getTracksByPlaceGoogle(place, timeout=timeout, only_points=only_points)
if track_place is not None:
return track_place
track_place = self.getTracksByPlaceOSM(place, timeout=timeout, only_points=only_points)
if track_place is not None:
return track_place
return None
def getTracksByPlaceGoogle(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in Google's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
try:
geolocator = geopy.GoogleV3()
location = geolocator.geocode(place, timeout=timeout)
except geopy.exc.GeopyError as geo_error:
return None
southwest_lat = float(location.raw['geometry']['bounds']['southwest']['lat'])
northeast_lat = float(location.raw['geometry']['bounds']['northeast']['lat'])
southwest_lng = float(location.raw['geometry']['bounds']['southwest']['lng'])
northeast_lng = float(location.raw['geometry']['bounds']['northeast']['lng'])
df_place = self.df[(self.df['Latitude'] < northeast_lat) & (self.df['Longitude'] < northeast_lng) &
(self.df['Latitude'] > southwest_lat) & (self.df['Longitude'] > southwest_lng)]
if only_points:
return self.__class__(df_place)
track_list = df_place['CodeRoute'].unique().tolist()
return self.__class__(self.df[self.df['CodeRoute'].isin(track_list)])
def getTracksByPlaceOSM(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
try:
geolocator = geopy.Nominatim()
location = geolocator.geocode(place, timeout=timeout)
except geopy.exc.GeopyError as geo_error:
return None
southwest_lat = float(location.raw['boundingbox'][0])
northeast_lat = float(location.raw['boundingbox'][1])
southwest_lng = float(location.raw['boundingbox'][2])
northeast_lng = float(location.raw['boundingbox'][3])
df_place = self.df[(self.df['Latitude'] < northeast_lat) & (self.df['Longitude'] < northeast_lng) &
(self.df['Latitude'] > southwest_lat) & (self.df['Longitude'] > southwest_lng)]
if only_points:
return self.__class__(df_place)
track_list = df_place['CodeRoute'].unique().tolist()
return self.__class__(self.df[self.df['CodeRoute'].isin(track_list)])
def getTracksByDate(self, start=None, end=None, periods=None, freq='D'):
"""
Gets the points of the specified date range
using various combinations of parameters.
2 of 'start', 'end', or 'periods' must be specified.
Date format recommended: 'yyyy-mm-dd'
Parameters
----------
start: date
Date start period
end: date
Date end period
periods: int
Number of periods. If None, must specify 'start' and 'end'
freq: string
Frequency of the date range
Returns
-------
df_date: DFTrack
A DFTrack with the points of the specified date range.
"""
if trk_utils.isTimeFormat(start) or trk_utils.isTimeFormat(end):
            raise TrackException('Must specify an appropriate date format', 'Time format found')
rng = pd.date_range(start=start, end=end, periods=periods, freq=freq)
df_date = self.df.copy()
df_date['Date'] = pd.to_datetime(df_date['Date'])
df_date['ShortDate'] = df_date['Date'].apply(lambda date: date.date().strftime('%Y-%m-%d'))
df_date = df_date[df_date['ShortDate'].apply(lambda date: date in rng)]
del df_date['ShortDate']
df_date = df_date.reset_index(drop=True)
return self.__class__(df_date, list(df_date))
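    # Example (sketch): keep only the points recorded during August 2013
    #   df_aug = track_df.getTracksByDate(start='2013-08-01', end='2013-08-31')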
def getTracksByTime(self, start, end, include_start=True, include_end=True):
"""
Gets the points between the specified time range.
Parameters
----------
start: datetime.time
Time start period
end: datetime.time
Time end period
include_start: boolean
include_end: boolean
Returns
-------
df_time: DFTrack
A DFTrack with the points of the specified date and time periods.
"""
if not trk_utils.isTimeFormat(start) or not trk_utils.isTimeFormat(end):
            raise TrackException('Must specify an appropriate time format', trk_utils.TIME_FORMATS)
df_time = self.df.copy()
index = pd.DatetimeIndex(df_time['Date'])
df_time = df_time.iloc[index.indexer_between_time(start_time=start, end_time=end, include_start=include_start, include_end=include_end)]
df_time = df_time.reset_index(drop=True)
return self.__class__(df_time, list(df_time))
def pointVideoNormalize(self):
df = self.df.copy()
df_norm = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2014 Open Data Science Initiative and other authors. See AUTHORS.txt
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import csv
import copy
import numpy as np
import scipy.io
import datetime
import json
import yaml
import re
import tarfile
import logging
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(message)s",
filename="/tmp/sods.log",
filemode="w",
)
from functools import reduce
import pandas as pd
from .config import *
from . import access
from . import util
DATAPATH = os.path.expanduser(os.path.expandvars(config.get("datasets", "dir")))
PYTRENDS_AVAILABLE = True
try:
from pytrends.request import TrendReq
except ImportError:
PYTRENDS_AVAILABLE = False
GPY_AVAILABLE = True
try:
import GPy
except ImportError:
GPY_AVAILABLE = False
NETPBMFILE_AVAILABLE = True
try:
import netpbmfile
except ImportError:
NETPBMFILE_AVAILABLE = False
GEOPANDAS_AVAILABLE = True
try:
import geopandas
except ImportError:
GEOPANDAS_AVAILABLE = False
if sys.version_info >= (3, 0):
from urllib.parse import quote
from urllib.request import urlopen
else:
from urllib2 import quote
from urllib2 import urlopen
# Global variables
default_seed = 10000
def bmi_steps(data_set="bmi_steps"):
if not access.data_available(data_set):
access.download_data(data_set)
data = pd.read_csv(os.path.join(access.DATAPATH, data_set, "steps-bmi-data.csv"))
X = np.hstack(
(data["steps"].values[:, np.newaxis], data["bmi"].values[:, np.newaxis])
)
Y = data["gender"].values[:, None]
return access.data_details_return(
{"X": X, "Y": Y, "covariates": ["steps", "bmi"], "response": ["gender"]},
data_set,
)
# The data sets
def boston_housing(data_set="boston_housing"):
if not access.data_available(data_set):
access.download_data(data_set)
all_data = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "housing.data"))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return access.data_details_return({"X": X, "Y": Y}, data_set)
def boxjenkins_airline(data_set="boxjenkins_airline", num_train=96):
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
access.download_data(data_set)
data = np.loadtxt(
os.path.join(access.DATAPATH, data_set, "boxjenkins_airline.csv"), delimiter=","
)
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"covariates": [util.decimalyear("year")],
"response": ["AirPassengers"],
"info": "Monthly airline passenger data from Box & Jenkins 1976.",
},
data_set,
)
def brendan_faces(data_set="brendan_faces"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "frey_rawface.mat"))
Y = mat_data["ff"].T
return access.data_details_return({"Y": Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set="della_gatta", gene_number=None):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "DellaGattadata.mat"))
X = np.double(mat_data["timepoints"])
if gene_number is None:
Y = mat_data["exprs_tp53_RMA"]
else:
Y = mat_data["exprs_tp53_RMA"][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return access.data_details_return({"X": X, "Y": Y, "gene_number": gene_number}, data_set)
def epomeo_gpx(data_set="epomeo_gpx", sample_every=4):
"""Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run."""
try:
import gpxpy
import gpxpy.gpx
except ImportError:
print("Need to install gpxpy to process the empomeo_gpx dataset.")
return
if not access.data_available(data_set):
access.download_data(data_set)
files = [
"endomondo_1",
"endomondo_2",
"garmin_watch_via_endomondo",
"viewranger_phone",
"viewranger_tablet",
]
X = []
for file in files:
gpx_file = open(os.path.join(access.DATAPATH, "epomeo_gpx", file + ".gpx"), "r")
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [
point
for track in gpx.tracks
for segment in track.segments
for point in segment.points
]
data = [
[
(
point.time
- datetime.datetime(2013, 8, 21, tzinfo=datetime.timezone.utc)
).total_seconds(),
point.latitude,
point.longitude,
point.elevation,
]
for point in points
]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
X = pd.DataFrame(
X[0], columns=["seconds", "latitude", "longitude", "elevation"]
)
X.set_index(keys="seconds", inplace=True)
return access.data_details_return(
{
"X": X,
"info": "Data is an array containing time in seconds, latitude, longitude and elevation in that order.",
},
data_set,
)
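# Minimal usage sketch for epomeo_gpx (hedged): assumes the data set can be downloaded and
# that gpxpy is installed; the climb computation below is purely illustrative.
def _example_epomeo_gpx():
    data = epomeo_gpx(sample_every=4)
    X = data["X"]  # DataFrame indexed by seconds with latitude, longitude and elevation
    print(X.head())
    print("Total ascent (m):", X["elevation"].diff().clip(lower=0).sum())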
if GEOPANDAS_AVAILABLE:
def nigerian_administrative_zones(
data_set="nigerian_administrative_zones", refresh_data=False
):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
from zipfile import ZipFile
with ZipFile(
os.path.join(access.DATAPATH, data_set, "nga_admbnda_osgof_eha_itos.gdb.zip"), "r"
) as zip_ref:
zip_ref.extractall(
os.path.join(access.DATAPATH, data_set, "nga_admbnda_osgof_eha_itos.gdb")
)
states_file = "nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/"
from geopandas import read_file
Y = read_file(os.path.join(access.DATAPATH, data_set, states_file), layer=1)
Y.crs = "EPSG:4326"
Y.set_index("admin1Name_en")
return access.data_details_return({"Y": Y}, data_set)
def nigerian_covid(data_set="nigerian_covid", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "line-list-nigeria.csv")
Y = pd.read_csv(
filename,
parse_dates=[
"date",
"date_confirmation",
"date_onset_symptoms",
"date_admission_hospital",
"death_date",
],
)
return access.data_details_return({"Y": Y}, data_set)
def nigeria_nmis(data_set="nigeria_nmis", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "healthmopupandbaselinenmisfacility.csv")
Y = pd.read_csv(filename)
return access.data_details_return({"Y": Y}, data_set)
def nigerian_population(data_set="nigerian_population", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "nga_admpop_adm1_2020.csv")
Y = pd.read_csv(filename)
Y.dropna(axis=1, how='all', inplace=True)
Y.dropna(axis=0, how='any', inplace=True)
Y.rename(columns = {"ADM0_NAME":"admin0Name_en",
"ADM0_PCODE" : "admin0Pcode",
"ADM1_NAME" : "admin1Name_en",
"ADM1_PCODE" : "admin1Pcode",
"T_TL" :"population"},
inplace=True)
Y["admin0Name_en"] = Y["admin0Name_en"].str.capitalize()
Y["admin1Name_en"] = Y["admin1Name_en"].str.capitalize()
Y = Y.set_index("admin1Name_en")
return access.data_details_return({"Y": Y}, data_set)
def pmlr(volumes="all", data_set="pmlr", refresh_data=False):
"""Abstracts from the Proceedings of Machine Learning Research"""
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
proceedings = access.pmlr_proceedings_list(data_set)
# Create a new resources entry for downloading contents of proceedings.
data_name_full = "pmlr"
access.data_resources[data_set]["dirs"] = [['.']]
for entry in proceedings:
if volumes == "all" or entry["volume"] in volumes:
file = entry["yaml"].split("/")[-1]
proto, url = entry["yaml"].split("//")
file = os.path.basename(url)
dirname = os.path.dirname("/".join(url.split("/")[1:]))
urln = proto + "//" + url.split("/")[0]
access.data_resources[data_name_full]["files"].append([file])
access.data_resources[data_name_full]["dirs"].append([dirname])
access.data_resources[data_name_full]["urls"].append(urln)
Y = []
# Download the volume data
if not access.data_available(data_name_full):
access.download_data(data_name_full)
for entry in reversed(proceedings):
volume = entry["volume"]
# data_name_full = data_name_full_stub + "v" + str(volume)
if volumes == "all" or volume in volumes:
file = entry["yaml"].split("/")[-1]
proto, url = entry["yaml"].split("//")
file = os.path.basename(url)
dirname = os.path.dirname("/".join(url.split("/")[1:]))
urln = proto + "//" + url.split("/")[0]
volume_file = open(
os.path.join(access.DATAPATH, data_name_full, dirname, file), "r"
)
Y += yaml.load(volume_file, Loader=yaml.FullLoader)
Y = pd.DataFrame(Y)
Y["published"] = pd.to_datetime(Y["published"])
# Y.columns.values[4] = util.json_object('authors')
# Y.columns.values[7] = util.json_object('editors')
try:
Y["issued"] = Y["issued"].apply(
lambda x: np.datetime64(datetime.datetime(*x["date-parts"]))
)
except TypeError as e:
    raise TypeError("Type error for entry\n" + str(Y["issued"])) from e
def full_name(person):
order = ["given", "prefix", "family", "suffix"]
names = [str(person[key]) for key in order if key in person and person[key] is not None]
return " ".join(names)
Y["author"] = Y["author"].apply(
lambda x: ', '.join([full_name(author) for author in x])
)
Y["editor"] = Y["editor"].apply(
lambda x: ', '.join([full_name(editor) for editor in x])
)
columns = list(Y.columns)
columns[14] = util.datetime64_("published")
columns[11] = util.datetime64_("issued")
Y.columns = columns
return access.data_details_return(
{
"Y": Y,
"info": "Data is a pandas data frame containing each paper, its abstract, authors, volumes and venue.",
},
data_set,
)
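# Usage sketch for pmlr (hedged): downloading every volume is slow, so a typical call
# restricts the download. The volume identifiers below are illustrative and may need to
# match the type (int or str) used in the proceedings list.
def _example_pmlr():
    data = pmlr(volumes=[1, 5])
    papers = data["Y"]  # pandas DataFrame; 'author' and 'published' columns are set above
    print(papers[["author", "published"]].head())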
def football_data(season="1617", data_set="football_data"):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
league_dict = {"E0": 0, "E1": 1, "E2": 2, "E3": 3, "EC": 4}
def league2num(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
return league_dict[string]
def football2num(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
if string in access.football_dict:
return access.football_dict[string]
else:
access.football_dict[string] = len(access.football_dict) + 1
return len(access.football_dict) + 1
def datestr2num(s):
return util.date2num(datetime.datetime.strptime(s.decode("utf-8"), "%d/%m/%y"))
data_set_season = data_set + "_" + season
access.data_resources[data_set_season] = copy.deepcopy(access.data_resources[data_set])
access.data_resources[data_set_season]["urls"][0] += season + "/"
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ["E0.csv", "E1.csv", "E2.csv", "E3.csv"]
if start_year > 4 and start_year < 93:
files += ["EC.csv"]
access.data_resources[data_set_season]["files"] = [files]
if not access.data_available(data_set_season):
access.download_data(data_set_season)
start = True
for file in reversed(files):
filename = os.path.join(access.DATAPATH, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(access.DATAPATH, data_set_season, "temp.csv")
input = open(filename, encoding="ISO-8859-1")
output = open(writename, "w")
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(
writename,
skiprows=1,
usecols=(0, 1, 2, 3, 4, 5),
converters={
0: league2num,
1: datestr2num,
2: football2num,
3: football2num,
},
delimiter=",",
)
if start:
X = table[:, :4]
Y = table[:, 4:]
start = False
else:
X = np.append(X, table[:, :4], axis=0)
Y = np.append(Y, table[:, 4:], axis=0)
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [
util.discrete(league_dict, "league"),
util.datenum("match_day"),
util.discrete(access.football_dict, "home team"),
util.discrete(access.football_dict, "away team"),
],
"response": [util.integer("home score"), util.integer("away score")],
},
data_set,
)
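# Usage sketch for football_data (hedged): covariates are encoded league/date/team indices
# and the response holds home and away scores, so a quick sanity check might be:
def _example_football_data():
    data = football_data(season="1617")
    X, Y = data["X"], data["Y"]
    print("matches:", X.shape[0], "mean home goals:", Y[:, 0].mean())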
def sod1_mouse(data_set="sod1_mouse"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "sod1_C57_129_exprs.csv")
Y = pd.read_csv(filename, header=0, index_col=0)
num_repeats = 4
num_time = 4
num_cond = 4
return access.data_details_return({"Y": Y}, data_set)
def spellman_yeast(data_set="spellman_yeast"):
"""This is the classic Spellman et al 1998 Yeast Cell Cycle gene expression data that is widely used as a benchmark."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "combined.txt")
Y = pd.read_csv(filename, header=0, index_col=0, sep="\t")
return access.data_details_return({"Y": Y}, data_set)
def spellman_yeast_cdc15(data_set="spellman_yeast"):
"""These are the gene expression levels from the CDC-15 experiment of Spellman et al (1998)."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "combined.txt")
Y = pd.read_csv(filename, header=0, index_col=0, sep="\t")
t = np.asarray(
[
10,
30,
50,
70,
80,
90,
100,
110,
120,
130,
140,
150,
170,
180,
190,
200,
210,
220,
230,
240,
250,
270,
290,
]
)
times = ["cdc15_" + str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return access.data_details_return(
{
"Y": Y,
"t": t,
"info": "Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).",
},
data_set,
)
def lee_yeast_ChIP(data_set="lee_yeast_ChIP"):
"""Yeast ChIP data from Lee et al."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "binding_by_gene.tsv")
S = pd.read_csv(filename, header=1, index_col=0, sep="\t")
transcription_factors = [col for col in S.columns if col[:7] != "Unnamed"]
annotations = S[["Unnamed: 1", "Unnamed: 2", "Unnamed: 3"]]
S = S[transcription_factors]
return access.data_details_return(
{
"annotations": annotations,
"Y": S,
"transcription_factors": transcription_factors,
},
data_set,
)
def fruitfly_tomancak(data_set="fruitfly_tomancak", gene_number=None):
"""Fruitfly gene expression data from Tomancak et al."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "tomancak_exprs.csv")
Y = pd.read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time - 1, num_time)
xr = np.linspace(0, num_repeats - 1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return access.data_details_return({"X": X, "Y": Y, "gene_number": gene_number}, data_set)
def drosophila_protein(data_set="drosophila_protein"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "becker_et_al.csv")
Y = pd.read_csv(filename, header=0)
return access.data_details_return({"Y": Y}, data_set)
def drosophila_knirps(data_set="drosophila_protein"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "becker_et_al.csv")
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = | pd.read_csv(filename, header=0) | pandas.read_csv |
import time
from collections import defaultdict
from datetime import timedelta
import cvxpy as cp
import empiricalutilities as eu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transfer_entropy import TransferEntropy
plt.style.use('fivethirtyeight')
# %%
eqs = 'SPY DIA XLK XLV XLF IYZ XLY XLP XLI XLE XLU XME IYR XLB XPH IWM PHO ' \
'SOXX WOOD FDN GNR IBB ILF ITA IYT KIE PBW ' \
'AFK EZA ECH EWW EWC EWZ EEM EIDO EPOL EPP EWA EWD EWG EWH EWJ EWI EWK ' \
'EWL EWM EWP EWQ EWS EWT EWU EWY GXC HAO EZU RSX TUR'.split()
fi = 'AGG SHY IEI IEF TLT TIP LQD HYG MBB'.split()
cmdtys = 'GLD SLV DBA DBC USO UNG'.split()
fx = 'FXA FXB FXC FXE FXF FXY'.split()
assets = eqs + fi + cmdtys + fx
def cum_rets(rets):
cum_rets = []
cum_rets.append(1)
for i, ret in enumerate(rets):
cum_rets.append(cum_rets[i]*(1+ret))
return cum_rets
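# Quick illustration of cum_rets: a constant 1% return compounds multiplicatively,
# starting from an initial value of 1 (output length is len(rets) + 1).
assert np.allclose(cum_rets([0.01, 0.01]), [1.0, 1.01, 1.0201])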
# %%
ete_mats = {}
mod = TransferEntropy(assets=assets)
period = 'Q'
months = mod.prices.index.to_period(period).unique().to_timestamp()
iters = len(months)-24
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
mod.compute_effective_transfer_entropy(sims=30, bins=6,
std_threshold=1)
ete = mod.ete.copy()
ete_mats[start] = ete
pbar.update(1)
ete_df = pd.concat(ete_mats)
ete_df.to_csv(f'../ete_{period}.csv')
# %%
q = 4
res = defaultdict(dict)
mod = TransferEntropy(assets=assets)
iters = len(months)-1
for start, end in zip(months[:-1], months[1:]):
ete = ete_mats[start]
ete_out = ete.sum(axis=0)
ete_in = ete.sum(axis=1)
end -= timedelta(1)
mod.set_timeperiod(start, end)
returns = mod.prices.iloc[-1]/mod.prices.iloc[0]-1
vols = mod.data.std()
names = 'eteout etein etenetout etetotal'.split()
for name, ETE in zip(names, [ete_out, ete_in,
ete_out-ete_in, ete_in+ete_out]):
df = pd.DataFrame({'returns': returns, 'vol': vols, name: ETE})
df['q'] = | pd.qcut(ETE, q=q, labels=False) | pandas.qcut |
from datetime import datetime, date, time, timezone
from passlib.apps import custom_app_context as pwd_context
import shelve
import pandas as pd
import pickle
import uuid
import streamlit as st
import time
class Survey_Instance():
version = .02
config_file = 'config'
survey_db = 'survey_db.pkl'
comment_db = 'comment_db.pkl'
pass_db = 'pass.pkl'
def __init__(self, session):
self.session = session
def authenticate(self):
self.open_shelf()
if self.session.user_class == 'Student':
drop_down = 'student_list'
elif self.session.user_class == 'Teacher':
drop_down = 'teacher_list'
self.selected_teacher = st.selectbox('Please choose your name below', self.shelf[drop_down])
self.password = st.text_input('Please enter your password below')
self.check_password()
def check_password(self):
self.open_pass_db()
password_check = pwd_context.verify(self.password, self.loaded_passwords.get(self.selected_teacher))
if password_check == True:
st.success('You are now authenticated. Please pick from an action in the main menu to the left.')
self.session.auth_status=True
self.session.user=self.selected_teacher
else:
st.error('Your password is not correct')
self.session.auth_status=False
self.session.user='Not logged in'
def open_pass_db(self):
try:
with open(self.pass_db, 'rb') as handle:
self.loaded_passwords = pickle.load(handle)
except:
st.error('There was an error opening password db')
self.loaded_passwords = {}
finally:
return self.loaded_passwords
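    # Hedged sketch of how the password pickle consumed above is assumed to look: a plain
    # dict mapping a user's display name to a passlib hash. The snippet below is illustrative
    # only and is not called anywhere in this class.
    #
    #   hashes = {'Jane Doe': pwd_context.hash('her-password')}
    #   with open(Survey_Instance.pass_db, 'wb') as handle:
    #       pickle.dump(hashes, handle, protocol=pickle.HIGHEST_PROTOCOL)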
def add_user(self):
user_id = uuid.uuid4().int
user = st.text_input('Type the name of the user. E.g., <NAME>')
password = st.text_input('Type the password for this user.')
user_class = st.selectbox('Choose the role for the user', ['Student', 'Teacher', 'Admin'])
password_hash = pwd_context.hash(password)
user_list = ()
##left off
def open_user_directory(self):
try:
with open('user_directory.pkl', 'rb') as handle:  # filename assumed; original referenced a non-existent attribute
self.user_directory = pickle.load(handle)
except:
st.error('There was an error opening user directory')
def add_user_to_directory(self, user_list):
directory_columns = self.user_directory.columns
new_data_frame = pd.DataFrame(user_list, columns=directory_columns)
self.user_directory = self.user_directory.append(new_data_frame)
def open_shelf(self):
self.shelf = shelve.open(self.config_file)
def close_shelf(self):
self.shelf.close()
def create_grade_list(self, start=0, end=12):
self.grade_list = ['Grade ' + str(n) for n in range(start+1, end+1)]
def create_survey(self):
st.write('In create_survey session ID: ', self.session.session_id)
st.write('In create_survey survey ID:', self.session.survey_id)
st.write('In create_survey user class:', self.session.user_class)
self.reset_button = st.empty()
self.open_shelf()
self.create_grade_list()
self.survey_answers = pd.DataFrame(self.shelf['question_list'])
self.survey_answers['Answer'] = ""
self.survey_answers['Date Administered'] = ""
self.survey_answers['Teacher'] = st.selectbox('Teacher', self.shelf['teacher_list'], key=self.session.session_id)
self.survey_answers['Student'] = st.selectbox('Student', self.shelf['student_list'], key=self.session.session_id)
self.survey_answers['Subject'] = st.selectbox('Subject', self.shelf['subject_list'], key=self.session.session_id)
self.survey_answers['Grade'] = st.selectbox('Grade', self.grade_list, key=self.session.session_id)
self.survey_answers['User'] = self.session.user
for question in self.survey_answers['Question'].items():
print(question[1])
self.survey_answers.at[question[0], 'Answer'] = st.slider(question[1], 1, 5, key=self.session.session_id)
self.survey_answers.at[question[0], 'Date Administered'] = datetime.now()
self.survey_comment = st.text_area('Please enter any comments for this survey below', key=self.session.session_id)
self.comment = {self.session.survey_id:self.survey_comment}
self.survey_answers['Survey ID'] = self.session.survey_id
self.close_shelf()
def save_survey(self):
try:
existing_db = pd.read_pickle(self.survey_db)  # read into a local so self.survey_db keeps pointing at the pickle path
except:
pass
else:
self.survey_answers = existing_db.append(self.survey_answers, ignore_index=True)
self.session.saved_status=True
finally:
self.survey_answers.to_pickle(self.survey_db)
self.save_comment()
st.balloons()
def saved_status(self):
if self.session.saved_status == True:
self.saved_verb = 'has'
else:
self.saved_verb = 'has not'
def save_comment(self):
try:
with open(self.comment_db, 'rb') as handle:
self.comment_dictionary = pickle.load(handle)
except:
self.comment_dictionary = self.comment
else:
self.comment_dictionary[self.session.survey_id] = self.survey_comment
finally:
with open(self.comment_db, 'wb') as handle:
pickle.dump(self.comment_dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)
def show_survey(self):
st.write(self.survey_answers)
def new_survey(self):
self.session.session_id +=1
self.session.saved_status=False
self.session.survey_id = str(uuid.uuid4())
def provide_status(self, mode='base'):
st.sidebar.subheader('Status')
st.sidebar.markdown('User Class: *' + self.session.user_class + '*')
st.sidebar.markdown('User: ' + self.session.user)
st.sidebar.markdown('Authenticated: ' + str(self.session.auth_status))
self.saved_status()
if mode=='edit':
st.sidebar.markdown('* You are **editing** the survey for '+self.survey_answers['Student'][0])
st.sidebar.markdown('* The survey **'+self.saved_verb+'** been saved.')
st.sidebar.markdown('* You may save the survey for *'+self.survey_answers['Student'][0]+ \
'* as many times as you would like. However, please remember to click the \
**Reset / New Survey** button above if you want to begin creating a new survey for another student.')
def open_survey_db(self):
try:
self.survey_db = | pd.read_pickle(self.survey_db) | pandas.read_pickle |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
import h5py
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
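    # Hedged usage sketch: the file path, sensitivity index and start date below are
    # illustrative assumptions; the method expects an HDF5 results file with one group
    # per sensitivity run (e.g. 's0').
    #
    #   vis = Visualizer(district_list, private_list, city_list, contract_list, bank_list, leiu_list)
    #   vis.get_results_sensitivity_number('results/results.hdf5', '0',
    #                                      start_month=10, start_year=1905, start_day=1)
    #   vis.values['delta_HRO_pump'].resample('AS-OCT').sum()   # annual SWP pumping, tAF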
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedence Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', '<NAME>', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', '<NAME>', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
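    # Hedged usage sketch for scenario_compare: `validation_values` is assumed to be the
    # `values` DataFrame of a second Visualizer loaded from a historical-validation run.
    #
    #   vis.set_figure_params()
    #   vis.scenario_compare('figures/', 'delta_pumping', 'extended_simulation',
    #                        validation_vis.values, show_plot=False)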
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # days in each calendar month (non-leap year)
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute inleiu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attribute inleiu recharge to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping for inleiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by physical location, attribute to the district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping (daily values aggregated into monthly totals)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
#Get values for any private entities within the district
for private_name in self.private_list:
private = private_name.name
if district.key in self.private_districts[private]:
inleiu_name = private + '_' + district.key + '_inleiu_irrigation'
inleiu_recharge_name = private + '_' + district.key + '_inleiu_recharge'
direct_recover_name = private + '_' + district.key + '_recover_banked'
indirect_surface_name = private + '_' + district.key + '_exchanged_SW'
indirect_ground_name = private + '_' + district.key + '_exchanged_GW'
inleiu_pumping_name = private + '_' + district.key + '_leiupumping'
pumping_name = private + '_' + district.key + '_pumping'
recharge_name = private + '_' + district.key + '_' + district.key + '_recharged'
for year_num in range(0, self.number_years - 1):
year_str = str(year_num + self.starting_year + 1)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years - 1:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year + 1)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
#count GW received in the exchange as a recovery purchase for the district
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[ | pd.DatetimeIndex([date_string_current]) | pandas.DatetimeIndex |
import pandas
pibs = pandas.read_csv('pibs_ibge.csv', sep=',', encoding='utf-8')
tse = pandas.read_csv('codigos_tse.csv', sep=',', encoding='utf-8')
pibs['BUSCA'] = pibs['UF'].map(str) + "_" + pibs['CIDADE'].map(str)
tse['BUSCA'] = tse['UF_TSE'].map(str) + "_" + tse['NOME_TSE'].map(str)
saida = | pandas.merge(pibs, tse, on='BUSCA', how='left') | pandas.merge |
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import date
from joblib import load
from tkinter import *
from tkinter import ttk
import tkinter.font as tkFont
import os
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.pyplot import ginput
from matplotlib.patches import Arc
from matplotlib.widgets import Cursor
from functools import partial
from Shot import Player, Shot
from mplsoccer.pitch import Pitch
model = load('xG_model/lgbm_model.joblib')
### Can't figure out how to add a cursor to the plot ###
# class BlittedCursor():
# """
# A cross hair cursor using blitting for faster redraw.
# """
# def __init__(self, ax):
# self.ax = ax
# self.background = None
# self.horizontal_line = ax.axhline(color='k', lw=0.8, ls='--')
# self.vertical_line = ax.axvline(color='k', lw=0.8, ls='--')
# # text location in axes coordinates
# self.text = ax.text(0.72, 0.9, '', transform=ax.transAxes)
# self._creating_background = False
# ax.figure.canvas.mpl_connect('draw_event', self.on_draw)
# def on_draw(self, event):
# self.create_new_background()
# def set_cross_hair_visible(self, visible):
# need_redraw = self.horizontal_line.get_visible() != visible
# self.horizontal_line.set_visible(visible)
# self.vertical_line.set_visible(visible)
# self.text.set_visible(visible)
# return need_redraw
# def create_new_background(self):
# if self._creating_background:
# # discard calls triggered from within this function
# return
# self._creating_background = True
# self.set_cross_hair_visible(False)
# self.ax.figure.canvas.draw()
# plt.draw()
# self.background = self.ax.figure.canvas.copy_from_bbox(self.ax.bbox)
# self.set_cross_hair_visible(True)
# self._creating_background = False
# def on_mouse_move(self, event):
# if self.background is None:
# self.create_new_background()
# if not event.inaxes:
# need_redraw = self.set_cross_hair_visible(False)
# if need_redraw:
# self.ax.figure.canvas.restore_region(self.background)
# self.ax.figure.canvas.blit(self.ax.bbox)
# else:
# self.set_cross_hair_visible(True)
# # update the line positions
# x, y = event.xdata, event.ydata
# self.horizontal_line.set_ydata(y)
# self.vertical_line.set_xdata(x)
# self.text.set_text('x=%1.2f, y=%1.2f' % (x, y))
# self.ax.figure.canvas.restore_region(self.background)
# self.ax.draw_artist(self.horizontal_line)
# self.ax.draw_artist(self.vertical_line)
# self.ax.draw_artist(self.text)
# self.ax.figure.canvas.blit(self.ax.bbox)
def draw_pitch():
global fig, ax
pitch = Pitch(pitch_type='uefa', pitch_color='grass', goal_type='box', line_color='white', stripe=True)
fig, ax = pitch.draw()
#Hide axis
plt.axis('off')
fig.canvas.mpl_connect('button_press_event', onclick)
plt.ion()
plt.show()
def onclick(event):
global fig, ax, circle
global home_goals, away_goals, home_shots, away_shots, home_SOT, away_SOT, shot_index
x_loc = round(event.xdata,2)
y_loc = round(event.ydata,2)
if team_button1.pressed:
team = "Home"
team_name = home_team
if(home_dropdown.get() == ''):
shot_output.config(text='Please Choose a Player for the Shot', foreground="red")
return
split = home_dropdown.get().split("--")
player_name = split[1]
player_number = int(split[0])
else:
team = "Away"
team_name = away_team
if(away_dropdown.get() == ''):
shot_output.config(text='Please Choose a Player for the Shot', foreground="red")
return
split = away_dropdown.get().split("--")
player_name = split[1]
player_number = int(split[0])
if body_part_button1.pressed:
body_part = 0
else:
body_part = 1
if assist_type_button1.pressed:
assist_type = 3
elif assist_type_button2.pressed:
assist_type = 0
elif assist_type_button3.pressed:
assist_type = 1
elif assist_type_button4.pressed:
assist_type = 2
else:
assist_type = 4
if shot_type_button1.pressed:
shot_type = 4
elif shot_type_button2.pressed:
shot_type = 0
elif shot_type_button3.pressed:
shot_type = 1
elif shot_type_button4.pressed:
shot_type = 2
else:
shot_type = 3
# Goal
if str(event.key) == "shift" and str(event.button) == "MouseButton.LEFT":
on_target = 1
goal = 1
new_shot = Shot(shot_index, team_name, player_name, player_number, on_target, goal, x_loc, y_loc, body_part, assist_type, shot_type)
circle = plt.Circle((x_loc, y_loc), 1.0, color='red')
ax.add_artist(circle)
# +1 shots, SOT, and goal
if team == "Home":
home_goals += 1
home_shots += 1
home_SOT += 1
score_home_label.configure(text=str(home_goals))
shots_home_label.configure(text=str(home_shots))
SOT_home_label.configure(text=str(home_SOT))
else:
away_goals += 1
away_shots += 1
away_SOT += 1
score_away_label.configure(text=str(away_goals))
shots_away_label.configure(text=str(away_shots))
SOT_away_label.configure(text=str(away_SOT))
shot_index += 1
# Shot Off Target
elif str(event.button) == "MouseButton.RIGHT":
on_target = 0
goal = 0
new_shot = Shot(shot_index, team_name, player_name, player_number, on_target, goal, x_loc, y_loc, body_part, assist_type, shot_type)
circle = plt.Circle((x_loc, y_loc), 1.0, color='blue')
ax.add_artist(circle)
# +1 shots
if team == "Home":
home_shots += 1
shots_home_label.configure(text=str(home_shots))
else:
away_shots += 1
shots_away_label.configure(text=str(away_shots))
shot_index += 1
# Shot On Target
elif str(event.button) == "MouseButton.LEFT":
on_target = 1
goal = 0
new_shot = Shot(shot_index, team_name, player_name, player_number, on_target, goal, x_loc, y_loc, body_part, assist_type, shot_type)
circle = plt.Circle((x_loc, y_loc), 1.0, color='orange')
ax.add_artist(circle)
# +1 shots, SOT
if team == "Home":
home_shots+=1
home_SOT += 1
shots_home_label.configure(text=str(home_shots))
SOT_home_label.configure(text=str(home_SOT))
else:
away_shots += 1
away_SOT += 1
shots_away_label.configure(text=str(away_shots))
SOT_away_label.configure(text=str(away_SOT))
shot_index += 1
xG_output = calcXG(new_shot)
display_location(x_loc, y_loc, new_shot, xG_output)
createDict(new_shot, xG_output)
plt.draw()
def calcXG(shot_obj):
data = pd.DataFrame.from_dict([{'shot_type_name': shot_obj.getShotType(), 'x': shot_obj.getX(), 'y': shot_obj.getY(), 'body_part_name': shot_obj.getBodyPart(), 'assist_type': shot_obj.getAssistType()}])
return round(model.predict_proba(data)[:,1][0], 5)
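# Hedged sketch (not part of the original GUI flow): shows the exact feature frame layout
# that calcXG feeds to the model loaded at the top of this script. The numeric values below
# are made-up placeholders, chosen only to illustrate the encodings used in onclick
# (shot_type_name 4 = open play, body_part_name 0 = foot, assist_type 3 = direct).
def example_xg_prediction():
    example_features = pd.DataFrame.from_dict([{
        'shot_type_name': 4,   # open play
        'x': 95.0,             # pitch coordinates on the uefa-type pitch
        'y': 34.0,
        'body_part_name': 0,   # foot
        'assist_type': 3       # direct
    }])
    return round(model.predict_proba(example_features)[:, 1][0], 5)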
def display_location(x, y, shot, xG):
string_x = str(round(x,1))
string_y = str(round(y,1))
shot_output.configure(text=f"xG: {xG}")
def createDict(shot_obj, xG):
new_dict = dict({'shot_id': shot_obj.getIndex(), 'Team': shot_obj.getTeam(), 'player_name': shot_obj.getPlayerName(), 'player_number': shot_obj.getPlayerNumber(), 'onTarget': shot_obj.getOnTarget(), 'isGoal': shot_obj.getGoal(), 'x': shot_obj.getX(), 'y': shot_obj.getY(), 'body_part_name': shot_obj.getBodyPart(), 'assist_type': shot_obj.getAssistType(), 'shot_type_name': shot_obj.getShotType(), 'xG': xG})
List.append(new_dict)
class TeamButton():
def __init__(self, input_text, text_font, data_entry):
self.input_text = input_text
self.text_font = text_font
self.data_entry = data_entry
if self.input_text == "Home":
self.pressed = True
self.button = Button(self.data_entry, text=input_text, command=self.updateHome, relief=SUNKEN, font=self.text_font)
else:
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateAway, relief=RAISED, font=self.text_font)
def updateHome(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
team_button2.button.configure(relief=RAISED)
self.pressed = True
team_button2.pressed = False
def updateAway(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
team_button1.button.configure(relief=RAISED)
self.pressed = True
team_button1.pressed = False
class BodyPartButton():
def __init__(self, input_text, text_font, data_entry):
self.input_text = input_text
self.text_font = text_font
self.data_entry = data_entry
if self.input_text == "Foot":
self.pressed = True
self.button = Button(self.data_entry, text=input_text, command=self.updateFoot, relief=SUNKEN, font=self.text_font)
else:
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateOther, relief=RAISED, font=self.text_font)
def updateFoot(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
body_part_button2.button.configure(relief=RAISED)
self.pressed = True
body_part_button2.pressed = False
def updateOther(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
body_part_button1.button.configure(relief=RAISED)
self.pressed = True
body_part_button1.pressed = False
class AssistTypeButton():
def __init__(self, input_text, text_font, data_entry):
self.input_text = input_text
self.text_font = text_font
self.data_entry = data_entry
if self.input_text == "Direct":
self.pressed = True
self.button = Button(self.data_entry, text=input_text, command=self.updateDirect, relief=SUNKEN, font=self.text_font)
elif self.input_text == "Pass":
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updatePass, relief=RAISED, font=self.text_font)
elif self.input_text == "Recovery":
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateRecovery, relief=RAISED, font=self.text_font)
elif self.input_text == "Clearance":
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateClearance, relief=RAISED, font=self.text_font)
else: # Rebound Button
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateRebound, relief=RAISED, font=self.text_font)
def updateDirect(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
assist_type_button2.button.configure(relief=RAISED)
assist_type_button3.button.configure(relief=RAISED)
assist_type_button4.button.configure(relief=RAISED)
assist_type_button5.button.configure(relief=RAISED)
self.pressed = True
assist_type_button2.pressed = False
assist_type_button3.pressed = False
assist_type_button4.pressed = False
assist_type_button5.pressed = False
def updatePass(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
assist_type_button1.button.configure(relief=RAISED)
assist_type_button3.button.configure(relief=RAISED)
assist_type_button4.button.configure(relief=RAISED)
assist_type_button5.button.configure(relief=RAISED)
self.pressed = True
assist_type_button1.pressed = False
assist_type_button3.pressed = False
assist_type_button4.pressed = False
assist_type_button5.pressed = False
def updateRecovery(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
assist_type_button1.button.configure(relief=RAISED)
assist_type_button2.button.configure(relief=RAISED)
assist_type_button4.button.configure(relief=RAISED)
assist_type_button5.button.configure(relief=RAISED)
self.pressed = True
assist_type_button1.pressed = False
assist_type_button2.pressed = False
assist_type_button4.pressed = False
assist_type_button5.pressed = False
def updateClearance(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
assist_type_button1.button.configure(relief=RAISED)
assist_type_button2.button.configure(relief=RAISED)
assist_type_button3.button.configure(relief=RAISED)
assist_type_button5.button.configure(relief=RAISED)
self.pressed = True
assist_type_button1.pressed = False
assist_type_button2.pressed = False
assist_type_button3.pressed = False
assist_type_button5.pressed = False
def updateRebound(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
assist_type_button1.button.configure(relief=RAISED)
assist_type_button2.button.configure(relief=RAISED)
assist_type_button3.button.configure(relief=RAISED)
assist_type_button4.button.configure(relief=RAISED)
self.pressed = True
assist_type_button1.pressed = False
assist_type_button2.pressed = False
assist_type_button3.pressed = False
assist_type_button4.pressed = False
class ShotTypeButton():
def __init__(self, input_text, text_font, data_entry):
self.input_text = input_text
self.text_font = text_font
self.data_entry = data_entry
if self.input_text == "Open Play":
self.pressed = True
self.button = Button(self.data_entry, text=input_text, command=self.updateOpenPlay, relief=SUNKEN, font=self.text_font)
elif self.input_text == "Free Kick":
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateFreeKick, relief=RAISED, font=self.text_font)
elif self.input_text == "Corner":
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateCorner, relief=RAISED, font=self.text_font)
elif self.input_text == "Throw In":
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateThrowIn, relief=RAISED, font=self.text_font)
else: # Direct Set Piece
self.pressed = False
self.button = Button(self.data_entry, text=input_text, command=self.updateDirectSetPiece, relief=RAISED, font=self.text_font)
def updateOpenPlay(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
shot_type_button2.button.configure(relief=RAISED)
shot_type_button3.button.configure(relief=RAISED)
shot_type_button4.button.configure(relief=RAISED)
shot_type_button5.button.configure(relief=RAISED)
self.pressed = True
shot_type_button2.pressed = False
shot_type_button3.pressed = False
shot_type_button4.pressed = False
shot_type_button5.pressed = False
def updateFreeKick(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
shot_type_button1.button.configure(relief=RAISED)
shot_type_button3.button.configure(relief=RAISED)
shot_type_button4.button.configure(relief=RAISED)
shot_type_button5.button.configure(relief=RAISED)
self.pressed = True
shot_type_button1.pressed = False
shot_type_button3.pressed = False
shot_type_button4.pressed = False
shot_type_button5.pressed = False
def updateCorner(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
shot_type_button1.button.configure(relief=RAISED)
shot_type_button2.button.configure(relief=RAISED)
shot_type_button4.button.configure(relief=RAISED)
shot_type_button5.button.configure(relief=RAISED)
self.pressed = True
shot_type_button1.pressed = False
shot_type_button2.pressed = False
shot_type_button4.pressed = False
shot_type_button5.pressed = False
def updateThrowIn(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
shot_type_button1.button.configure(relief=RAISED)
shot_type_button2.button.configure(relief=RAISED)
shot_type_button3.button.configure(relief=RAISED)
shot_type_button5.button.configure(relief=RAISED)
self.pressed = True
shot_type_button1.pressed = False
shot_type_button2.pressed = False
shot_type_button3.pressed = False
shot_type_button5.pressed = False
def updateDirectSetPiece(self):
if self.pressed == False:
self.button.configure(relief=SUNKEN)
shot_type_button1.button.configure(relief=RAISED)
shot_type_button2.button.configure(relief=RAISED)
shot_type_button3.button.configure(relief=RAISED)
shot_type_button4.button.configure(relief=RAISED)
self.pressed = True
shot_type_button1.pressed = False
shot_type_button2.pressed = False
shot_type_button3.pressed = False
shot_type_button4.pressed = False
def saveCSV():
# save all the data in placeholders to a csv/excel file
today = date.today()
today = today.strftime("%b-%d-%Y")
df = | pd.DataFrame(List) | pandas.DataFrame |
import scipy.io
import numpy as np
import pandas as pd
from io import StringIO
from datetime import datetime
def ezsleep(filepath):
F = open(filepath).read()
data = pd.read_csv(StringIO(F),header=None)
data = data.values.ravel().astype(np.float64)
return data
def MKmat(matfilepath):
data = scipy.io.loadmat(matfilepath)
df = | pd.DataFrame.from_dict(data, orient='index', columns=['A']) | pandas.DataFrame.from_dict |
import csv
import mne
import pandas as pd
import matplotlib.pyplot as plt
import torch
import math
import numpy as np
import json
import re
import ast
def make_file_list(edf_list: str, csv_list: str, data_dir: str) -> list:
file_list = []
file1 = open(edf_list)
file2 = open(csv_list)
reader1 = csv.reader(file1) # reader for the edf file locations
reader2 = csv.reader(file2) # reader for the rec file location
for i in zip(reader1, reader2):
first = data_dir + i[0][0][2:]
second = data_dir + i[1][0][2:-3] + 'lbl'
file_list.append([first, second])
file1.close()
file2.close()
return file_list # returns list of file locations
# https://stackoverflow.com/questions/20910213/loop-over-two-generator-together
def read_and_export_files(file_list: list, montage: dict, save_loc: str):
global nr
for direct in file_list:
edf_dir = direct[0]
csv_dir = direct[1]
data = mne.io.read_raw_edf(edf_dir, preload=True) # read edf file
if len(data) < 82500: # if the file has less than 5.5 mins of
continue # recorded data then it's discarded
data = data.filter(0.1, 100) # use filter on data
data = data.notch_filter(60) # use filter on data
sfreq = int(data.info['sfreq']) # get the sampling freqency
df = data.to_data_frame() # make pandas dataframe
inv_map = {v[0]: k for k, v in montage1.items()}
# to make sure that the correct targets are given to the right
# channels, the index order is used.
which_montages = set()
target = []
with open(csv_dir, "r") as file: # read rec file
ls = csv.reader(file, delimiter='å')
for rows in ls:
l = 0
try:
l = rows[0]
except:
continue
if l:
if l[:7] == "symbols":
m = re.match(r".+(\{.+\})", l)
string = m.group(1).replace("'", '"')
string = re.sub(r'(\d+)', r'"\1"', string)
sym_dict = json.loads(string)
# make dict with the keys to the artifacts
elif l[:5] == "label":
m = re.match(r".+\{(.+)(\[.+\])\}", l)
first = m.group(1).split(", ")
second = m.group(2).strip('][').split(", ")
ind = second.index('1.0')
obs = sym_dict[str(ind)] # get the encoding of the data
if (obs in ['eyem', 'artf', 'chew',
'shiv', 'musc', 'elpp', 'bckg']):
target.append([int(first[4]),
float(first[2]),
float(first[3])])
which_montages.add(int(first[4]))
sorted_index = sorted(list(which_montages)) # sort the montage index
first = True
for i in sorted_index: # using the montage information we make the new
col_names = montage.get(i) # data-frame using only the channels
# that has been labeled
if (col_names[0] == "EKG"): # & first # special case that is removed
continue
# df_new = df[col_names[1]]
# df_new = df_new.rename(col_names[0])
# first = False
# elif (col_names[0] == "EKG"): # special case for montage 2
# list1 = df[col_names[1]]
# list1 = list1.rename(col_names[0])
# df_new = pd.concat([df_new, diff], axis=1, join='inner')
if first:
list1 = df[col_names[1]] # get the first series
list2 = df[col_names[2]] # get the second series
df_new = list1 - list2
df_new = pd.DataFrame(df_new.rename(col_names[0])) # Rename
first = False
else:
list1 = df[col_names[1]]
list2 = df[col_names[2]]
diff = list1 - list2
diff = diff.rename(col_names[0]) # Rename
df_new = | pd.concat([df_new, diff], axis=1, join='inner') | pandas.concat |
import os
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
########### Data Constants ###########
DATA_DIR = '../data/'
if not os.access('/tmp/figures', os.F_OK):
os.mkdir('/tmp/figures')
if not os.access('/tmp/figures', os.W_OK):
print('Cannot write to /tmp/figures, please fix it.')
exit()
else:
print('figures saved to /tmp/figures')
########### Prepare Functions ###########
def filter_df(rule_df, keyword):
    """Split association rules into those with `keyword` in the antecedents (df_ant)
    and those with `keyword` in the consequents (df_con)."""
    df_ant = pd.DataFrame(columns=rule_df.columns)
df_con = pd.DataFrame(columns=rule_df.columns)
ind_ant = 0
ind_con = 0
for df_iter in rule_df.iterrows():
df_row = df_iter[1]
if keyword in df_row['antecedents']:
df_ant.loc[ind_ant] = df_row
ind_ant +=1
if keyword in df_row['consequents']:
df_con.loc[ind_con] = df_row
ind_con += 1
return df_ant, df_con
def get_usr_thr(column, mode='top'):
    """Return the value count at which the most ('top') or least ('bot') frequent
    values together cover about 20% of the rows (a Pareto-style cutoff)."""
    pareto = len(column) * 0.2
total_cnt = 0
if mode == 'top':
ascend = False
elif mode == 'bot':
ascend = True
for count in column.value_counts(ascending=ascend).to_list():
total_cnt += count
if total_cnt >= pareto:
return count
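# Hedged sketch of how get_usr_thr behaves: with mode='top' it walks down the most frequent
# values until they cover roughly 20% of the rows (the Pareto cutoff above) and returns the
# count at which that happens. The toy series below is made up.
def example_usr_thr():
    toy = pd.Series(['a'] * 6 + ['b'] * 3 + ['c'] * 1)
    return get_usr_thr(toy, mode='top')  # returns 6: 'a' alone already covers 20% of the 10 rows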
def get_df(file, header=None):
df = | pd.read_csv(file, header=None) | pandas.read_csv |
import numpy as np
import logging as logging
import pandas as pd
import parmed as pmd
from networkx.algorithms import isomorphism
from openforcefield.typing.engines.smirnoff import (
ForceField,
generateTopologyFromOEMol,
generateGraphFromTopology,
)
from openeye.oechem import (
oemolistream,
oemolostream,
OEIFlavor_MOL2_Forcefield,
OEIFlavor_Generic_Default,
OEIFlavor_PDB_Default,
OEIFlavor_PDB_ALL,
OEFormat_MOL2,
OEFormat_MOL2H,
OEWriteMolecule,
OETriposAtomNames,
OEMol,
OEFormat_PDB,
OESmilesToMol,
OEAddExplicitHydrogens,
OEHasAtomIdx,
OEAtomGetResidue,
)
def compare_parameters(reference_prmtop, target_prmtop):
"""Compare force field parameters between a reference and target parameter file.
Parameters
----------
reference_prmtop : str
File name of reference parameter file
target_prmtop : str
File name of target parameter file
"""
print("Establishing mapping between structures...")
reference_to_target_mapping = create_atom_map(reference_prmtop, target_prmtop)
reference = pmd.load_file(reference_prmtop, structure=True)
target = pmd.load_file(target_prmtop, structure=True)
print("Comparing LJ parameters...")
lj = compare_lj_parameters(reference, target, reference_to_target_mapping)
print("Comparing bond parameters...")
bonds = compare_bonds(reference, target, reference_to_target_mapping)
print("Comparing angle parameters...")
angles = compare_angles(reference, target, reference_to_target_mapping)
print("Comparing dihedral parameters...")
dihedrals = compare_dihedrals(reference, target, reference_to_target_mapping)
print("Comparing improper parameters...")
impropers = compare_impropers(reference, target, reference_to_target_mapping)
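# Hedged usage sketch (not called anywhere): how compare_parameters is meant to be invoked.
# The file names are hypothetical placeholders for two parameterizations of the same molecule.
def example_compare_parameters():
    compare_parameters("reference.prmtop", "target.prmtop")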
def create_atom_map(reference_prmtop, target_prmtop):
"""Create a mapping between the atoms in the first and second structure using `networkx.algorithms.isomorphism`.
I am not sure how well this performs in general.
Parameters
----------
reference_prmtop : str
File name of reference parameter file
target_prmtop : str
File name of target parameter file
Returns
-------
reference_to_target_mapping : dict
    Mapping from reference atom index to target atom index (0-based ParmEd indices).
"""
reference = pmd.load_file(reference_prmtop, structure=True)
target = pmd.load_file(target_prmtop, structure=True)
reference_graph = generateGraphFromTopology(reference.topology)
target_graph = generateGraphFromTopology(target.topology)
graph_matcher = isomorphism.GraphMatcher(reference_graph, target_graph)
reference_to_target_mapping = dict()
if graph_matcher.is_isomorphic():
logging.debug("Reference → Target (AMBER 1-based indexing)")
for (reference_atom, target_atom) in graph_matcher.mapping.items():
reference_to_target_mapping[reference_atom] = target_atom
reference_name = reference[reference_atom].name
target_name = target[target_atom].name
reference_type = reference[reference_atom].type
target_type = target[target_atom].type
# ParmEd is 0-indexed.
# Add 1 to match AMBER-style indexing.
logging.debug(
f"{reference_name:4} {reference_type:4} {reference_atom + 1:3d} → "
f"{target_atom + 1:3d} {target_type:4} {target_name:4}"
)
return reference_to_target_mapping
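# Hedged sketch (not called anywhere): inspecting the atom mapping on its own. The default
# file names are hypothetical placeholders; the printed indices are 0-based ParmEd indices.
def example_inspect_mapping(reference_prmtop="reference.prmtop", target_prmtop="target.prmtop"):
    mapping = create_atom_map(reference_prmtop, target_prmtop)
    for reference_atom, target_atom in mapping.items():
        print(f"{reference_atom} -> {target_atom}")
    return mapping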
def find_bonds(structure):
df = pd.DataFrame()
for atom in structure.atoms:
for bond in atom.bonds:
df = df.append(
pd.DataFrame(
{
"atom1": bond.atom1.name,
"atom2": bond.atom2.name,
"atom1_idx": bond.atom1.idx,
"atom2_idx": bond.atom2.idx,
"atom1_type": bond.atom1.type,
"atom2_type": bond.atom2.type,
"req": bond.type.req,
"k": bond.type.k,
},
index=[0],
),
ignore_index=True,
)
return df.drop_duplicates()
def find_angles(structure):
df = pd.DataFrame()
for atom in structure.atoms:
for angle in atom.angles:
df = df.append(
pd.DataFrame(
{
"atom1": angle.atom1.name,
"atom2": angle.atom2.name,
"atom3": angle.atom3.name,
"atom1_idx": angle.atom1.idx,
"atom2_idx": angle.atom2.idx,
"atom3_idx": angle.atom3.idx,
"atom1_type": angle.atom1.type,
"atom2_type": angle.atom2.type,
"atom3_type": angle.atom3.type,
"thetaeq": angle.type.theteq,
"k": angle.type.k,
},
index=[0],
),
ignore_index=True,
)
return df.drop_duplicates()
def find_dihedrals(structure):
df = pd.DataFrame()
for atom in structure.atoms:
for dihedral in atom.dihedrals:
df = df.append(
pd.DataFrame(
{
"atom1": dihedral.atom1.name,
"atom2": dihedral.atom2.name,
"atom3": dihedral.atom3.name,
"atom4": dihedral.atom4.name,
"atom1_idx": dihedral.atom1.idx,
"atom2_idx": dihedral.atom2.idx,
"atom3_idx": dihedral.atom3.idx,
"atom4_idx": dihedral.atom4.idx,
"atom1_type": dihedral.atom1.type,
"atom2_type": dihedral.atom2.type,
"atom3_type": dihedral.atom3.type,
"atom4_type": dihedral.atom4.type,
"improper": dihedral.improper,
"per": dihedral.type.per,
"phi_k": dihedral.type.phi_k,
"phase": dihedral.type.phase,
},
index=[0],
),
ignore_index=True,
)
return df.drop_duplicates()
def find_impropers(structure):
df = pd.DataFrame()
for atom in structure.atoms:
for improper in atom.dihedrals:
df = df.append(
pd.DataFrame(
{
"atom1": improper.atom1.name,
"atom2": improper.atom2.name,
"atom3": improper.atom3.name,
"atom4": improper.atom4.name,
"atom1_idx": improper.atom1.idx,
"atom2_idx": improper.atom2.idx,
"atom3_idx": improper.atom3.idx,
"atom4_idx": improper.atom4.idx,
"atom1_type": improper.atom1.type,
"atom2_type": improper.atom2.type,
"atom3_type": improper.atom3.type,
"atom4_type": improper.atom4.type,
"per": improper.type.per,
"phi_k": improper.type.phi_k,
"phase": improper.type.phase,
},
index=[0],
),
ignore_index=True,
)
return df.drop_duplicates()
def label_smirks(structure_mol2, verbose=True, structure=None):
ifs = oemolistream()
flavor = OEIFlavor_MOL2_Forcefield
ifs.SetFlavor(OEFormat_MOL2, flavor)
ifs.open(structure_mol2)  # open the input mol2 stream before iterating over it
molecules = []
# Read in molecules
for mol in ifs.GetOEMols():
OETriposAtomNames(mol)
# Add all the molecules in this file to a list, but only return the first one.
molecules.append(OEMol(mol))
# This should now handle single-residue and multi-residue hosts.
ff = ForceField("forcefield/smirnoff99Frosst.offxml")
labels = ff.labelMolecules(molecules, verbose=False)
if not verbose:
return labels
else:
# Labels should be a list of length 1 if we pass it a single molecule...
for force in labels[0].keys():
print(force, end="\n")
for index in range(len(labels[0][force])):
atom_indices = labels[0][force][index][0]
atom_names = []
atom_types = []
for atom_index in atom_indices:
atom_name = structure[atom_index].name
atom_type = structure[atom_index].type
atom_names.append(atom_name)
atom_types.append(atom_type)
atom_name_string = "-".join(atom_names)
atom_type_string = "-".join(atom_types)
smirks_string = labels[0][force][index][2]
pid = labels[0][force][index][1]
parameter = ff.getParameter(paramID=pid)
# Sometimes a SMIRKS pattern for *two* atoms is printed by the nonbonded generator.
# I'm not sure why that is.
print(f"{atom_name_string:<14} {atom_type_string:<14}" f"{parameter}")
return
def compare_lj_parameters(reference, target, reference_to_target_mapping, verbose=True):
lennard_jones = pd.DataFrame()
logging.debug("Reference → Target")
logging.debug(
f"{'Name':4} {'Eps':5} {'Sigma':5} → " f"{'Name':4} {'Eps':5} {'Sigma':5}"
)
for reference_atom, target_atom in reference_to_target_mapping.items():
reference_name = reference[reference_atom].name
reference_type = reference[reference_atom].type
reference_sigma = reference[reference_atom].sigma
reference_epsilon = reference[reference_atom].epsilon
target_name = target[target_atom].name
target_type = target[target_atom].type
target_sigma = target[target_atom].sigma
target_epsilon = target[target_atom].epsilon
lennard_jones = lennard_jones.append(
pd.DataFrame(
{
"target_name": target_name,
"target_type": target_type,
"target_e": np.round(target_epsilon, decimals=5),
"target_s": np.round(target_sigma, decimals=5),
"reference_name": reference_name,
"reference_type": reference_type,
"reference_e": np.round(reference_epsilon, decimals=5),
"reference_s": np.round(reference_sigma, decimals=5),
},
index=[0],
),
ignore_index=True,
)
if verbose:
if (np.round(reference_epsilon, 4) != np.round(target_epsilon, 4)) or (
np.round(reference_sigma, 4) != np.round(target_sigma, 4)
):
print(
f"\x1b[31m{reference_name:>4} {reference_sigma:4.3f} {reference_epsilon:4.3f} → "
f"{target_name:>4} {target_sigma:4.3f} {target_epsilon:4.3f}\x1b[0m"
)
else:
print(
f"{reference_name:>4} {reference_sigma:4.3f} {reference_epsilon:4.3f} → "
f"{target_name:>4} {target_sigma:4.3f} {target_epsilon:4.3f}"
)
return lennard_jones
def compare_bonds(reference, target, reference_to_target_mapping, verbose=True):
reference_bonds = find_bonds(reference)
target_bonds = find_bonds(target)
assert len(reference.bonds) == len(reference_bonds)
assert len(target.bonds) == len(target_bonds)
bonds = pd.DataFrame()
for reference_atom, target_atom in reference_to_target_mapping.items():
reference_atom_bonds = reference_bonds[
(reference_bonds["atom1_idx"] == reference_atom)
| (reference_bonds["atom2_idx"] == reference_atom)
]
target_atom_bonds = target_bonds[
(target_bonds["atom1_idx"] == target_atom)
| (target_bonds["atom2_idx"] == target_atom)
]
df = reference_atom_bonds.join(target_atom_bonds, lsuffix="_r", rsuffix="_t")
for index, bond in df.iterrows():
reference_atom1 = bond["atom1_r"]
reference_atom2 = bond["atom2_r"]
reference_k = bond["k_r"]
reference_req = bond["req_r"]
target_atom1 = bond["atom1_t"]
target_atom2 = bond["atom2_t"]
target_k = bond["k_t"]
target_req = bond["req_t"]
if verbose:
if (np.round(reference_k, 4) != np.round(target_k, 4)) or (
np.round(reference_req, 4) != np.round(target_req, 4)
):
print(
f"\x1b[31m{reference_atom1:>4}--{reference_atom2:<4} {reference_k:4.3f} {reference_req:4.3f} → "
f"{target_atom1:>4}--{target_atom2:<4} {target_k:4.3f} {target_req:4.3f}\x1b[0m"
)
else:
print(
f"{reference_atom1:>4}--{reference_atom2:<4} {reference_k:4.3f} {reference_req:4.3f} → "
f"{target_atom1:>4}--{target_atom2:<4} {target_k:4.3f} {target_req:4.3f}"
)
bonds = bonds.append(df, ignore_index=True)
return bonds
def compare_angles(reference, target, reference_to_target_mapping, verbose=True):
reference_angles = find_angles(reference)
target_angles = find_angles(target)
assert len(reference.angles) == len(reference_angles)
assert len(target.angles) == len(target_angles)
angles = pd.DataFrame()
for reference_atom, target_atom in reference_to_target_mapping.items():
reference_atom_angles = reference_angles[
(reference_angles["atom1_idx"] == reference_atom)
| (reference_angles["atom2_idx"] == reference_atom)
| (reference_angles["atom3_idx"] == reference_atom)
]
target_atom_angles = target_angles[
(target_angles["atom1_idx"] == target_atom)
| (target_angles["atom2_idx"] == target_atom)
| (target_angles["atom3_idx"] == target_atom)
]
df = reference_atom_angles.join(target_atom_angles, lsuffix="_r", rsuffix="_t")
for index, angle in df.iterrows():
reference_atom1 = angle["atom1_r"]
reference_atom2 = angle["atom2_r"]
reference_atom3 = angle["atom3_r"]
reference_k = angle["k_r"]
reference_thetaeq = angle["thetaeq_r"]
target_atom1 = angle["atom1_t"]
target_atom2 = angle["atom2_t"]
target_atom3 = angle["atom3_t"]
target_k = angle["k_t"]
target_thetaeq = angle["thetaeq_t"]
if verbose:
if (np.round(reference_k, 4) != np.round(target_k, 4)) or (
np.round(reference_thetaeq, 4) != np.round(target_thetaeq, 4)
):
print(
f"\x1b[31m{reference_atom1:>4}--{reference_atom2:^4}--{reference_atom3:<4} {reference_k:4.3f} {reference_thetaeq:4.3f} → "
f"{target_atom1:>4}--{target_atom2:^4}--{target_atom3:<4} {target_k:4.3f} {target_thetaeq:4.3f}\x1b[0m"
)
else:
print(
f"{reference_atom1:>4}--{reference_atom2:<4}--{reference_atom3:<4} {reference_k:4.3f} {reference_thetaeq:4.3f} → "
f"{target_atom1:>4}--{target_atom2:<4}--{target_atom3:<4} {target_k:4.3f} {target_thetaeq:4.3f}"
)
angles = angles.append(df, ignore_index=True)
return angles
def compare_dihedrals(reference, target, reference_to_target_mapping, verbose=True):
reference_dihedrals = find_dihedrals(reference)
target_dihedrals = find_dihedrals(target)
assert len(reference.dihedrals) == len(reference_dihedrals)
assert len(target.dihedrals) == len(target_dihedrals)
dihedrals = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
from glob import glob
import os
import numpy as np
import pandas as pd
from natsort import natsorted
def prepare_forex(asset, path):
if not os.path.exists(path + asset + "/h5"):
os.makedirs(path + asset + "/h5")
def dateparse(date, time):
return pd.to_datetime(date + time, format='%Y%m%d%H%M%S%f')
def process_data(file):
data = pd.read_csv(file, header=None, names=["Date", "Time", "Bid", "Ask"], index_col="datetime",
parse_dates={'datetime': ['Date', 'Time']}, date_parser=dateparse)
# Add the midquote
data["Midquote"] = (data["Bid"] + data["Ask"]) / 2
data.drop(["Bid", "Ask"], axis=1, inplace=True)
data = data.iloc[:, 0]
# Shift the index such that trading time is from 0-24h
idx_1 = data[:'2014-08-02'].index + pd.Timedelta('8h')
idx_2 = data['2014-08-03':].index + pd.Timedelta('6h')
data.index = idx_1.union(idx_2)
# Change the first and the last timestamp
def change_timestamp(x):
if len(x) > 0:
x[0] = x[0].replace(hour=0, minute=0, second=0, microsecond=0)
x[-1] = x[-1].replace(hour=23, minute=59, second=59, microsecond=999999)
return x
new_idx = data.index.to_series().groupby(pd.TimeGrouper("1d")).apply(change_timestamp)
data.index = new_idx
# Save the data to the disk
for day, data_day in data.groupby( | pd.TimeGrouper("1d") | pandas.TimeGrouper |
#!/usr/bin/env python
# coding: utf-8
# # Global Utility Functions
# **Description**
#
# This notebook contains shared functions used in other notebooks.
#
# - *Plotting* and *Reporting/printing* related functions are mostly cosmetic; they generate nice plots for the thesis and provide easily readable output.
# - The functions that are *Scoring* related are important, as they are used to calculate the evaluation metrics.
# - The *Data Preparation* related functions are also very important, as they include the sliding window logic, and the (quite complex) normalization & cv scenario creation logic.
#
# The functions are followed by a *Check function* cell, which is used to demonstrate the different functions for transparency and to sanity-check their logic. This transparency, by showing the results right next to the code, is also the reason why I implemented these functions as a Jupyter Notebook instead of as a Python module (where they would be better placed for more production-oriented scenarios).
#
# **Usage**
#
# 1. To make the functions in this notebook available in another notebook, run the following line in the consuming notebook:
# ```
# %run utils.ipynb
# ```
#
#
# 2. To investigate the functions inside this notebook, enable the testing output by setting in the configuration section ([1. Preparations](#1)):
# ```
# TEST_MODE = True
# ```
#
# **Table of Contents**
#
# **1 - [Preparations](#1)**
# **2 - [Plotting related](#2)**
# **3 - [Reporting/printing related](#3)**
# **4 - [Scoring related](#4)**
# **5 - [Data Preparation related](#5)**
# ## 1. Preparations <a id='1'> </a>
# ### Imports
# In[18]:
# Standard
import warnings
import random
from pathlib import Path
# Extra
import pandas as pd
import numpy as np
from dataclasses import asdict
from sklearn.metrics import confusion_matrix, roc_curve, accuracy_score, make_scorer, auc
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.utils import resample as sk_resample
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import animation, rc
from tqdm.auto import tqdm
from IPython.display import HTML
import math #mychanges
# ### Configuration
# Only relevant for sanity-checking functions in this notebook.
# In[2]:
TEST_MODE = False # Set to "True" to perform sanity checks, set to "False" before importing this notebook into others
MAGENTA = (202/255, 18/255, 125/255)
# ## 2. Plotting related <a id='2'> </a>
# ### utils_save_plot()
# In[3]:
def utils_save_plot(fig, filepath):
"""Save plot to file using certain layout and dpi."""
fig.savefig(filepath, bbox_inches="tight", pad_inches=0.01, dpi=600)
# **Check Function:**
# In[4]:
if TEST_MODE:
plt.plot([1, 3, 2, 4])
TEST_OUTPUT_PATH = Path.cwd() / "output" / "utils"
TEST_OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
utils_save_plot(plt, TEST_OUTPUT_PATH / "utils_save_plot.png")
# ### utils_set_output_style()
# In[5]:
def utils_set_output_style():
"""Set styles for matplotlib charts and pandas tables."""
# Charts
# for seaborn:
sns.set_style("darkgrid")
sns.set_context("paper")
sns.set(font="sans")
sns.set_palette("tab10")
# for plain matplotlib:
plt.style.use(["seaborn-darkgrid", "seaborn-paper"])
plt.rc("font", family="sans", size=8)
plt.rc("axes", titlesize=6)
plt.rc("axes", labelsize=6)
plt.rc("xtick", labelsize=6)
plt.rc("ytick", labelsize=6)
plt.rc("xtick.major", pad=1)
plt.rc("ytick.major", pad=3)
plt.rc("legend", fontsize=6)
plt.rc("figure", titlesize=6)
# Tables
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
pd.plotting.register_matplotlib_converters()
# **Check Function:**
# In[6]:
if TEST_MODE:
utils_set_output_style()
plt.plot([1, 3, 2, 4])
# ### utils_boxplot_style <dict>, utils_lineplot_style <dict>
# In[7]:
# Define a style I use a lot for boxplots:
utils_boxplot_style = dict(
color="tab:blue",
linewidth=0.5,
saturation=1,
width=0.7,
flierprops=dict(
marker="o", markersize=2, markerfacecolor="none", markeredgewidth=0.5
),
)
# Define a style I use a lot for lineplots:
utils_lineplot_style = dict(
color="tab:blue", linewidth=0.5, marker="o", markersize=3, markeredgewidth=0.5
)
# **Check Function:**
# In[8]:
if TEST_MODE:
utils_set_output_style()
fig = plt.figure(dpi=180, figsize=(5.473, 2))
sns.boxplot(
x=["Dist 1", "Dist 2"],
y=[[2, 4, 3, 4, 15, 8, 3, 0, 2, 21], [12, 14, 13, 17, 15, 8, 11, 0, 2, 21]],
**utils_boxplot_style
)
# ### utils_plot_randomsearch_results()
# In[9]:
def utils_plot_randomsearch_results(df_results, n_top=1):
# Prepare data for plotting
df_plot = df_results[df_results["rank_test_eer"] <= n_top].rename(
columns={
"param_nu": r"$\nu$",
"param_gamma": r"$\gamma$",
"mean_test_accuracy": "Mean Test Acc.",
"mean_test_eer": "Mean Test EER",
}
)
df_plot["Mean Test EER"] = df_plot["Mean Test EER"] * -1 # Because fewer is more
median_nu = df_plot[r"$\nu$"].median()
median_gamma = df_plot[r"$\gamma$"].median()
# Plot
fig = plt.figure(figsize=(5.473 / 1.3, 2), dpi=180)
g = sns.scatterplot(
x=r"$\nu$",
y=r"$\gamma$",
data=df_plot,
size="Mean Test EER",
sizes=(7, 60),
hue="Mean Test EER",
alpha=1,
# palette="Blues",
linewidth=0,
)
# Format Legend labels
leg = g.get_legend()
new_handles = [h for h in leg.legendHandles]
new_labels = []
for i, handle in enumerate(leg.legendHandles):
label = handle.get_label()
try:
new_labels.append(f"{abs(float(label)):.3f}")
except ValueError:
new_labels.append("")
# Plot mean values
plt.plot(
[-0.01, 0.31],
[median_gamma, median_gamma],
linestyle="dashed",
linewidth=0.8,
alpha=0.7,
color="black",
)
plt.text(
0.23,
median_gamma * 1.7 ** 2,
r"median($\gamma$)",
fontsize=6,
color="black",
alpha=0.9,
)
plt.text(
0.23,
median_gamma * 1.2 ** 2,
f"{median_gamma:.3f}",
fontsize=5,
color="black",
alpha=0.9,
)
plt.plot(
[median_nu, median_nu],
[0.0001, 1000],
linestyle="dashed",
linewidth=0.8,
alpha=0.7,
color="black",
)
plt.text(
median_nu + 0.005, 400, r"median($\nu$)", fontsize=6, color="black", alpha=0.9
)
plt.text(
median_nu + 0.005, 200, f"{median_nu:.3f}", fontsize=5, color="black", alpha=0.9
)
# Adjust axes & legend
plt.yscale("log")
plt.ylim(0.0001, 1000)
plt.xlim(0, 0.305)
plt.legend(
new_handles,
new_labels,
bbox_to_anchor=(1.02, 1),
loc=2,
borderaxespad=0.0,
title="Mean EER per Owner\n(Validation Results)",
title_fontsize=5,
)
fig.tight_layout()
return fig
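# **Check Function:**
# (added sketch: the frame below holds made-up random-search results with the columns
# utils_plot_randomsearch_results expects, just to exercise the plotting code)
if TEST_MODE:
    toy_results = pd.DataFrame({
        "rank_test_eer": [1, 1, 1],
        "param_nu": [0.05, 0.1, 0.2],
        "param_gamma": [0.01, 0.1, 1.0],
        "mean_test_accuracy": [0.90, 0.88, 0.85],
        "mean_test_eer": [0.08, 0.10, 0.12],
    })
    utils_plot_randomsearch_results(toy_results, n_top=1)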
# ### utils_plot_session_probability()
# In[12]:
def utils_plot_session_probability(y_impostor, subject, session):
"""Plot the owner probability for every sample of session."""
df_y = | pd.DataFrame(y_impostor) | pandas.DataFrame |
# Adapted, non-cluster version of the script for finding optimal ML hyper-parameters
# for a given ML algorithm, feature set, and optimization metric. All combinations were tested.
# The script requires the following parameters:
# 1. 0-13, an index selecting the corresponding ML algorithm
# 2. feature set name (the complete list of options can be found in the publication's supplement)
# 3. optimization metric (Prec, Acc, MCC, Multi)
# This script was tested using:
# > python MLwGrid.py 0 AllSumSph MCC
# All inputs in MLCombosAll.txt were used for the publication
#general requirements
import pandas as pd
import numpy as np
import sys
import warnings
warnings.filterwarnings(action="ignore")
#preprocessing stuff
from sklearn.model_selection import train_test_split, GridSearchCV, GroupShuffleSplit, StratifiedShuffleSplit, cross_validate, StratifiedKFold
from sklearn import preprocessing
from sklearn import impute
#classifiers
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier, DistanceMetric
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, RidgeClassifier, PassiveAggressiveClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
#process results
from sklearn.metrics import roc_curve, auc, recall_score, accuracy_score, precision_score, confusion_matrix, make_scorer, matthews_corrcoef, jaccard_score
# custom script
import GetFeatureSet as GetFeatureSet
# corresponding ML algorithm for a given index
names = ["LogRegr", "Ridge", "PassAggr",
"QDA", "LDA", "NaiveBayes",
"NearNeigh",
"LinSVM", "RBFSVM", "SigSVM",
"RandomForest", "ExtraTrees", "GradBoost",
"NeurNet"]
classifiers = [
LogisticRegression(solver="liblinear", tol = 1e-4),
RidgeClassifier(solver="auto", tol = 1e-4),
PassiveAggressiveClassifier(tol = 1e-4),
QuadraticDiscriminantAnalysis(priors=None), #also almost no real parameters to adjust
LinearDiscriminantAnalysis(solver = "lsqr"),
GaussianNB(priors=None), #only one real parameter to adjust!
KNeighborsClassifier(algorithm = "ball_tree", weights = 'distance'),
SVC(kernel="linear", max_iter=10000, tol = 1e-4),
SVC(kernel="rbf", class_weight = "balanced", tol = 1e-4),
SVC(kernel="sigmoid", class_weight = "balanced", tol = 1e-4),
RandomForestClassifier(class_weight = "balanced", bootstrap = True, n_estimators=500, max_features='sqrt', criterion='entropy'),
ExtraTreesClassifier(n_estimators=500, min_samples_split=3, max_depth=None, criterion="gini"),
GradientBoostingClassifier(criterion = 'friedman_mse', min_samples_split = 3, n_estimators=1000, loss='deviance'),
MLPClassifier(learning_rate_init = 0.01, activation='relu'),
]
parameter_space = [
{ "penalty": ["l2", 'l1'], "C":[1.0, 0.01, 0.001], "class_weight":['balanced', None]}, #LogRegr
{ "alpha": np.logspace(-4, 1, 6), "class_weight":['balanced', None]}, #Ridge
{ "C": np.logspace(-1, 3, 5), "class_weight":['balanced', None] }, #PassAggr
{ "reg_param": np.linspace(0.5, 1, 7) }, #QDA
{ "shrinkage": ["auto", 0, 0.1, 0.25, 0.5, 0.75, 1] }, #LDA
{ "var_smoothing": np.logspace(-9, 0, 10) }, #Gauss
[ {"metric": ["minkowski"], "p":[2, 3], "n_neighbors": [5, 8, 10, 15]}, {"metric": ["chebyshev"], "n_neighbors": [5, 8, 10, 15]} ], #kNN
{ "C": np.logspace(-3, 1, 5), "class_weight":['balanced', None] }, #SVC lin
{ "C": np.logspace(-3, 1, 5), "gamma": ["scale", "auto"] }, #SVC rbf
{ "C": np.logspace(-3, 1, 5),"gamma": ["scale", "auto"] }, #SVC sig
{ "max_depth": [6, 10, 20, None], 'min_samples_split': [5, 25, 50] }, #RF
{ "class_weight": ["balanced", None], "max_features": ['log2', None], "bootstrap" : [True, False] }, #ExtraTrees
{ "learning_rate": [0.1, 0.01], "max_features" : ['log2', None], "subsample":[0.5, 0.65, 0.8]},#GBClassifier
{ "hidden_layer_sizes": [(50,), (100,), (200,)], "alpha": [0.1, 0.01, 0.001] } #MLPClass
]
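# names, classifiers and parameter_space are parallel lists: the index passed on the command line selects the algorithm, its estimator and its hyperparameter grid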
name_index = int(sys.argv[1]) # number for given ML algorithm
feature_set = str(sys.argv[2]) #All_Sph, All_Shell, Gen, etc
opt_type = str(sys.argv[3]) #Prec Acc MCC Multi
name = names[name_index]
## read in all sites/features
sites = pd.read_csv("../publication_sites/sites_calculated_features_scaled.txt", sep=',')
sites = sites.set_index('SITE_ID',drop=True)
## get training/kfold sites, random under sample, and split out target value ("Catalytic")
X = sites.loc[sites.Set == "data"].copy()
X_Cat = X[X['Catalytic']==True]
X_nonCat = X[X['Catalytic']==False]
# the following line controls under sampling
X_nonCat = X_nonCat.sample(n=len(X_Cat)*3, axis=0, random_state=1)
X = X_Cat.append(X_nonCat)
y = X['Catalytic']; del X['Catalytic']
## get test sites and split out target value ("Catalytic")
testX = sites.loc[sites.Set == "test"].copy()
testY = testX['Catalytic']; del testX['Catalytic']
#split into features and classification
X = GetFeatureSet.feature_subset(X, feature_set, noBSA=True)
print("DataSet entries: %s \t features: %s"%(X.shape[0], X.shape[1]))
testX = GetFeatureSet.feature_subset(testX, feature_set, noBSA=True)
print("TestSet entries: %s \t features: %s"%(testX.shape[0], testX.shape[1]))
def setDisplay(X, x, Y, y):
print("\nTRAIN entries: %s \t features: %s"%(X.shape[0], X.shape[1]))
print("\tNum catalytic: %s \n\tNum non-catalytic: %s"%(len(Y[Y==1]),len(Y[Y==0])))
print("CV entries: %s \t features: %s"%(x.shape[0], x.shape[1]))
print("\tNum catalytic: %s \n\tNum non-catalytic: %s"%(len(y[y==1]),len(y[y==0])))
this_clf = classifiers[name_index]
num_jobs = 15
inner_cv_type = StratifiedShuffleSplit(n_splits=7)
these_params = parameter_space[name_index]
def prec_score_custom(y_true, y_pred, this_label = True):
return( precision_score(y_true, y_pred, pos_label= this_label) )
def mcc_score(y_true, y_pred):
return( matthews_corrcoef(y_true, y_pred))
def jac_score(y_true, y_pred, this_label = True):
return( jaccard_score(y_true, y_pred, pos_label=this_label))
if opt_type == "Prec":
this_scoring = make_scorer(prec_score_custom, greater_is_better = True)
elif opt_type == "Acc":
this_scoring = "accuracy"
elif opt_type == "MCC":
this_scoring = make_scorer(mcc_score, greater_is_better = True)
elif opt_type == "Multi":
this_scoring = {"Acc":'accuracy', "MCC": make_scorer(mcc_score, greater_is_better = True), "Jaccard": make_scorer(jac_score, greater_is_better = True) }
else:
print("Invalid scoring term")
sys.exit()
outer_cv_type=StratifiedKFold(n_splits=7)
outer_cv_results = []
outer_coeffs = []
outer_params = []
outer_feat_imp = []
for i, (train_idx, test_idx) in enumerate(outer_cv_type.split(X,y)):
print("OUTER LOOP NUMBER:", i)
X_train, X_outerCV = X.iloc[train_idx].copy(), X.iloc[test_idx].copy()
y_train, y_outerCV = y.iloc[train_idx].copy(), y.iloc[test_idx].copy()
print("outer CV display:")
setDisplay(X_train, X_outerCV, y_train, y_outerCV)
print("post add_oversampling display:")
setDisplay(X_train, X_outerCV, y_train, y_outerCV)
#run feature selection and CV
clf = GridSearchCV(estimator = this_clf, cv=inner_cv_type, param_grid = these_params, scoring = this_scoring, iid = True, refit = False, verbose=100, n_jobs = num_jobs)
clf.fit(X_train.reset_index(drop=True), y_train.reset_index(drop=True))
results = clf.cv_results_
#somehow get best combination of multiple scoring terms
print(results)
ranks = []
for key in results:
if "rank_test_" in key:
ranks.append(results[key])
#best params will have to be identified for full data set for final model building after best model is selected
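    # per-metric ranks are summed and the parameter set with the smallest total rank (best average rank across all scoring terms) is chosen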
best_params = results['params'][np.argmin(np.sum(np.asarray(ranks), axis = 0))]
print(best_params)
outer_params.append(best_params)
## set the new classifier to these parameters
outer_clf = this_clf.set_params(**best_params)
    ## fit on all training data - this is what GridSearchCV(refit=True) would do anyway,
    ## but its choice of params is not necessarily the rank-sum combination selected above
outer_clf.fit(X_train.reset_index(drop=True), y_train.reset_index(drop=True))
outerCV = pd.DataFrame(y_outerCV, columns=['Catalytic'])
#predict based on fitted outer CV model
outerCV_preds = pd.DataFrame(outer_clf.predict(X_outerCV.reset_index(drop=True)), columns=['Prediction'])
outerCV_preds['SITE_ID']=X_outerCV.index
outerCV_preds = outerCV_preds.set_index('SITE_ID', drop=True)
    outerCV = pd.merge(outerCV, outerCV_preds, left_index=True, right_index=True)
import sklearn.neighbors._base
import sys
sys.modules['sklearn.neighbors.base'] = sklearn.neighbors._base
import pandas as pd
from sklearn.base import TransformerMixin
import numpy as np
from sklearn.impute import SimpleImputer, KNNImputer
from missingpy import MissForest
class prepross(TransformerMixin):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].mean() for c in X], index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
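# Illustrative usage (assuming df is a mixed-type DataFrame): prepross().fit_transform(df)
# fills missing values with the column mode for object columns and the column mean otherwise.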
def rm_rows_cols(df, row_thresh=0.8, col_thresh=0.8):
if "Index" in df.columns:
df.drop("Index", axis=1, inplace=True)
df.columns = [col.strip() for col in df.columns]
df = df.drop_duplicates()
    df = df.dropna(axis=0, how='all').dropna(axis=1, how='all').dropna(axis=0, thresh=int(len(df.columns) * row_thresh)).dropna(axis=1, thresh=int(len(df) * col_thresh))
df = df.infer_objects()
return df
def replace_special_character(df,usr_char=None,do=None, ignore_col=None):
spec_chars = ["!", '"', "#", "%", "&", "'", "(", ")",
"*", "+", ",", "-", ".", "/", ":", ";", "<",
"=", ">", "?", "@", "[", "\\", "]", "^", "_",
"`", "{", "|", "}", "~", "–", "//", "%*", ":/", ".;", "Ø", "§",'$',"£"]
if do== 'remove':
for chactr in usr_char:
spec_chars.remove(chactr)
elif do=='add':
for chactr in usr_char:
spec_chars.append(chactr)
    if ignore_col is not None and len(ignore_col) > 0:
df_to_concat = df[ignore_col]
df = df[list(set(df.columns)-set(ignore_col))]
else:
df_to_concat = pd.DataFrame()
for c in spec_chars:
df = df.replace("\\"+c, '', regex=True)
df = pd.concat([df,df_to_concat], axis=1)
return df
def custom_imputation(df, imputation_type="RDF"):
categorical_columns = []
numeric_columns = []
for c in df.columns:
if df[c].map(type).eq(str).any(): # check if there are any strings in column
categorical_columns.append(c)
else:
numeric_columns.append(c)
# create two DataFrames, one for each data type
data_numeric = df[numeric_columns] # Numerical List.
data_categorical = df[categorical_columns] # Categorical List.
# Imputation of Categorical Values by Mean
data_categorical = pd.DataFrame(prepross().fit_transform(data_categorical),
columns=data_categorical.columns)
data_numeric.reset_index(drop=True, inplace=True)
data_categorical.reset_index(drop=True, inplace=True)
if imputation_type=="KNN":
if len(data_numeric.columns) >= 1:
imp = KNNImputer(n_neighbors=5, weights="uniform")
data_numeric_final = pd.DataFrame(imp.fit_transform(data_numeric), columns=data_numeric.columns)
data_numeric_final.reset_index(drop=True, inplace=True)
final_df = pd.concat([data_numeric_final, data_categorical], axis=1)
else:
final_df = data_categorical
if imputation_type=="RDF":
if len(data_numeric.columns) >= 1:
imp = MissForest(max_iter=10, decreasing=False, missing_values=np.nan,
copy=True, n_estimators=100, criterion=('mse', 'gini'),
max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,
verbose=0, warm_start=False, class_weight=None)
data_numeric_final = pd.DataFrame(imp.fit_transform(data_numeric), columns=data_numeric.columns)
data_numeric_final.reset_index(drop=True, inplace=True)
final_df = pd.concat([data_numeric_final, data_categorical], axis=1)
else:
final_df = data_categorical
if imputation_type == "mean" or \
imputation_type == "median" or \
imputation_type == "most_frequent" or \
imputation_type == "constant":
if len(data_numeric.columns) >= 1:
imp = SimpleImputer(missing_values=np.nan, strategy=imputation_type)
data_numeric_final = pd.DataFrame(imp.fit_transform(data_numeric), columns=data_numeric.columns)
data_numeric_final.reset_index(drop=True, inplace=True)
            final_df = pd.concat([data_numeric_final, data_categorical], axis=1)
# coding: utf-8
from geopy import distance
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
def select_date(df, start, end):
"""Return rows which are in the closed range between start and end."""
return df[(start <= df.Date) & (df.Date <= end)]
def dist_in_km(a, b):
return distance.distance((a.Latitude, a.Longitude), (b.Latitude, b.Longitude)).km
def find_min_dist(row, spray, offset_days, fill_na_by=np.nan):
return spray.pipe(
select_date, row.Date - offset_days, row.Date
).apply(
dist_in_km, args=(row,), axis=1
).pipe(
# at this point, df should not be empty, just in case
lambda df: fill_na_by if df.empty else df.min()
)
def select_sprayed(df, spray, offset_days):
spray_date_index = df.Date != df.Date # all False
for spray_date in spray.Date.unique():
# Select dates between unique spray date and the day offset_days after
spray_date_index |= (spray_date <= df.Date) & (df.Date <= offset_days.apply(spray_date))
return df[spray_date_index]
def parallel_apply(df, func, args, n=cpu_count() + 1):
p = Pool(n)
splitted = [{'df': spl_df, 'args': args} for spl_df in np.array_split(df, n * 20)]
result = p.map(func, splitted)
p.close()
p.join()
return pd.concat(result)
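# Illustrative usage (names and the 10-day window are examples):
# min_dists = parallel_apply(train_df, create_min_dist_series, args=(spray_df, pd.Timedelta(days=10)))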
def create_min_dist_series(args_dict):
result_series = args_dict['df'].apply(find_min_dist, args=args_dict['args'], axis=1)
# if no results, return empty series
if result_series.shape[0] == 0:
        return pd.Series()
#
import os
import time
import pandas
import itertools
import numpy
import json
import sqlalchemy
from tinkoff_api.quotes_loader import call_them_all
from m_utils.transform import lag_it, percent_it, fill_it
def sql_formatting(x):
return str(x).replace('[', '').replace(']', '').replace("'", '')
class SparseLoader:
def __init__(self, api_key, target_quotes, news_horizon, effect_horizon, db_config, reload_quotes=False,
news_titles_source=None, verbose=False, timeit=False, base_option='for_merge', add_time_features=False,
nlp_treator=None, nlp_treator_signature=None, nlp_treator_config=None, nlp_ductor='post',
export_chunk=100_000):
self.verbose = verbose
self.timeit = timeit
self.run_time = None
self.base_option = base_option
self.add_time_features = add_time_features
self.export_chunk = export_chunk
self.where_to_save = './result.csv'
self.nlp_treator = nlp_treator
self.nlp_treator_signature = nlp_treator_signature
self.nlp_treator_config = nlp_treator_config
self.nlp_ductor = nlp_ductor
self.api_key = api_key
self.target_quotes = target_quotes
self.news_horizon = news_horizon
self.effect_horizon = effect_horizon
self.db_config = db_config
self.reload_quotes = reload_quotes
self.news_titles_source = news_titles_source
self.connection = None
self.news_titles_frame = None
self.quotes_frame = None
self.news_titles_alias = 'news_titles'
self.quotes_alias = 'quotes'
self.result_alias = 'result_table'
def fix_time(self):
self.run_time = time.time()
def do_time(self):
self.run_time = time.time() - self.run_time
print(self.run_time)
def establish_connection(self):
if self.timeit:
self.fix_time()
if self.base_option == 'for_merge':
if self.verbose:
print('Establishing Connection')
with open(self.db_config) as f:
db_config = json.load(f)
user, password, host, port, dbname = db_config['user'], db_config['password'], db_config['host'], db_config[
'port'], db_config['dbname']
connection_string = "postgresql+psycopg2://{}:{}@{}:{}/{}".format(user, password, host, port, dbname)
engine = sqlalchemy.create_engine(connection_string)
self.connection = engine.connect()
if self.timeit:
self.do_time()
else:
if self.verbose:
print('Skipped Connection')
def prepare_news_titles_frame(self):
if self.timeit:
self.fix_time()
if self.verbose:
print('Preparing News Titles Frame')
if self.news_titles_source is None:
raise Exception("You should specify news titles source")
if self.news_titles_source is not None:
self.news_titles_frame = pandas.read_excel(self.news_titles_source)
self.news_titles_frame['time'] = pandas.to_datetime(self.news_titles_frame['time'])
def fix_tz(x):
return x.tz_localize(tz='UTC')
self.news_titles_frame['time'] = self.news_titles_frame['time'].apply(func=fix_tz)
def fixit(x):
return x.ceil(freq='T')
self.news_titles_frame['time'] = self.news_titles_frame['time'].apply(func=fixit)
if self.nlp_treator is not None and self.nlp_ductor == 'pre':
old_name = 'title'
new_name = 'Text'
self.news_titles_frame = self.news_titles_frame.rename(columns={old_name: new_name})
                self.news_titles_frame = self.nlp_treator(self.news_titles_frame,
                                                          self.nlp_treator_signature, self.nlp_treator_config)
self.news_titles_frame = self.news_titles_frame.rename(columns={new_name: old_name})
# self.news_titles_frame = self.news_titles_frame[['id', 'time', 'title']]
self.news_titles_frame = self.news_titles_frame.drop(columns=['source', 'category'])
lag_markers = list(
itertools.product(self.news_titles_frame['id'].values,
numpy.array(numpy.arange(self.news_horizon - 1)) + 1))
lag_markers = pandas.DataFrame(data=lag_markers, columns=['id', 'lag'])
self.news_titles_frame = self.news_titles_frame.merge(right=lag_markers, left_on=['id'],
right_on=['id'])
def minute_offset(x):
return pandas.DateOffset(minutes=x)
self.news_titles_frame['time'] = pandas.to_datetime(self.news_titles_frame['time'])
self.news_titles_frame['news_time'] = self.news_titles_frame['time'].copy()
self.news_titles_frame['time'] = self.news_titles_frame['lag'].apply(func=minute_offset)
self.news_titles_frame['time'] = self.news_titles_frame['news_time'] + self.news_titles_frame['time']
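            # each news item now spans news_horizon-1 rows, one per minute after publication, so it can be joined against the minute-level quotes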
if self.base_option == 'for_merge':
self.news_titles_frame.to_sql(name=self.news_titles_alias, con=self.connection,
if_exists='replace',
index=False)
if self.timeit:
self.do_time()
def get_dates(self):
if self.verbose:
print('Getting Dates')
if self.base_option == 'without':
beginning_date, ending_date = self.news_titles_frame['time'].min() - pandas.DateOffset(
minutes=self.effect_horizon), self.news_titles_frame['time'].max()
else:
beginning_date_query = """
SELECT (MIN(time) - ({1} * INTERVAL '1 minute')) AS mn
FROM {0}
""".format(self.news_titles_alias, self.effect_horizon)
ending_date_query = """
SELECT MAX(time) as mx
FROM {0}
""".format(self.news_titles_alias)
beginning_date = pandas.read_sql(sql=beginning_date_query, con=self.connection).values[0, 0]
ending_date = pandas.read_sql(sql=ending_date_query, con=self.connection).values[0, 0]
return beginning_date, ending_date
async def call_quotes(self):
beginning_date, ending_date = self.get_dates()
self.quotes_frame = await call_them_all(tickers=self.target_quotes, start_date=beginning_date,
end_date=ending_date, token=self.api_key)
async def prepare_quotes(self):
if self.timeit:
self.fix_time()
if self.verbose:
print('Preparing Quotes')
if self.reload_quotes:
await self.call_quotes()
if self.timeit:
self.do_time()
def quotes_fill(self):
if self.timeit:
self.fix_time()
if self.verbose:
print('Filling Quotes')
beginning_date, ending_date = self.get_dates()
self.quotes_frame = self.quotes_frame.set_index(keys=['ticker', 'time'])
self.quotes_frame = self.quotes_frame.sort_index(ascending=True)
self.quotes_frame = fill_it(frame=self.quotes_frame, freq='T', zero_index_name='ticker',
first_index_name='time')
self.quotes_frame = self.quotes_frame.reset_index()
if self.timeit:
self.do_time()
def quotes_lag(self):
if self.timeit:
self.fix_time()
if self.verbose:
print('Lagging Quotes')
self.quotes_frame = self.quotes_frame.set_index(keys=['ticker', 'time'])
self.quotes_frame = self.quotes_frame.sort_index(ascending=True)
self.quotes_frame = lag_it(frame=self.quotes_frame, n_lags=self.effect_horizon, suffix='_LAG',
exactly=False)
self.quotes_frame = self.quotes_frame.reset_index()
if self.timeit:
self.do_time()
def quotes_percent(self):
if self.timeit:
self.fix_time()
if self.verbose:
print('Evaluating Quotes Percents')
self.quotes_frame = self.quotes_frame.set_index(keys=['ticker', 'time'])
self.quotes_frame = self.quotes_frame.sort_index(ascending=True)
self.quotes_frame = percent_it(frame=self.quotes_frame, horizon=1)
self.quotes_frame = self.quotes_frame.reset_index()
if self.timeit:
self.do_time()
def time_features(self, the_data):
if self.add_time_features:
from busy_exchange.utils import BusyDayExchange, BusyTimeExchange
"""
the_data['time'] = the_data['time'].dt.tz_convert('EST')
the_data['is_holi'] = the_data['time'].apply(func=BusyDayExchange.is_holi).astype(dtype=float)
the_data['is_full'] = the_data['time'].apply(func=BusyDayExchange.is_full).astype(dtype=float)
the_data['is_cut'] = the_data['time'].apply(func=BusyDayExchange.is_cut).astype(dtype=float)
the_data['to_holi'] = the_data['time'].apply(func=BusyDayExchange.to_holiday, args=(True,))
the_data['to_full'] = the_data['time'].apply(func=BusyDayExchange.to_fullday, args=(True,))
the_data['to_cut'] = the_data['time'].apply(func=BusyDayExchange.to_cutday, args=(True,))
the_data['af_holi'] = the_data['time'].apply(func=BusyDayExchange.to_holiday, args=(False,))
the_data['af_full'] = the_data['time'].apply(func=BusyDayExchange.to_fullday, args=(False,))
the_data['af_cut'] = the_data['time'].apply(func=BusyDayExchange.to_cutday, args=(False,))
"""
the_data['mday'] = the_data['time'].dt.day
the_data['wday'] = the_data['time'].dt.dayofweek
the_data['hour'] = the_data['time'].dt.hour
the_data['minute'] = the_data['time'].dt.minute
# the_data['to_open'] = the_data['time'].apply(func=BusyTimeExchange.to_open)
# the_data['to_close'] = the_data['time'].apply(func=BusyTimeExchange.to_close)
return the_data
async def read(self):
if self.verbose:
print('Reading')
self.establish_connection()
self.prepare_news_titles_frame()
await self.prepare_quotes()
self.quotes_fill()
self.quotes_lag()
# self.quotes_percent()
if self.base_option == 'for_merge':
self.quotes_frame.to_sql(name=self.quotes_alias, con=self.connection,
if_exists='replace',
index=False)
query = """
CREATE TEMPORARY TABLE {0} AS
SELECT RS.*
FROM
(SELECT NF."id"
, NF.title
, NF."lag"
, NF.news_time
, QD.*
FROM
public.newstitle_frame AS NF
FULL OUTER JOIN
public.{1} AS QD
ON NF."time" = QD."time") AS RS
WHERE 37 = 37
;
""".format(self.result_alias, self.quotes_alias)
self.connection.execute(query)
if self.export_chunk is None:
reader_query = """
SELECT *
FROM {0}
;
""".format(self.result_alias)
the_data = pandas.read_sql(sql=reader_query, con=self.connection)
the_data = self.time_features(the_data)
if self.nlp_treator is not None and self.nlp_ductor == 'post':
old_name = 'title'
new_name = 'Text'
the_data['title'] = the_data['title'].fillna('NoData')
print('HUGO BOSS: to memory')
the_data = the_data.rename(columns={old_name: new_name})
the_data = self.nlp_treator(the_data,
self.nlp_treator_signature, self.nlp_treator_config)
the_data = the_data.rename(columns={new_name: old_name})
return the_data
else:
size_query = """
SELECT COUNT(*)
FROM {0}
;
""".format(self.result_alias)
final_table_d0_size = pandas.read_sql(sql=size_query, con=self.connection).values[0, 0]
n_chunks = (final_table_d0_size // self.export_chunk) + 1
chunks = [(j * self.export_chunk, (j + 1) * self.export_chunk - 1) for j in range(n_chunks)]
chunks[-1] = (chunks[-1][0], final_table_d0_size)
if self.verbose:
print("Final table's D0:\t {0}\nChunks:\n{1}".format(final_table_d0_size, chunks))
iteration_columns_query = """
ALTER TABLE {0}
ADD COLUMN chunker SERIAL;
""".format(self.result_alias)
self.connection.execute(iteration_columns_query)
if os.path.exists(self.where_to_save):
os.remove(self.where_to_save)
for j in range(n_chunks):
reader_query = """
SELECT *
FROM {0}
WHERE chunker >= {1} and chunker <= {2}
;
""".format(self.result_alias, chunks[j][0], chunks[j][1])
data_chunk = pandas.read_sql(sql=reader_query, con=self.connection)
data_chunk = self.time_features(data_chunk)
if self.nlp_treator is not None and self.nlp_ductor == 'post':
old_name = 'title'
new_name = 'Text'
data_chunk['title'] = data_chunk['title'].fillna('NoData')
print('HUGO BOSS: to disk')
data_chunk = data_chunk.rename(columns={old_name: new_name})
data_chunk = self.nlp_treator(data_chunk,
self.nlp_treator_signature, self.nlp_treator_config)
data_chunk = data_chunk.rename(columns={new_name: old_name})
data_chunk.columns = [x.replace('_PCT1', '') for x in data_chunk.columns.values]
data_chunk = data_chunk.dropna().sort_values(by=['title', 'lag'])
if j == 0:
data_chunk.to_csv(self.where_to_save, sep=';', index=False, mode='a', header=True)
else:
data_chunk.to_csv(self.where_to_save, sep=';', index=False, mode='a', header=False)
else:
the_data = self.quotes_frame.merge(right=self.news_titles_frame, left_on='time', right_on='time')
the_data = self.time_features(the_data)
if self.nlp_treator is not None and self.nlp_ductor == 'post':
old_name = 'title'
new_name = 'Text'
the_data['title'] = the_data['title'].fillna('NoData')
print('HUGO BOSS: in memory')
the_data = the_data.rename(columns={old_name: new_name})
the_data = self.nlp_treator(the_data,
self.nlp_treator_signature, self.nlp_treator_config)
the_data = the_data.rename(columns={new_name: old_name})
return the_data
class KernelLoader:
def __init__(self, api_key, target_quotes, news_horizon, effect_horizon, db_config,
window_function, window_function_kwargs,
reload_quotes=False,
news_titles_source=None, verbose=False, timeit=False, base_option='for_merge', add_time_features=False,
nlp_treator=None, nlp_treator_signature=None, nlp_treator_config=None, nlp_ductor='post',
export_chunk=100_000):
self.window_function = window_function
self.window_function_kwargs = window_function_kwargs
self.verbose = verbose
self.timeit = timeit
self.run_time = None
self.base_option = base_option
self.add_time_features = add_time_features
self.export_chunk = export_chunk
self.where_to_save = './result.csv'
self.nlp_treator = nlp_treator
self.nlp_treator_signature = nlp_treator_signature
self.nlp_treator_config = nlp_treator_config
self.nlp_ductor = nlp_ductor
self.api_key = api_key
self.target_quotes = target_quotes
self.news_horizon = news_horizon
self.effect_horizon = effect_horizon
self.db_config = db_config
self.reload_quotes = reload_quotes
self.news_titles_source = news_titles_source
self.connection = None
self.news_titles_frame = None
self.quotes_frame = None
self.news_titles_alias = 'news_titles'
self.quotes_alias = 'quotes'
self.result_alias = 'result_table'
def fix_time(self):
self.run_time = time.time()
def do_time(self):
self.run_time = time.time() - self.run_time
print(self.run_time)
def establish_connection(self):
if self.timeit:
self.fix_time()
if self.base_option == 'for_merge':
if self.verbose:
print('Establishing Connection')
with open(self.db_config) as f:
db_config = json.load(f)
user, password, host, port, dbname = db_config['user'], db_config['password'], db_config['host'], db_config[
'port'], db_config['dbname']
connection_string = "postgresql+psycopg2://{}:{}@{}:{}/{}".format(user, password, host, port, dbname)
engine = sqlalchemy.create_engine(connection_string)
self.connection = engine.connect()
if self.timeit:
self.do_time()
else:
if self.verbose:
print('Skipped Connection')
def prepare_news_titles_frame(self):
if self.timeit:
self.fix_time()
if self.verbose:
print('Preparing News Titles Frame')
if self.news_titles_source is None:
raise Exception("You should specify news titles source")
if self.news_titles_source is not None:
self.news_titles_frame = pandas.read_excel(self.news_titles_source)
self.news_titles_frame['time'] = pandas.to_datetime(self.news_titles_frame['time'])
def fix_tz(x):
return x.tz_localize(tz='UTC')
self.news_titles_frame['time'] = self.news_titles_frame['time'].apply(func=fix_tz)
def fixit(x):
return x.ceil(freq='T')
self.news_titles_frame['time'] = self.news_titles_frame['time'].apply(func=fixit)
if self.nlp_treator is not None: # and self.nlp_ductor == 'pre':
old_name = 'title'
new_name = 'Text'
self.news_titles_frame = self.news_titles_frame.rename(columns={old_name: new_name})
self.news_titles_frame = self.nlp_treator(self.news_titles_frame,
self.nlp_treator_signature, self.nlp_treator_config)
self.news_titles_frame = self.news_titles_frame.rename(columns={new_name: old_name})
# self.news_titles_frame = self.news_titles_frame[['id', 'time', 'title']]
self.news_titles_frame = self.news_titles_frame.drop(columns=['source', 'category'])
"""
lag_markers = list(
itertools.product(self.news_titles_frame['id'].values,
numpy.array(numpy.arange(self.news_horizon - 1)) + 1))
lag_markers = pandas.DataFrame(data=lag_markers, columns=['id', 'lag'])
self.news_titles_frame = self.news_titles_frame.merge(right=lag_markers, left_on=['id'],
right_on=['id'])
"""
def minute_offset(x):
return pandas.DateOffset(minutes=x)
self.news_titles_frame['time'] = pandas.to_datetime(self.news_titles_frame['time'])
"""
self.news_titles_frame['news_time'] = self.news_titles_frame['time'].copy()
self.news_titles_frame['time'] = self.news_titles_frame['lag'].apply(func=minute_offset)
self.news_titles_frame['time'] = self.news_titles_frame['news_time'] + self.news_titles_frame['time']
if self.base_option == 'for_merge':
self.news_titles_frame.to_sql(name=self.news_titles_alias, con=self.connection,
if_exists='replace',
index=False)
"""
if self.timeit:
self.do_time()
def get_dates(self):
if self.verbose:
print('Getting Dates')
if self.base_option == 'without':
beginning_date, ending_date = self.news_titles_frame['time'].min() - pandas.DateOffset(
minutes=self.effect_horizon), self.news_titles_frame['time'].max()
else:
beginning_date_query = """
SELECT (MIN(time) - ({1} * INTERVAL '1 minute')) AS mn
FROM {0}
""".format(self.news_titles_alias, self.effect_horizon)
ending_date_query = """
SELECT MAX(time) as mx
FROM {0}
""".format(self.news_titles_alias)
beginning_date = pandas.read_sql(sql=beginning_date_query, con=self.connection).values[0, 0]
            ending_date = pandas.read_sql(sql=ending_date_query, con=self.connection).values[0, 0]
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import nltk
from nltk import wordpunct_tokenize
from nltk.stem.snowball import EnglishStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
vectorizer = TfidfVectorizer(input='content', analyzer='word')
svd = TruncatedSVD(n_components=500, n_iter=5, random_state=27)
nltk.download('punkt')
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
#After we use get_text, use nltk's clean_html function.
def nltkPipe(soup_text):
#Convert to tokens
tokens = [x.lower() for x in wordpunct_tokenize(soup_text)]
text = nltk.Text(tokens)
#Get lowercase words. No single letters, and no stop words
words = [w.lower() for w in text if w.isalpha() and len(w) > 1 and w.lower() not in stop_words]
#Remove prefix/suffixes to cut down on vocab
stemmer = EnglishStemmer()
words_nostems = [stemmer.stem(w) for w in words]
return words_nostems
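# Illustrative example: nltkPipe("The Cats are running!") should yield roughly ['cat', 'run'] (lowercased, stop words and punctuation removed, stemmed)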
def getTitleTokens(html):
soup = BeautifulSoup(html,'html.parser')
soup_title = soup.title
if soup_title != None:
soup_title_text = soup.title.get_text()
text_arr = nltkPipe(soup_title_text)
return text_arr
else:
return []
def getBodyTokens(html):
soup = BeautifulSoup(html,'html.parser')
#Get the text body
soup_para = soup.find_all('p')
soup_para_clean = ' '.join([x.get_text() for x in soup_para if x.span==None and x.a==None])
text_arr = nltkPipe(soup_para_clean)
return text_arr
#Build the model
def get_html(in_df):
keep_cols = ["Webpage_id","Tag"]
use_df = in_df[keep_cols]
html_reader_obj = pd.read_csv(data_dir+'html_data.csv',iterator=True, chunksize=10000)
frames = []
match_indices = use_df['Webpage_id'].values.tolist()
print(len(match_indices),' indices left...')
while len(match_indices) > 0:
for chunk in html_reader_obj:
            merge_df = pd.merge(use_df,chunk,how='inner',on='Webpage_id')
import pandas as pd
import streamlit as st
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn import linear_model
from sklearn import neighbors
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
import category_encoders as ec
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import plotly.graph_objects as go
st.markdown("# DOPP3")
st.markdown('## Which characteristics are predictive for countries with large populations living in extreme poverty?')
with st.echo():
# READ TRANSFORMED CSV FILE
raw = pd.read_csv("../data/transformed.csv")
#st.write(raw.head(100))
    feature_descriptions = pd.read_csv("../data/feature_descriptions.csv")
import pandas as pd
import numpy as np
import os.path as op
from glob import glob
main_dir = '../behav_data'
out_dir = 'data/ratings_complete'
sub_dirs = sorted(glob(op.join(main_dir, 'raw', 'sub-*')))
to_drop_all = [
'regular_or_catch', 'stim_path', 'trial_nr',
'duration', 'rating_coord_deg',
]
test_df = pd.read_csv('../stims/stimuli-expressive_selection-test.tsv')
for sub_dir in sub_dirs:
sub = op.basename(sub_dir).split('-')[1]
if sub == '11':
continue # not yet complete
print(f"\nProcessing sub-{sub}")
dfs = []
### PREPROCESSING NEUTRAL RATINGS ###
for run in [1, 2]:
tsv = op.join(sub_dir, f'sub-{sub}_task-neutral_run-{run}.tsv')
df = pd.read_csv(tsv, sep='\t', index_col=0).drop(to_drop_all + ['filename', 'block', 'trial'], axis=1)
df = df.set_index(df.trial_type).drop('rating_type', axis=1)
df['run'] = run
df['rep'] = run
df['session'] = 4
df = df.rename(columns={
'behav_trial_nr': 'trial', 'behav_run': 'block',
'rating_valence_norm': 'valence',
'rating_arousal_norm': 'arousal',
'rating_dominance': 'dominance',
'rating_trustworthiness': 'trustworthiness',
'rating_attractiveness': 'attractiveness'
}
)
df['arousal'] = (df['arousal'] + 1) / 2 # normalize to [0, 1]
for attr in ['dominance', 'trustworthiness', 'attractiveness']:
df[attr] = 2 * ((df[attr] + 4) / 8) - 1
value_vars = ['valence', 'arousal', 'dominance', 'trustworthiness', 'attractiveness']
id_vars = [col for col in df.columns if col not in value_vars]
df = pd.melt(df, id_vars=id_vars, value_vars=value_vars, var_name='rating_type', value_name='rating').dropna(how='any', axis=0)
dfs.append(df)
df = pd.concat(dfs, axis=0)
df = df.set_index(df.trial_type)
df.index.name = None
cols = ['rep', 'session', 'run', 'block', 'trial', 'trial_type', 'rating_type', 'rating', 'rating_RT']
#cols = cols + [col for col in df.columns if col not in cols]
df = df.loc[:, cols].sort_values(cols, axis=0)
print("Shape neutral df: %s" % (df.shape,))
df.to_csv(f'{out_dir}/sub-{sub}_task-neutral_ratings.tsv', sep='\t')
### DONE PREPROCESSING NEUTRAL RATINGS ###
dfs = []
for ses in [1, 2, 3]:
if ses == 1:
tsvs = sorted(glob(op.join(sub_dir, f'sub-{sub}_ses-?_task-expressive_run-?.tsv')))
tmp = []
for i, tsv in enumerate(tsvs):
df = pd.read_csv(tsv, sep='\t', index_col=0)
df['rep'] = int(op.basename(tsv).split('ses-')[1][0])
df['run'] = int(op.basename(tsv).split('run-')[1][0])
tmp.append(df)
df = pd.concat(tmp, axis=0, sort=True)
else:
tsvs = sorted(glob(op.join(sub_dir, f'sub-{sub}_ses-{ses}_task-expressive_run-?_redo.tsv')))
tmp = []
for i, tsv in enumerate(tsvs):
df = pd.read_csv(tsv, sep='\t', index_col=0)
df['run'] = int(op.basename(tsv).split('run-')[1][0])
df['rep'] = 1
print(df)
tmp.append(df)
            df = pd.concat(tmp, axis=0, sort=True)
"""muttlib.plotting test suite.
`muttlib` uses `pytest-mpl` to plots testing.
To use, you simply need to mark the function where you want to compare images using
@pytest.mark.mpl_image_compare, and make sure that the function returns
a Matplotlib figure (or any figure object that has a savefig method):
```python
import pytest
import matplotlib.pyplot as plt
@pytest.mark.mpl_image_compare
def test_succeeds():
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot([1,2,3])
return fig
```
To generate the baseline images, run the tests with the --mpl-generate-path option
with the name of the directory where the generated images should be placed:
```python
pytest --mpl-generate-path=baseline
```
More info about `pytest-mpl` library: https://github.com/matplotlib/pytest-mpl#using
"""
from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from muttlib.plotting import plot
from muttlib.plotting.constants import (
DAILY_TIME_GRANULARITY,
HOURLY_TIME_GRANULARITY,
PLOT_CONFIG,
DS_COL,
Y_COL,
YHAT_COL,
)
@pytest.fixture
def sample_data_df():
# Taken from https://raw.githubusercontent.com/facebook/prophet/master/examples/example_retail_sales.csv
return pd.DataFrame.from_records(
np.array(
[
('2013-02-01T00:00:00.000000000', 373938),
('2013-03-01T00:00:00.000000000', 421638),
('2013-04-01T00:00:00.000000000', 408381),
('2013-05-01T00:00:00.000000000', 436985),
('2013-06-01T00:00:00.000000000', 414701),
('2013-07-01T00:00:00.000000000', 422357),
('2013-08-01T00:00:00.000000000', 434950),
('2013-09-01T00:00:00.000000000', 396199),
('2013-10-01T00:00:00.000000000', 415740),
('2013-11-01T00:00:00.000000000', 423611),
('2013-12-01T00:00:00.000000000', 477205),
('2014-01-01T00:00:00.000000000', 383399),
('2014-02-01T00:00:00.000000000', 380315),
('2014-03-01T00:00:00.000000000', 432806),
('2014-04-01T00:00:00.000000000', 431415),
('2014-05-01T00:00:00.000000000', 458822),
('2014-06-01T00:00:00.000000000', 433152),
('2014-07-01T00:00:00.000000000', 443005),
('2014-08-01T00:00:00.000000000', 450913),
('2014-09-01T00:00:00.000000000', 420871),
('2014-10-01T00:00:00.000000000', 437702),
('2014-11-01T00:00:00.000000000', 437910),
('2014-12-01T00:00:00.000000000', 501232),
('2015-01-01T00:00:00.000000000', 397252),
('2015-02-01T00:00:00.000000000', 386935),
('2015-03-01T00:00:00.000000000', 444110),
('2015-04-01T00:00:00.000000000', 438217),
('2015-05-01T00:00:00.000000000', 462615),
('2015-06-01T00:00:00.000000000', 448229),
('2015-07-01T00:00:00.000000000', 457710),
('2015-08-01T00:00:00.000000000', 456340),
('2015-09-01T00:00:00.000000000', 430917),
('2015-10-01T00:00:00.000000000', 444959),
('2015-11-01T00:00:00.000000000', 444507),
('2015-12-01T00:00:00.000000000', 518253),
('2016-01-01T00:00:00.000000000', 400928),
('2016-02-01T00:00:00.000000000', 413554),
('2016-03-01T00:00:00.000000000', 460093),
('2016-04-01T00:00:00.000000000', 450935),
('2016-05-01T00:00:00.000000000', 471421),
],
dtype=[('ds', '<M8[ns]'), ('y', '<i8')],
),
)
@pytest.fixture
def sample_data_yhat_df():
# Taken from `sample_data_df`
return pd.DataFrame.from_records(
np.array(
[
('2013-02-01T00:00:00.000000000', 3.7394, 0.3739, 7.4788, 1),
('2013-03-01T00:00:00.000000000', 4.2164, 0.4216, 8.4328, 1),
('2013-04-01T00:00:00.000000000', 4.0838, 0.4084, 8.1676, 1),
('2013-05-01T00:00:00.000000000', 4.3699, 0.4370, 8.7397, 1),
('2013-06-01T00:00:00.000000000', 4.1470, 0.4147, 8.2940, 1),
('2013-07-01T00:00:00.000000000', 4.2236, 0.4224, 8.4471, 1),
('2013-08-01T00:00:00.000000000', 4.3495, 0.4350, 8.6990, 1),
('2013-09-01T00:00:00.000000000', 3.9620, 0.3962, 7.9240, 1),
('2013-10-01T00:00:00.000000000', 4.1574, 0.4157, 8.3148, 1),
('2013-11-01T00:00:00.000000000', 4.2361, 0.4236, 8.4722, 1),
('2013-12-01T00:00:00.000000000', 4.7721, 0.4772, 9.5441, 1),
('2014-01-01T00:00:00.000000000', 3.8340, 0.3834, 7.6680, 1),
('2014-02-01T00:00:00.000000000', 3.8032, 0.3803, 7.6063, 1),
('2014-03-01T00:00:00.000000000', 4.3281, 0.4328, 8.6561, 1),
('2014-04-01T00:00:00.000000000', 4.3142, 0.4314, 8.6283, 1),
('2014-05-01T00:00:00.000000000', 4.5882, 0.4588, 9.1764, 1),
('2014-06-01T00:00:00.000000000', 4.3315, 0.4332, 8.6630, 1),
('2014-07-01T00:00:00.000000000', 4.4301, 0.4430, 8.8601, 1),
('2014-08-01T00:00:00.000000000', 4.5091, 0.4509, 9.0183, 1),
('2014-09-01T00:00:00.000000000', 4.2087, 0.4209, 8.4174, 1),
('2014-10-01T00:00:00.000000000', 4.3770, 0.4377, 8.7540, 1),
('2014-11-01T00:00:00.000000000', 4.3791, 0.4379, 8.7582, 1),
('2014-12-01T00:00:00.000000000', 5.0123, 0.5012, 10.0246, 1),
('2015-01-01T00:00:00.000000000', 3.9725, 0.3973, 7.9450, 1),
('2015-02-01T00:00:00.000000000', 3.8694, 0.3869, 7.7387, 1),
('2015-03-01T00:00:00.000000000', 4.4411, 0.4441, 8.8822, 1),
('2015-04-01T00:00:00.000000000', 4.3822, 0.4382, 8.7643, 1),
('2015-05-01T00:00:00.000000000', 4.6262, 0.4626, 9.2523, 1),
('2015-06-01T00:00:00.000000000', 4.4823, 0.4482, 8.9646, 1),
('2015-07-01T00:00:00.000000000', 4.5771, 0.4577, 9.1542, 1),
('2015-08-01T00:00:00.000000000', 4.5634, 0.4563, 9.1268, 1),
('2015-09-01T00:00:00.000000000', 4.3092, 0.4309, 8.6183, 1),
('2015-10-01T00:00:00.000000000', 4.4496, 0.4450, 8.8992, 1),
('2015-11-01T00:00:00.000000000', 4.4451, 0.4445, 8.8901, 1),
('2015-12-01T00:00:00.000000000', 5.1825, 0.5183, 10.3651, 1),
('2016-01-01T00:00:00.000000000', 4.0093, 0.4009, 8.0186, 1),
('2016-02-01T00:00:00.000000000', 4.1355, 0.4136, 8.2711, 1),
('2016-03-01T00:00:00.000000000', 4.6009, 0.4601, 9.2019, 1),
('2016-04-01T00:00:00.000000000', 4.5094, 0.4509, 9.0187, 1),
('2016-05-01T00:00:00.000000000', 4.7142, 0.4714, 9.4284, 1),
],
dtype=[
('ds', '<M8[ns]'),
('y', '<i8'),
('yhat_lower', '<i8'),
('yhat_upper', '<i8'),
('sign', '<i8'),
],
),
)
def perturb_ts(df, col, scale=1):
"""Add noise to ts
"""
mean = df[col].mean() * scale
df[col] += np.random.default_rng(42).uniform(
low=-mean / 2, high=mean / 2, size=len(df)
)
return df
@pytest.mark.mpl_image_compare
def test_create_forecast_figure(sample_data_df):
time_series = sample_data_df.iloc[:30]
predictions = sample_data_df.iloc[30:]
predictions = predictions.rename(columns={Y_COL: YHAT_COL})
full_series = pd.concat([predictions, time_series])
full_series[DS_COL] = pd.to_datetime(full_series[DS_COL])
end_date = pd.to_datetime(predictions[DS_COL]).min()
forecast_window = (pd.to_datetime(predictions[DS_COL]).max() - end_date).days
fig = plot.create_forecast_figure(
full_series,
'test',
end_date,
forecast_window,
time_granularity=DAILY_TIME_GRANULARITY,
plot_config=deepcopy(PLOT_CONFIG),
)
return fig
@pytest.mark.mpl_image_compare
def test_create_forecast_figure_overlapping(sample_data_yhat_df):
time_series = sample_data_yhat_df
predictions = sample_data_yhat_df.iloc[30:]
predictions = predictions.rename(columns={Y_COL: YHAT_COL})
predictions = perturb_ts(predictions, YHAT_COL, scale=0.1)
full_series = pd.concat([predictions, time_series])
full_series[DS_COL] = pd.to_datetime(full_series[DS_COL])
end_date = pd.to_datetime(predictions[DS_COL]).min()
    forecast_window = (pd.to_datetime(predictions[DS_COL]).max() - end_date).days
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Group 58
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import plotly.express as px
import matplotlib.pyplot as plt
from plotly.offline import plot
import plotly.graph_objects as go
from pandas.plotting import parallel_coordinates
from sklearn.metrics import accuracy_score,confusion_matrix
import seaborn as sns
def lrelu(x,deriv=False):
if deriv:
der = np.ones(x.shape)*0.01
der[x>0] = 1
return der
return (x*(x>0)+0.01*x*(x<=0))
def sigmoid(z,deriv=False):
if deriv:
return sigmoid(z)*(1-sigmoid(z))
return (1/(1+np.exp(-z)))
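# note: sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)), which is what the deriv branch above returns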
def relu(x,deriv=False):
if deriv:
der = np.zeros(x.shape)
der[x>0] = 1
return der
return x*(x>=0)
class MLP():
np.random.seed(1) #use a random seed
n_epochs = 0
actf = []
activations = []
cost = []
cost_validation = []
zetas = []
shapes = []
def __init__(self,nn_architecture, mean=0, devStd=1, zeroWeights=False, zeroBiases=True):
if len(nn_architecture['layers']) != len(nn_architecture['activations']):
raise ValueError('For each layer there must be an activation function')
self.shapes = nn_architecture['layers']
self.actf = nn_architecture['activations']
#Kaiming weights normalization
if(zeroWeights):
self.weights = np.asarray([np.zeros((shape)) for shape in self.shapes])
else:
self.weights = np.asarray([np.random.normal(mean, devStd, shape) for shape in self.shapes])
#self.weights = np.asarray([np.random.randn(*shape)*np.sqrt(2/shape[0]) for shape in self.shapes])
if(zeroBiases):
self.biases = np.asarray([np.zeros((shape[0],1)) for shape in self.shapes]) #biases are 0 at the beginning
else:
self.biases = np.asarray([np.random.normal(mean, devStd, (shape[0],1)) for shape in self.shapes])
#self.biases = np.asarray([np.random.randn(shape[0],1) for shape in self.shapes])
def feedforward(self,a): #feedforward
self.activations.clear()
self.zetas.clear()
self.activations.append(a)
for i in range(len(self.shapes)):
#print(len(self.shapes))
z = self.weights[i] @ a + self.biases[i]
self.zetas.append(z)
a = self.actf[i](z)
self.activations.append(a)
return a #return sigmoid(last_layer_value)
def backprop(self,o,y):
grad_w =np.asarray([np.zeros(weight.shape) for weight in self.weights])
grad_b =np.asarray([np.zeros(bias.shape) for bias in self.biases])
#use binary cross entropy -> d^L = grad_a(Cost)*sig'(z^L)
dC_do = mean_squared_error(o,y,deriv=True)* sigmoid(self.zetas[-1],deriv=True) # calculate delta_lastLayer
grad_b[-1] = dC_do
grad_w[-1] = dC_do @ self.activations[-2].T #
#backpropagate error
for l in range(2,len(self.shapes)+1):
#chain rule -> d^l = w^(l+1).T @ d^l+1 * derivate_actfun(z^l)
dC_do = np.multiply((self.weights[-l+1].T @ dC_do), self.actf[-l](self.zetas[-l],deriv=True))
#compute grad bias and grad weights
grad_b[-l] = dC_do
grad_w[-l] = dC_do @ self.activations[-l-1].T
return (grad_b,grad_w)
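    # backprop returns per-sample gradients; update_wb sums them over a batch and scales by lr/len(batch)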
def train(self,df_train,df_test,epochs=100,batch_size=32,lr=0.3,adapt_lr = True, early_stopping = 5):
last_cost = np.iinfo('int32').max #max value to compare with
no_impr = 0
x_test_norm = MinMaxScaler().fit_transform(df_test.values[:,:-1])
y_test = df_test.values[:,-1].reshape(-1,1)
x_train_norm = MinMaxScaler().fit_transform(df_train.values[:,:-1]) #normalize data ( only features)[ even though MinMaxScaler won't affect y vector]
y_train = df_train.values[:,-1].reshape(-1,1)
df_train_norm = np.concatenate((x_train_norm,df_train.values[:,-1].reshape(-1,1)),axis=1) # rebuild dataframe
for e in range(epochs):
np.random.shuffle(df_train_norm) #shuffle the dataframe
batches = [df_train_norm[bs:bs+batch_size] for bs in range(0,len(df_train_norm),batch_size) ] #generate batches
for batch in batches: #for each batch compute
#reduce learning rate of a magnitude after 130 epochs (adaptive lr)
if adapt_lr and e > 130:
lr = lr/10
self.update_wb(batch,lr)
#
cost = mean_squared_error(self.feedforward(x_train_norm.T),y_train)
cost_validation = mean_squared_error(self.feedforward(x_test_norm.T),y_test) #my validation is the test set itself
train_pred,y_train = self.predict(df_train)
accuracy_train = accuracy_score(train_pred,y_train)
#Early stopping: if the cost in the validate sample ( in our case directly on the test set)
#does not decrease for more than *early_stopping* epochs I may start overfitting the training set
if cost_validation >= last_cost:
no_impr+=1
else:
no_impr = 0
if early_stopping != None and no_impr>= early_stopping:
break
last_cost = cost_validation
self.cost.append(cost)
self.cost_validation.append(cost_validation)
self.n_epochs = e+1
#print('epoch {0}--> loss: {1} -----> acc = {2}'.format(e,cost,accuracy_train))
    @staticmethod
    def accuracy_score(pred, y):
        return (pred == y).sum() / y.shape[0]
def update_wb(self,batch,lr):
grad_w_tot = np.asarray([np.zeros(weight.shape) for weight in self.weights])
grad_b_tot = np.asarray([np.zeros(bias.shape) for bias in self.biases])
x_train = np.expand_dims(batch[:,:-1],axis=1)
y_train = np.expand_dims(batch[:,-1],axis=1)
for x,y in zip(x_train,y_train): #for each sample i use forward and backprop to get gradients of weights/baises
output = self.feedforward(x.T)
gradb,gradw = self.backprop(output,y)
#must sum grad_w/grad_b in the same batch
grad_w_tot = [gradw_i+gwt for gradw_i,gwt in zip(gradw,grad_w_tot)]
grad_b_tot = [gradb_i+gbt for gradb_i,gbt in zip(gradb,grad_b_tot)]
#update weights and biases --> w = w - lr/len(batch)*grad_w
self.weights = [weight - (lr/len(batch))*gw_i for weight,gw_i in zip(self.weights,grad_w_tot)]
self.biases = [bias - (lr/len(batch))*gb_i for bias,gb_i in zip(self.biases,grad_b_tot )]
def predict(self,df_test):
x_test = MinMaxScaler().fit_transform(df_test.values[:,:-1]) #normalize test set
y_test = df_test.values[:,-1].reshape(-1,1) #(n_observations,1)
predictions = []
for x in x_test:
x = x.reshape(-1,1)
predictions.append( self.feedforward(x))
predictions= np.asarray(predictions)
predictions[predictions<0.5] = 0
predictions[predictions>=0.5] =1
predictions = predictions.flatten().reshape(-1,1)
return (predictions,y_test)
# calculate mean squared error
def mean_squared_error(actual, predicted,deriv=False):
if deriv==True:
return (actual-predicted)
sum_square_error = 0.0
for i in range(len(actual.T)):
sum_square_error += (actual.T[i] - predicted[i])**2.0
    mean_square_error = 1.0 / len(actual.T) * sum_square_error
return mean_square_error
def tryWithZero():
n_epochs = 200
b_size = 16
l_rate = 0.3
nn_architecture = {
'layers':[(10,2),(10,10),(1,10)],
'activations':[relu,relu,sigmoid]
}
mlp = MLP(nn_architecture, zeroWeights=True, zeroBiases=True)
mlp.train(df_train,df_test, n_epochs, b_size, l_rate)
predictions,y_test = mlp.predict(df_test)
accuracy = accuracy_score(predictions,y_test)
plt.plot(range(mlp.n_epochs),mlp.cost)
plt.plot(range(mlp.n_epochs),mlp.cost_validation,'r')
plt.title('MSE error over epochs')
plt.xlabel('number of epochs')
plt.ylabel('error')
plt.legend(['Error on Training','Error on test'])
print('accuracy: ',accuracy)
confMat = confusion_matrix(predictions.flatten().reshape(-1,1),y_test)
confMatDataframe = pd.DataFrame(confMat)
confMatDataframe.index.name = 'Actual'
confMatDataframe.columns.name = 'Predicted'
sns.heatmap(confMatDataframe, cmap="Blues", annot=True)
plt.show()
def heatmapAccuracy():
n_epochs = 200
b_size = 16
nn_architecture = {
'layers':[(10,2),(10,10),(1,10)],
'activations':[relu,relu,sigmoid]
}
lrate_array = [0.0001, 0.001, 0.01, 0.1, 1, 10]
mean = 0
stdDev_array = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
dataFrame = np.zeros((6, 11))
for i in range (len(lrate_array)):
for j in range (len(stdDev_array)):
mlp = MLP(nn_architecture, mean, stdDev_array[j])
mlp.train(df_train,df_test, n_epochs, b_size, lrate_array[i])
predictions,y_test = mlp.predict(df_test)
dataFrame[i][j] = accuracy_score(predictions,y_test)
dataFrame = pd.DataFrame(dataFrame)
dataFrame.columns = ["0", "0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "1"]
dataFrame.columns.name = "Standard deviation"
dataFrame.index = ["0.0001", "0.001", "0.01", "0.1", "1", "10"]
dataFrame.index.name = "Learning rate"
sns.heatmap(dataFrame, cmap="Blues", annot=True)
plt.show()
def heatmapActivation(mlpToTest, df_test, title_x):
x_test = MinMaxScaler().fit_transform(df_test.values[:,:-1]) #normalize test set
y_test = df_test.values[:,-1].reshape(-1,1) #(n_observations,1)
dataLayer1 = np.zeros((82, 10))
dataLayer2 = np.zeros((82, 10))
item = 0
for x in x_test:
x = x.reshape(-1,1)
mlpToTest.feedforward(x)
dataLayer1[item] = np.reshape(mlpToTest.activations[1], (10))
dataLayer2[item] = np.reshape(mlpToTest.activations[2], (10))
item += 1
fig, (ax1, ax2) = plt.subplots(1,2)
fig.suptitle(title_x, fontsize=16)
ax1.set_title("First hidden layer")
    dataLayer1 = pd.DataFrame(dataLayer1)
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
from pandas._typing import Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
check_kwargs_and_nopython,
get_jit_arguments,
jit_user_function,
)
def generate_numba_apply_func(
args: Tuple,
kwargs: Dict[str, Any],
func: Callable[..., Scalar],
engine_kwargs: Optional[Dict[str, bool]],
):
"""
Generate a numba jitted apply function specified by values from engine_kwargs.
1. jit the user's function
2. Return a rolling apply function with the jitted function inline
Configurations specified in engine_kwargs apply to both the user's
function _AND_ the rolling apply function.
Parameters
----------
args : tuple
*args to be passed into the function
kwargs : dict
**kwargs to be passed into the function
func : function
function to be applied to each window and will be JITed
engine_kwargs : dict
dictionary of arguments to be passed into numba.jit
Returns
-------
Numba function
"""
    nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
import socket
import logging
from os import mkdir, path
from sys import exc_info, getsizeof
from traceback import extract_tb
import json
import pandas as pd
from datetime import datetime
UDP_SERVER_PORT = 4040
UDP_SERVER_IP = "0.0.0.0"
LOG_FOLDERNAME = 'log'
LOG_FILENAME = 'log.log'
LOG_FILEMODE = 'a'
LOG_FORMAT = '%(asctime)-15s %(levelname)-8s - %(message)s'
LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'
LOG_LEVEL = logging.DEBUG
# folder to store log file
if not path.exists(LOG_FOLDERNAME):
mkdir(LOG_FOLDERNAME)
# check exists dataframe otherwise create it.
if not path.isfile('dataframe.csv'):
df = pd.DataFrame(columns=['datetime', 'plant', 'temperature', 'air-humidity', 'soil-humidity'])
df.set_index('datetime', inplace=True)
df.index = pd.to_datetime(df.index)
df.to_csv('dataframe.csv')
del df
# basicConfig set the root logger
logging.basicConfig(filename=LOG_FOLDERNAME+'/'+LOG_FILENAME, filemode=LOG_FILEMODE,
format=LOG_FORMAT, datefmt=LOG_DATEFMT, level=LOG_LEVEL)
def _extract_exception_function():
# get function that threw the exception
trace = exc_info()[-1]
stack = extract_tb(trace, 1)
function_name = stack[0][2]
return function_name
def udp_server_set_up():
_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
_s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
_pair = (UDP_SERVER_IP, UDP_SERVER_PORT)
_s.bind(_pair)
except socket.error as _e:
# obsolete -> is similar to OSError
logging.error("@" + _extract_exception_function() + " - Socket.error: " + str(_e))
_s.close()
_s = None
except OSError as _e:
# this could happen due to an incorrect ip address
logging.error("@" + _extract_exception_function() + " - OSError: " + str(_e))
_s.close()
_s = None
except OverflowError as _e:
# this could happen due to an incorrect port number
logging.error("@" + _extract_exception_function() + " - OverflowError: " + str(_e))
_s.close()
_s = None
return _s
def udp_server_run(s):
while True:
try:
_data, _client = s.recvfrom(4*1024)
yield _data, _client
except Exception as _e:
logging.warning("@" + _extract_exception_function() + " - " + str(_e))
def data_handler(data):
json_data = json.loads(data)
json_data['datetime'] = str(datetime.now())
dfj = pd.DataFrame([json_data], columns=['datetime', 'plant', 'temperature', 'air-humidity', 'soil-humidity'])
dfj.set_index('datetime', inplace=True)
    dfj.index = pd.to_datetime(dfj.index)
import pandas as pd
import numpy as np
import json
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/get_json')
def get_json():
country=request.args['country']
url='google'
countries=pd.read_csv('tsv/products_countries.tsv','\t')
products=pd.read_csv('tsv/products.tsv','\t',low_memory=False)[['code','name']]
additives=pd.read_csv('tsv/products_additives.tsv','\t')
    d=pd.merge(countries[countries['country']==country], pd.merge(products,additives,how='inner',left_on='code',right_on='code')
import __main__ as main
import sys
import pandas as pd
if not hasattr(main, '__file__'):
argv = ['a',
'data/processed/census/oa_tile_reference.csv',
'data/raw/census/Eng_Wal_OA_Mid_Pop.csv',
'data/raw/census/OA_to_DZ.csv',
'data/raw/census/simd2020_withinds.csv',
'data/raw/census/NI_Mid_Pop.csv',
"data/processed/census/tile_imd.csv"]
else:
argv = sys.argv
tiles = pd.read_csv(argv[1])
england_oa = pd.read_csv(argv[2])
scotland_zone_ref = pd.read_csv(argv[3])
scotland_oa = pd.read_csv(argv[4])
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 26 18:17:16 2017
Useful functions in clusters analysis.
Specially kmeans clusters
@author: srodriguezl
"""
import pandas as pd
# 1
# Function to automatically generate a summary of the cluster
# Before running--> double check:
# 1- Remove variables that you dont want to include in the summary
# 2- Check variables types (df.dtypes) and correct them (df.variable.astype(....))
# df: dataframe
# Target: result of the cluster
# pasthSave: path to save the excel with the result
# includeCatNan: Add a column to indicate NaNs category
def Summary_function(df,Target,pathSave,includeCatNan = False):
# Indentify object and bool variables and create dummy variables
factorVar = df.select_dtypes(include=["object","bool"]).columns
for i in factorVar:
df_dummy = pd.get_dummies(df[i], prefix = i , dummy_na = includeCatNan)
        df = pd.concat([df,df_dummy],axis = 1)
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.tools import argparser
import pandas as pd
import pprint
DEVELOPER_KEY = ""
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def youtube_search(q, channelId,token, max_results=50,order="relevance", location=None, location_radius=None):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)
search_response = youtube.search().list(
q=q,
type="video",
pageToken=token,
order = order,
part="id,snippet", # Part signifies the different types of data you want
maxResults=max_results,
location=location,
locationRadius=location_radius,
channelId=channelId).execute()
title = []
channelId = []
channelTitle = []
categoryId = []
videoId = []
viewCount = []
#likeCount = []
#dislikeCount = []
commentCount = []
favoriteCount = []
category = []
tags = []
videos = []
date=[]
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
title.append(search_result['snippet']['title'])
videoId.append(search_result['id']['videoId'])
response = youtube.videos().list(
part='statistics, snippet',
id=search_result['id']['videoId']).execute()
channelId.append(response['items'][0]['snippet']['channelId'])
channelTitle.append(response['items'][0]['snippet']['channelTitle'])
categoryId.append(response['items'][0]['snippet']['categoryId'])
favoriteCount.append(response['items'][0]['statistics']['favoriteCount'])
viewCount.append(response['items'][0]['statistics']['viewCount'])
#likeCount.append(response['items'][0]['statistics']['likeCount'])
#dislikeCount.append(response['items'][0]['statistics']['dislikeCount'])
date.append(response['items'][0]['snippet']['publishedAt'])
if 'commentCount' in response['items'][0]['statistics'].keys():
commentCount.append(response['items'][0]['statistics']['commentCount'])
else:
commentCount.append([])
if 'tags' in response['items'][0]['snippet'].keys():
tags.append(response['items'][0]['snippet']['tags'])
else:
tags.append([])
youtube_dict = {'tags':tags,'channelId': channelId,'channelTitle': channelTitle,'categoryId':categoryId,'title':title,'videoId':videoId,'viewCount':viewCount,'commentCount':commentCount,'favoriteCount':favoriteCount, 'date':date}
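    # wrapping each list in pd.Series lets columns of different lengths align in one DataFrame (shorter columns are padded with NaN)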
youtube_video=pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in youtube_dict.items() ]))
try:
nexttok = search_response["nextPageToken"]
return(youtube_video, nexttok)
except Exception as e:
nexttok = "last_page"
return(youtube_video, nexttok)
writer = pd.ExcelWriter('youtube_video.xlsx', engine='xlsxwriter')
import pandas as pd
import numpy as np
from datetime import *
nw = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
def add_col_df(df, colname, colval = False, indx=False):
if indx == False:
if colval == False:
ndf = df.assign(coln = 'NWC')
ndf.rename(columns = {'coln': colname}, inplace = True)
return ndf
else:
ndf = df.assign(coln = colval)
ndf.rename(columns = {'coln': colname}, inplace = True)
return ndf
else:
if colval == False:
df.insert(indx, colname, 'NWC', allow_duplicates=False)
return df
else:
df.insert(indx, colname, colval, allow_duplicates=False)
return df
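# Illustrative usage: add_col_df(df, 'status', colval='new', indx=2) inserts the column at position 2;
# note that passing 0, '' or False for colval/indx is treated as "not provided" because of the == False checks.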
def timediff(df,c1,c2,newcol):
    df[c1] = pd.to_datetime(df[c1])
"""
Functions to validate the input files prior to database insert / upload.
"""
import time
import numpy as np
import pandas as pd
import IEDC_paths, IEDC_pass
from IEDC_tools import dbio, file_io, __version__
def check_datasets_entry(file_meta, create=True, crash_on_exist=True, update=True, replace=False):
"""
Creates an entry in the `datasets` table.
:param file_meta: data file metadata
:param crash_on_exist: if True: function terminates with assertion error if dataset/version already exists
:param update: if True: function updates dataset entry if dataset/version already exists
:param create: if True: funtion creates dataset entry for dataset/version
:param replace: if True: delete existing entry in dataset table and create new one with current data
"""
db_datasets = dbio.get_sql_table_as_df('datasets')
dataset_info = file_meta['dataset_info']
# Check if entry already exists
dataset_name_ver = [i[0] for i in dataset_info.loc[['dataset_name', 'dataset_version']]
.where((pd.notnull(dataset_info.loc[['dataset_name', 'dataset_version']])), None).values]
if dataset_name_ver[1] in ['NULL']:
dataset_name_ver[1] = None
# If exists already
    if dataset_name_ver in db_datasets[['dataset_name', 'dataset_version']].values.tolist():  # dataset name + version already exists in dataset catalog
if crash_on_exist:
raise AssertionError("Database already contains the following dataset (dataset_name, dataset_version):\n %s"
% dataset_name_ver)
elif update:
update_dataset_entry(file_meta)
elif replace:
# get id
if dataset_name_ver[1] == None:
db_id = db_datasets.loc[(db_datasets['dataset_name'] == dataset_name_ver[0]) &
pd.isna(db_datasets['dataset_version'])].index[0]
else:
db_id = db_datasets.loc[(db_datasets['dataset_name'] == dataset_name_ver[0]) &
(db_datasets['dataset_version'] == dataset_name_ver[1])].index[0]
dbio.run_this_command("DELETE FROM %s.datasets WHERE id = %s;" % (IEDC_pass.IEDC_database, db_id))
# add new one
create_dataset_entry(file_meta)
else:
# do nothing
print("Database already contains the following dataset (dataset_name, dataset_version):\n %s"
% dataset_name_ver)
return True
# if it doesn't exist yet
else:
if create:
create_dataset_entry(file_meta)
else: # i.e. crash_on_not_exist
raise AssertionError("Database does not contain the following dataset (dataset_name, dataset_version):\n %s"
% dataset_name_ver)
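# Illustrative call (hypothetical file_meta as returned by the file_io helpers): create the
# entry only if it is missing, and merely report — rather than crash or update — if it exists.
#   check_datasets_entry(file_meta, create=True, crash_on_exist=False, update=False)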
def create_dataset_entry(file_meta):
dataset_info = file_meta['dataset_info']
dataset_info = dataset_info.replace([np.nan], [None])
dataset_info = dataset_info.replace({'na': None, 'nan': None, 'none': None,
'NULL': None})
dataset_info = dataset_info.to_dict()['Dataset entries']
assert dataset_info['dataset_id'] == 'auto', \
"Was hoping 'dataset_id' in the file template had the value 'auto'. Not sure what to do now..."
# Clean up dict
dataset_info.pop('dataset_id')
if pd.isna(dataset_info['reserve5']):
dataset_info['reserve5'] = 'Created by IEDC_tools v%s' % __version__
# Look up stuff
data_types = dbio.get_sql_table_as_df('types')
dataset_info['data_type'] = data_types.loc[data_types['name'] == dataset_info['data_type']].index[0]
data_layers = dbio.get_sql_table_as_df('layers')
dataset_info['data_layer'] = data_layers.loc[data_layers['name'] == dataset_info['data_layer']].index[0]
data_provenance = dbio.get_sql_table_as_df('provenance')
dataset_info['data_provenance'] = data_provenance.loc[data_provenance['name'] ==
dataset_info['data_provenance']].index[0]
aspects = dbio.get_sql_table_as_df('aspects')
class_defs = dbio.get_sql_table_as_df('classification_definition')
for aspect in [i for i in dataset_info.keys() if i.startswith('aspect_')]:
if dataset_info[aspect] is None or aspect.endswith('classification'):
continue
if dataset_info[aspect+'_classification'] == 'custom':
aspect_class_name = str(dataset_info[aspect]) + '__' + dataset_info['dataset_name']
dataset_info[aspect+'_classification'] = \
class_defs[class_defs['classification_name'] == aspect_class_name].index[0]
dataset_info[aspect] = aspects[aspects['aspect'] == dataset_info[aspect]].index[0]
source_type = dbio.get_sql_table_as_df('source_type')
dataset_info['type_of_source'] = source_type.loc[source_type['name'] == dataset_info['type_of_source']].index[0]
licenses = dbio.get_sql_table_as_df('licences')
dataset_info['project_license'] = licenses.loc[licenses['name'] == dataset_info['project_license']].index[0]
users = dbio.get_sql_table_as_df('users')
dataset_info['submitting_user'] = users.loc[users['name'] == dataset_info['submitting_user']].index[0]
# fix some more
for k in dataset_info:
# not sure why but pymysql doesn't like np.int64
if type(dataset_info[k]) == np.int64:
dataset_info[k] = int(dataset_info[k])
dbio.dict_sql_insert('datasets', dataset_info)
print("Created entry for %s in 'datasets' table." % [dataset_info[k] for k in ['dataset_name', 'dataset_version']])
return None
def update_dataset_entry(file_meta):
raise NotImplementedError
def create_aspects_table(file_meta):
"""
Pulls the info on classification and attributes together, i.e. make sense of the messy attributes in an actual
table... More of a convenience function for tired programmers.
See sheet 'Cover' in template file.
:param file: Filename, string
:return: Dataframe table with name, classification_id, and attribute_no
"""
# Read the file and put metadata and row_classifications in two variables
dataset_info = file_meta['dataset_info']
row_classifications = file_meta['row_classifications']
col_classifications = file_meta['col_classifications']
# Filter relevant rows from the metadata table, i.e. the ones containing 'aspect'
custom_aspects = dataset_info[dataset_info.index.str.startswith('aspect_')]
custom_aspects = custom_aspects[custom_aspects.index.str.endswith('_classification')]
# Get rid of the empty ones
custom_aspects = custom_aspects[custom_aspects['Dataset entries'] != 'none']
# Here comes the fun... Let's put everything into a dict, because that is easily converted to a dataframe
d = {'classification_id': custom_aspects['Dataset entries'].values,
'index': [i.replace('_classification', '') for i in custom_aspects.index],
'name': dataset_info.loc[[i.replace('_classification', '')
for i in custom_aspects.index]]['Dataset entries'].values}
if file_meta['data_type'] == 'LIST':
d['attribute_no'] = row_classifications.reindex(d['name'])['Aspects_Attribute_No'].values
d['position'] = 'row?'
elif file_meta['data_type'] == 'TABLE':
d['attribute_no'] = row_classifications \
.reindex(d['name'])['Row_Aspects_Attribute_No'] \
.fillna(col_classifications.Col_Aspects_Attribute_No).values
# The table format file has no info on the position of aspects. Need to find that.
d['position'] = []
for n in d['name']:
if n in row_classifications.index:
d['position'].append('row' + str(row_classifications.index.get_loc(n)))
if n in col_classifications.index:
d['position'].append('col' + str(col_classifications.index.get_loc(n)))
assert not any([i is None for i in d['attribute_no']]) # 'not any' means 'none'
# Convert to df and get rid of the redundant 'index' column
aspect_table = pd.DataFrame(d, index=d['index']).drop('index', axis=1)
return aspect_table
def get_class_names(file_meta, aspect_table):
"""
Creates and looks up names for classification, i.e. classifications that are not found in the database (custom)
will be generated and existing ones (non-custom) looked up in the classification_definitions table.
The name is generated as a combination of the dataset name and the classification name, e.g.
"1_F_steel_SankeyFlows_2008_Global_origin_process".
The function extends the table created in create_aspects_table() and returns it.
:param file: Filename, string
:return: Dataframe table with name, classification_id, attribute_no, and classification_definition
"""
dataset_info = file_meta['dataset_info']
db_classdef = dbio.get_sql_table_as_df('classification_definition')
r = []
for aspect in aspect_table.index:
if aspect_table.loc[aspect, 'classification_id'] == 'custom':
r.append(aspect_table.loc[aspect, 'name'] + '__' + dataset_info.loc['dataset_name', 'Dataset entries'])
else:
r.append(db_classdef.loc[aspect_table.loc[aspect, 'classification_id'], 'classification_name'])
aspect_table['custom_name'] = r
return aspect_table
def check_classification_definition(class_names, crash=True, warn=True,
custom_only=False, exclude_custom=False):
"""
Checks if classifications exists in the database, i.e. classification_definition.
:param class_names: List of classification names
:param crash: Strongly recommended -- will cause the script to stop if the classification already exists. Otherwise
there could be ambiguous classifications with multiple IDs.
:param warn: Allows to suppress the warning message
:param custom_only: Check only custom classifications
:param exclude_custom: Exclude custom classifications
:return: True or False
"""
db_classdef = dbio.get_sql_table_as_df('classification_definition')
exists = []
for aspect in class_names.index:
attrib_no = class_names.loc[aspect, 'attribute_no']
if attrib_no != 'custom' and custom_only:
continue # skip already existing classifications
if attrib_no == 'custom' and exclude_custom:
continue # skip custom classifications
if class_names.loc[aspect, 'custom_name'] in db_classdef['classification_name'].values:
exists.append(True)
if crash:
raise AssertionError("""Classification '%s' already exists in the DB classification table (ID: %s).
Aspect '%s' cannot be processed.""" %
(class_names.loc[aspect, 'custom_name'],
                                      db_classdef[db_classdef['classification_name']
                                                  == class_names.loc[aspect, 'custom_name']].index[0], aspect))
elif warn:
print("WARNING: '%s' already exists in the DB classification table. "
"Adding it again may fail or create ambiguous values." %
class_names.loc[aspect, 'custom_name'])
else:
exists.append(False)
return exists
def check_classification_items(class_names, file_meta, file_data, crash=True, warn=True,
custom_only=False, exclude_custom=False):
"""
Checks in classification_items if a. all classification_ids exists and b. all attributes exist
:param class_names: List of classification names
:param file_data: Dataframe of Excel file, sheet `Data`
:param crash: Strongly recommended -- will cause the script to stop if the classification_id already exists in
classification_items. Otherwise there could be ambiguous values with multiple IDs.
:param custom_only: Check only custom classifications
:param exclude_custom: Exclude custom classifications
:param warn: Allows to suppress the warning message
:return:
"""
db_classdef = dbio.get_sql_table_as_df('classification_definition')
db_classitems = dbio.get_sql_table_as_df('classification_items')
exists = [] # True / False switch
for aspect in class_names.index:
attrib_no = class_names.loc[aspect, 'attribute_no']
# remove garbage from string
try:
attrib_no = attrib_no.strip(' ')
except:
pass
if attrib_no != 'custom' and custom_only:
continue # skip already existing classifications
if attrib_no == 'custom' and exclude_custom:
continue # skip custom classifications
# make sure classification id exists -- must pass, otherwise the next command will fail
        assert class_names.loc[aspect, 'custom_name'] in db_classdef['classification_name'].values, \
            "Classification '%s' does not exist in table 'classification_definition'" % \
            class_names.loc[aspect, 'custom_name']
# get classification_id
class_id = db_classdef.loc[db_classdef['classification_name'] ==
class_names.loc[aspect, 'custom_name']].index[0]
# Check if the classification_id already exists in classification_items
if class_id in db_classitems['classification_id'].unique():
exists.append(True)
if crash:
raise AssertionError("classification_id '%s' already exists in the table classification_items." %
class_id)
elif warn:
print("WARNING: classification_id '%s' already exists in the table classification_items. "
"Adding its attributes again may fail or create ambiguous values." %
class_id)
else:
exists.append(False)
print(aspect, class_id, 'not in classification_items')
# Next check if all attributes exist
if attrib_no == 'custom':
attrib_no = 'attribute1_oto'
else:
attrib_no = 'attribute' + str(int(attrib_no)) + '_oto'
checkme = db_classitems.loc[db_classitems['classification_id'] == class_id][attrib_no].values
if file_meta['data_type'] == 'LIST':
attributes = file_data[class_names.loc[aspect, 'name']].unique()
elif file_meta['data_type'] == 'TABLE':
if class_names.loc[aspect, 'position'][:3] == 'row':
if len(file_meta['row_classifications'].values) == 1:
attributes = file_data.index.values
else:
attributes = file_data.index.levels[int(class_names.loc[aspect, 'position'][-1])]
elif class_names.loc[aspect, 'position'][:3] == 'col':
if len(file_meta['col_classifications'].values) == 1:
# That means there is only one column level defined, i.e. no MultiIndex
attributes = file_data.columns.values
else:
attributes = file_data.columns.levels[int(class_names.loc[aspect, 'position'][-1])]
for attribute in attributes:
if str(attribute) in checkme:
exists.append(True)
if crash:
raise AssertionError("'%s' already in %s" % (attribute, checkme))
elif warn:
print("WARNING: '%s' already in classification_items" % attribute)
else:
exists.append(False)
print(aspect, attribute, class_id, 'not in classification_items')
return exists
def create_db_class_defs(file_meta, aspect_table):
"""
Writes the custom classification to the table classification_definition.
:param file: The data file to read.
"""
class_names = get_class_names(file_meta, aspect_table)
db_aspects = dbio.get_sql_table_as_df('aspects', index='aspect')
check_classification_definition(class_names, custom_only=True)
for aspect in class_names.index:
if class_names.loc[aspect, 'classification_id'] != 'custom':
continue # skip already existing classifications
d = {'classification_name': str(class_names.loc[aspect, 'custom_name']),
'dimension': str(db_aspects.loc[class_names.loc[aspect, 'name'], 'dimension']),
'description': 'Custom classification, generated by IEDC_tools v%s' % __version__,
'mutually_exclusive': True,
'collectively_exhaustive': False,
'created_from_dataset': True, # signifies that this is a custom classification
'general': False,
'meaning_attribute1': "'%s' aspect of dataset" % aspect # cannot be NULL???
}
dbio.dict_sql_insert('classification_definition', d)
print("Wrote custom classification '%s' to classification_definitions" %
class_names.loc[aspect, 'custom_name'])
def create_db_class_items(file_meta, aspects_table, file_data):
"""
Writes the unique database items / attributes of a custom classification to the database.
:param file: Data file to read
"""
class_names = get_class_names(file_meta, aspects_table)
db_classdef = dbio.get_sql_table_as_df('classification_definition')
check_classification_items(class_names, file_meta, file_data, custom_only=True, crash=True)
for aspect in class_names.index:
if class_names.loc[aspect, 'classification_id'] != 'custom':
continue # skip already existing classifications
# get classification_id
class_id = db_classdef.loc[db_classdef['classification_name'] ==
class_names.loc[aspect, 'custom_name']].index[0]
d = {'classification_id': class_id,
'description': 'Custom classification, generated by IEDC_tools v%s' % __version__,
'reference': class_names.loc[aspect, 'custom_name'].split('__')[1]}
if file_meta['data_type'] == 'LIST':
attributes = sorted(file_data[class_names.loc[aspect, 'name']].apply(str).unique())
elif file_meta['data_type'] == 'TABLE':
if class_names.loc[aspect, 'position'][:-1] == 'col':
if len(file_meta['col_classifications'].values) == 1:
# That means there is only one column level defined, i.e. no MultiIndex
attributes = [str(i) for i in file_data.columns]
else:
attributes = sorted(
[str(i) for i in file_data.columns.levels[int(class_names.loc[aspect, 'position'][-1])]])
elif class_names.loc[aspect, 'position'][:-1] == 'row':
if len(file_meta['row_classifications'].values) == 1:
attributes = [str(i) for i in file_data.index]
else:
attributes = sorted(
[str(i) for i in file_data.index.levels[int(class_names.loc[aspect, 'position'][-1])]])
df = pd.DataFrame({'classification_id': [d['classification_id']] * len(attributes),
'description': [d['description']] * len(attributes),
'reference': [d['reference']] * len(attributes),
'attribute1_oto': attributes})
columns = ('classification_id', 'description', 'reference', 'attribute1_oto')
dbio.bulk_sql_insert('classification_items', columns, df.values.tolist())
print("Wrote attributes for custom classification '%s' to classification_items: %s" % (class_id, attributes))
def add_user(file_meta, quiet=False):
dataset_info = file_meta['dataset_info']
db_user = dbio.get_sql_table_as_df('users')
realname = dataset_info.loc['submitting_user'].values[0]
if realname in db_user['name'].values:
if not quiet:
print("User '%s' already exists in db table users" % realname)
else:
d = {'name': realname,
'username': (realname.split(' ')[0][0] + realname.split(' ')[1]).lower(),
'start_date': time.strftime('%Y-%m-%d %H:%M:%S')
}
dbio.dict_sql_insert('users', d)
print("User '%s' written to db table users" % d['username'])
def add_license(file_meta, quiet=False):
dataset_info = file_meta['dataset_info']
db_licenses = dbio.get_sql_table_as_df('licences')
file_licence = dataset_info.loc['project_license'].values[0]
if file_licence in db_licenses['name'].values:
if not quiet:
print("Licence '%s' already exists in db table 'licences'" % file_licence)
else:
d = {'name': file_licence,
'description': 'n/a, generated by IEDC_tools v%s' % __version__}
dbio.dict_sql_insert('licences', d)
print("Licence '%s' written to db table 'licences'" % file_licence)
def parse_stats_array_list(stats_array_strings):
"""
    Parses the 'stats_array string' from the Excel template. E.g. "3;10;3.0;none" should fill the respective columns
    in the data table as follows: stats_array_1 = 3, stats_array_2 = 10, stats_array_3 = 3.0, stats_array_4 = none
    More info: https://github.com/IndEcol/IE_data_commons/issues/14
    :param stats_array_strings: iterable of strings of the form "a;b;c;d", or 'none' when no uncertainty info is given
    :return: list of four column arrays, one per stats_array_* field
"""
temp_list = []
for sa_string in stats_array_strings:
if sa_string == 'none':
temp_list.append([None] * 4)
else:
assert len(sa_string.split(';')) == 4, "The 'stats_array string' is not well formatted: %s" % sa_string
temp_list.append(sa_string.split(';'))
return_df = pd.DataFrame(temp_list)
return_df = return_df.replace(['none'], [None])
# return a list of lists
return [return_df[i].values for i in range(len(return_df.columns))]
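# Example (illustrative): two records, the second without uncertainty information.
#   parse_stats_array_list(["3;10;3.0;none", "none"])
# returns four column arrays (note the parsed values remain strings):
#   ['3', None], ['10', None], ['3.0', None], [None, None]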
def parse_stats_array_table(file, file_meta, row_indices, col_indices):
# db_sa = dbio.get_sql_table_as_df('stats_array', index=None)
if file_meta['data_sources'].loc['Dataset_Uncertainty', 'a'] == 'GLOBAL':
if file_meta['data_sources'].loc['Dataset_Uncertainty', 'b'] in ('none', 'None'):
sa_res = [None] * 4
else:
sa_res = file_meta['data_sources'].loc['Dataset_Uncertainty', 'b'].split(';')
return {'type': 'GLOBAL',
'data': sa_res}
elif file_meta['data_sources'].loc['Dataset_Uncertainty', 'a'] == 'TABLE':
file_sa = file_io.read_stats_array_table(file, row_indices, col_indices)
sa_tmp = file_sa.reset_index().melt(file_sa.index.names)
sa_tmp = sa_tmp.set_index(row_indices)
# parse the string https://stackoverflow.com/a/21032532/2075003
sa_res = sa_tmp['value'].str.split(';', expand=True)
sa_res.columns = ['stats_array_' + str(i+1) for i in range(4)]
sa_res = sa_res.replace(['none'], [None])
sa_res = sa_res.astype({'stats_array_1': int, 'stats_array_2': float,
'stats_array_3': float, 'stats_array_4': float})
return {'type': 'TABLE',
'data': sa_res}
else:
raise AttributeError("Unknown data unit type specified. Must be either 'GLOBAL' or 'TABLE'.")
def get_comment_table(file, file_meta, row_indices, col_indices):
if file_meta['data_sources'].loc['Dataset_Comment', 'a'] == 'GLOBAL':
if file_meta['data_sources'].loc['Dataset_Comment', 'b'] in ('none', 'None'):
comment = None
else:
comment = file_meta['data_sources'].loc['Dataset_Comment', 'b']
return {'type': 'GLOBAL',
'data': comment}
elif file_meta['data_sources'].loc['Dataset_Comment', 'a'] == 'TABLE':
comment = file_io.read_comment_table(file, row_indices, col_indices)
comment = comment.reset_index().melt(comment.index.names)
comment = comment.set_index(row_indices)
return {'type': 'TABLE',
'data': comment}
else:
raise AttributeError("Unknown data unit type specified. Must be either 'GLOBAL' or 'TABLE'.")
def get_unit_list(file_data):
db_units = dbio.get_sql_table_as_df('units', index=None)
res = | pd.DataFrame() | pandas.DataFrame |
import funcy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
from dateutil import parser
from tqdm import tqdm
from utils.helpers import *
from utils.plot import plot_joint_distribution
font = {
"size": 30
}
matplotlib.rc("font", **font)
pd.options.mode.chained_assignment = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MOST_RECENT_FILE = sorted(os.listdir(os.path.join(BASE_DIR, "data", "REDCap")))[-1]
REDCAP_FPATH = os.path.join(BASE_DIR, "data", "REDCap", MOST_RECENT_FILE)
SERIES_ID_FPATH = os.path.join(BASE_DIR, "data", "match_redcap_plataforma.csv")
SEGMENTATION_FPATH = os.path.join(BASE_DIR, "data", "inference_df.csv")
get_date_regex = r"ProjetoCOVIDAI_DATA_(?P<data>.*)_\d+.csv"
date_str = re.match(get_date_regex, MOST_RECENT_FILE).group("data")
dataset_date = parser.parse(date_str)
# Normalize name and CPF
df = pd.read_csv(REDCAP_FPATH)
df.nome = df.nome.apply(lambda s: to_normalized_string(s) if pd.notna(s) else s)
df.cpf = df.cpf.apply(lambda v: str(int(v)) if pd.notna(v) else v)
# Fill redcap_repeat_instrument missing data with "dados_pessoais_unico" since these
# rows are not filled automatically by the database
df.redcap_repeat_instrument = df.redcap_repeat_instrument.fillna("dados_pessoais_unico")
# Fill the missing hospitalization date with date of admission to ICU if existent
df.data_admissao_hospitalar = df.data_admissao_hospitalar.fillna(df.data_admissao_uti)
# Calculate length of stay based on hospitalization date and date of discharge or
# date of death
fill_length_of_stay = df.apply(
lambda row: calculate_length_of_stay(
row["data_admissao_hospitalar"],
row["data_alta_hospitalar"],
row["data_obito"]
),
axis=1
)
df.tempo_estadia_hospitalar = df.tempo_estadia_hospitalar.fillna(fill_length_of_stay)
# Calculate the date of discharge from ICU based on the date of admission
# in the ICU and length of stay in the ICU.
df["data_alta_uti"] = df.apply(
lambda row: sum_date_with_interval(
row["data_admissao_uti"],
row["tempo_estadia_uti"]
),
axis=1
)
# Calculate the date of removal of the ventilation based on the date of ventilation
# and the length of ventilation
df["data_remocao_ventilacao"] = df.apply(
lambda row: sum_date_with_interval(
row["data_ventilacao"],
row["tempo_ventilacao_mecanica"]
),
axis=1
)
# Calculate age and body mass index
df["idade"] = df.apply(
lambda row: calculate_age(
row["data_nasc"],
row["data_admissao_hospitalar"],
dataset_date
),
axis=1
)
df["imc"] = df.peso / (df.altura ** 2)
# Some of the rows have the plaquets number in a different unity and need to be
# multiplied by 1000
df.plaquetas = df.plaquetas.apply(lambda v: v * 1000 if v < 1000 else v)
############################## Finished processing the ordinary data ##############################
# Here we define variables useful for processing the rest of the data
cols_intermediate_outcomes = [
"data_sepse",
"sepse",
"data_sdra",
"sdra",
"data_falencia_cardiaca",
"falencia_cardiaca",
"data_choque_septico",
"choque_septico",
"data_coagulopatia",
"coagulopatia",
"data_iam",
"iam",
"data_ira",
"ira"
]
cols_personal_data = [
"nome",
"cpf",
"instituicao",
"data_nasc",
"idade",
"sexo",
"altura",
"peso",
"imc",
"alta",
"obito",
"data_admissao_hospitalar",
"data_admissao_uti",
"data_obito",
"data_alta_hospitalar",
"data_alta_uti",
"data_ventilacao",
"data_remocao_ventilacao",
"tempo_estadia_hospitalar",
"tempo_estadia_uti",
"tempo_ventilacao_mecanica"
] + cols_intermediate_outcomes
cols_comorbidities = [
"has",
"ieca_bra",
"dm",
"asma",
"tabagista",
"dpoc",
"cardiopatia",
"irc",
"neoplasia",
"aids",
"neutropenia"
]
cols_respiratory_comorbidities = [
"asma", "tabagista", "dpoc"
]
cols_cardiac_comorbidities = [
"has", "cardiopatia"
]
cols_dates = [
col for col in df.columns
if "data" in col and col not in
cols_personal_data + ["redcap_data_access_group"]
]
identity_map = {
0: 0,
1: 1
}
irc_map = {
1: "negativo",
2: "nao_dialitico",
3: "dialitico"
}
neoplasia_map = {
1: "negativo",
2: "primaria_ou_secundaria",
3: "outras"
}
map_comorbidities = {
"irc": irc_map,
"neoplasia": neoplasia_map
}
# Now we build a separate dataframe for saving pesonal data.
df_personal_data = df[df.redcap_repeat_instrument == "dados_pessoais_unico"]
# Discriminate patients that were admitted to the hospital and to the ICU. Also, discriminate those that
# were discharged and those who died.
df_personal_data["internacao"] = df_personal_data.data_admissao_hospitalar.notna()
df_personal_data["uti"] = df_personal_data.data_admissao_uti.notna()
df_personal_data["obito"] = df_personal_data.data_obito.notna()
df_personal_data["alta"] = df_personal_data.data_alta_hospitalar.notna()
df_personal_data = df_personal_data[
["record_id"] + cols_personal_data + cols_comorbidities
]
for col in cols_comorbidities:
df_personal_data[col] = df_personal_data[col].map(map_comorbidities.get(col, identity_map))
# Count the number of previous comorbidities each patient has.
df_personal_data["n_comorbidades"] = df_personal_data[cols_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_respiratorias"] = df_personal_data[cols_respiratory_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_cardiacas"] = df_personal_data[cols_cardiac_comorbidities].apply(count_comorbidities, axis=1)
############################## Finished processing the personal data ##############################
# Now we build separate dataframes for saving clinical, treatment, laboratorial, image and confirmatory data.
# Clinical dataframe
cols_clinical = [
"data_dispneia",
"dispneia",
"data_sofa",
"sofa_score",
"data_saturacao_o2",
"saturacao_o2",
"data_saps_3",
"saps_3"
]
df_clinical = df[df.redcap_repeat_instrument == "evolucao_clinica_multiplo"]
df_clinical = df_clinical[["record_id"] + cols_clinical]
# We need separate dataframes for each date. Note that the clinical dataframe has four date columns. We will separate
# the columns accordingly.
df_dispneia = df_clinical[[
"record_id",
"data_dispneia",
"dispneia"
]]
df_sofa = df_clinical[[
"record_id",
"data_sofa",
"sofa_score"
]]
df_saturacao_o2 = df_clinical[[
"record_id",
"data_saturacao_o2",
"saturacao_o2"
]]
df_saps_3 = df_clinical[[
"record_id",
"data_saps_3",
"saps_3"
]]
# Treatment dataframe
cols_treatment = [
"data_ventilacao",
"ventilacao",
"pao2_fio2",
"data_pronacao",
"pronacao",
"data_hemodialise",
"hemodialise"
]
df_treatment = df[df.redcap_repeat_instrument == "evolucao_tratamento_multiplo"]
df_treatment = df_treatment[["record_id"] + cols_treatment]
# Note that the treatment dataframe has three date columns. We will separate the columns accordingly
# just as we did for the clinical dataframe.
df_ventilacao = df_treatment[[
"record_id",
"data_ventilacao",
"ventilacao",
"pao2_fio2"
]]
df_pronacao = df_treatment[[
"record_id",
"data_pronacao",
"pronacao"
]]
df_hemodialise = df_treatment[[
"record_id" ,
"data_hemodialise",
"hemodialise"
]]
# Laboratory results dataframe
cols_laboratory = [
"leucocitos",
"linfocitos",
"neutrofilos",
"tgp",
"creatinina",
"pcr",
"d_dimero",
"il_6",
"plaquetas",
"rni",
"troponina",
"pro_bnp",
"bicarbonato",
"lactato"
]
df_laboratory = df[df.redcap_repeat_instrument == "evolucao_laboratorial_multiplo"]
df_laboratory = df_laboratory[["record_id", "data_resultados_lab"] + cols_laboratory]
# Image dataframe
cols_image = [
"uid_imagem",
"tipo_imagem",
"data_imagem",
"padrao_imagem_rsna",
"score_tc_dir_sup",
"score_tc_dir_med",
"score_tc_dir_inf",
"score_tc_esq_sup",
"score_tc_esq_med",
"score_tc_esq_inf"
]
df_image = df[df.redcap_repeat_instrument == "evolucao_imagem_multiplo"]
df_image.uid_imagem = df_image.uid_imagem.apply(lambda s: s.strip() if pd.notna(s) else s)
df_image = df_image[["record_id", "redcap_repeat_instance"] + cols_image]
df_image = pd.merge(
left=df_personal_data[["record_id", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"]],
right=df_image,
how="right",
on="record_id",
validate="one_to_many"
)
uids_internados = set(df_image[df_image.data_admissao_hospitalar.notna()].uid_imagem.unique())
# For images, we also have the data retrieved from the deep segmentation model. We need
# to enrich our dataframe with the percentages of healthy lung, lung affected by ground-glass opacity
# and consolidation, and the amount of fat in the patient's body.
cols_series_id = [
"record_id",
"redcap_repeat_instance",
"infer_series_id"
]
df_series_id = pd.read_csv(SERIES_ID_FPATH, sep=";")
df_series_id = df_series_id[cols_series_id]
df_series_id = df_series_id.drop_duplicates()
cols_segmentation = [
"UID_Plataforma",
"series_id",
"seg_consolidacao",
"seg_normal",
"seg_vf1",
"seg_vf2",
"seg_vf3",
"volume_pulmao",
"taxa_gordura",
"volume_gordura",
"mediastino"
]
tmp_data = []
df_seg_raw = pd.read_csv(SEGMENTATION_FPATH)
df_seg_raw = df_seg_raw[cols_segmentation]
df_seg_raw = df_seg_raw[df_seg_raw.volume_pulmao >= 1.]
df_seg_raw = pd.merge(left=df_series_id, right=df_seg_raw, left_on="infer_series_id", right_on="series_id", how="right")
# Each TC study might have multiple series. We need to select the one with the largest lung volume, preferring series flagged as mediastino.
grouped = df_seg_raw.groupby("UID_Plataforma")
for uid_imagem, group in grouped:
if any(group.mediastino):
use_group = group[group.mediastino]
else:
use_group = group
sorted_group = use_group.sort_values("volume_pulmao")
tmp_data.append(
dict(sorted_group.iloc[-1])
)
df_seg = pd.DataFrame(tmp_data)
df_seg = df_seg[df_seg.seg_normal.notna()]
df_image = pd.merge(
left=df_image,
right=df_seg,
how="left",
on=["record_id", "redcap_repeat_instance"]
)
df_image[
["record_id", "redcap_repeat_instance", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"] + cols_image
].to_csv(os.path.join(BASE_DIR, "data", "TC_scans.csv"), index=False)
df_image = df_image.rename({"redcap_repeat_instance": "redcap_repeat_instance_image"})
df_matches = df_image[
(df_image.seg_normal.notna()) & (df_image.data_admissao_hospitalar.notna())
]
df_matches[
["record_id", "data_admissao_hospitalar", "instituicao", "data_imagem", "uid_imagem"]
].to_csv(os.path.join(BASE_DIR, "data", "matches.csv"), index=False)
n_matches = df_matches.uid_imagem.nunique()
print(f"{n_matches} between REDCap and segmentation\n")
# COVID-19 confirmation dataframe
df_confirmation = df[df.redcap_repeat_instrument == "confirmacao_covid_multiplo"]
############################## Finished processing the results data ##############################
# Now we are going to create a dataframe in which each row corresponds to a moment in the patient's stay at the
# hospital. For each date in the patient history, we will update the row with the latest information about
# that patient.
# First, we need to define some helper functions to work on the processing of the data.
def get_group(grouped, key, default_columns):
"""
Gets a group by key from a Pandas Group By object. If the key does not exist, returns an empty
group with the default columns.
"""
if key in grouped.groups:
group = grouped.get_group(key)
else:
group = pd.DataFrame([], columns=default_columns)
return group
def last_register_before_date(registers, date_col, date, default_columns):
"""
    Gets the last register before a reference date in a dataframe. If there are no registers before the
date, returns an empty register with the default columns.
"""
registers = registers[registers[date_col].notna()]
registers_before_date = registers[
registers[date_col].apply(parser.parse) <= date
]
if len(registers_before_date) == 0:
        registers_before_date = pd.DataFrame([[np.nan for col in default_columns]], columns=default_columns)
last_register = registers_before_date.iloc[-1]
return last_register
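# Illustrative use (hypothetical date): the most recent SOFA score registered on or before
# a given day, or a NaN-filled row if none exists yet.
#   last_register_before_date(group_sofa, "data_sofa", parser.parse("2020-05-10"), df_sofa.columns)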
# Theb, we need to group by patient all the dataframes we built previously.
grouped_dispneia = df_dispneia.groupby("record_id")
grouped_sofa = df_sofa.groupby("record_id")
grouped_saturacao_o2 = df_saturacao_o2.groupby("record_id")
grouped_saps_3 = df_saps_3.groupby("record_id")
grouped_image = df_image.groupby("record_id")
grouped_laboratory = df_laboratory.groupby("record_id")
grouped_ventilacao = df_ventilacao.groupby("record_id")
grouped_pronacao = df_pronacao.groupby("record_id")
grouped_hemodialise = df_hemodialise.groupby("record_id")
# Now we iterate over the personal data dataframe, which has one row per patient.
after_discharge = []
after_death = []
new_rows = []
for i, row in tqdm(df_personal_data.iterrows(), total=len(df_personal_data)):
record_id = row["record_id"]
institution = row["instituicao"]
hospitalization_date = row["data_admissao_hospitalar"]
discharge_date = row["data_alta_hospitalar"]
date_of_death = row["data_obito"]
if pd.notna(date_of_death):
date_of_death = parser.parse(date_of_death)
if pd.notna(discharge_date):
discharge_date = parser.parse(discharge_date)
if pd.notna(hospitalization_date):
hospitalization_date = parser.parse(hospitalization_date)
# Get each group and sort by the date
group_dispneia = get_group(
grouped_dispneia, record_id, df_dispneia.columns
).sort_values("data_dispneia")
group_sofa = get_group(
grouped_sofa, record_id, df_sofa.columns
)
group_saturacao_o2 = get_group(
grouped_saturacao_o2, record_id, df_saturacao_o2.columns
)
group_saps_3 = get_group(
grouped_saps_3, record_id, df_saps_3.columns
)
group_image = get_group(
grouped_image, record_id, df_image.columns
)
group_laboratory = get_group(
grouped_laboratory, record_id, df_laboratory.columns
)
group_ventilacao = get_group(
grouped_ventilacao, record_id, df_ventilacao.columns
)
group_pronacao = get_group(
grouped_pronacao, record_id, df_pronacao.columns
)
group_hemodialise = get_group(
grouped_hemodialise, record_id, df_hemodialise.columns
)
# List the dates available for the patient
patient_dates = set(filter(
pd.notna,
list(group_dispneia.data_dispneia) +
list(group_sofa.data_sofa) +
list(group_saturacao_o2.data_saturacao_o2) +
list(group_saps_3.data_saps_3) +
list(group_image.data_imagem) +
list(group_laboratory.data_resultados_lab) +
list(group_ventilacao.data_ventilacao) +
list(group_pronacao.data_pronacao) +
list(group_hemodialise.data_hemodialise)
))
patient_dates = funcy.lmap(parser.parse, patient_dates)
# Now we iterate over the dates of the patient retrieving the last register for
# each group.
new_patient_rows = []
for date_tmp in patient_dates:
# If the date is after the patient's death or the patient's discharge, we want to ignore
# the register.
if abs(date_tmp.year - dataset_date.year) > 0:
continue
if pd.notna(date_of_death) and date_tmp > date_of_death:
after_death.append(record_id)
continue
if pd.notna(discharge_date) and date_tmp > discharge_date:
after_discharge.append(discharge_date)
continue
last_register_dispneia = last_register_before_date(group_dispneia, "data_dispneia", date_tmp, df_dispneia.columns)
last_register_sofa = last_register_before_date(group_sofa, "data_sofa", date_tmp, df_sofa.columns)
last_register_saturacao_o2 = last_register_before_date(group_saturacao_o2, "data_saturacao_o2", date_tmp, df_saturacao_o2.columns)
last_register_saps_3 = last_register_before_date(group_saps_3, "data_saps_3", date_tmp, df_saps_3.columns)
last_register_image = last_register_before_date(group_image, "data_imagem", date_tmp, df_image.columns)
last_register_laboratory = last_register_before_date(group_laboratory, "data_resultados_lab", date_tmp, df_laboratory.columns)
last_register_pronacao = last_register_before_date(group_pronacao, "data_pronacao", date_tmp, df_pronacao.columns)
last_register_hemodialise = last_register_before_date(group_hemodialise, "data_hemodialise", date_tmp, df_hemodialise.columns)
# Need for mechanical ventilation is one of our target variables. Thus, we do not want to get the last register before the
# current date. We want to know if the patient ever needed mechanical ventilation at any point in time.
ventilacao = group_ventilacao[group_ventilacao.ventilacao == group_ventilacao.ventilacao.max()].sort_values("data_ventilacao", ascending=False)
if len(ventilacao) == 0:
            ventilacao = pd.DataFrame([[np.nan for col in group_ventilacao.columns]], columns=group_ventilacao.columns)
ventilacao = ventilacao.iloc[-1]
new_row = {}
new_row.update(row)
new_row.update(dict(last_register_dispneia))
new_row.update(dict(last_register_sofa))
new_row.update(dict(last_register_saturacao_o2))
new_row.update(dict(last_register_saps_3))
new_row.update(dict(last_register_image))
new_row.update(dict(last_register_laboratory))
new_row.update(dict(last_register_pronacao))
new_row.update(dict(last_register_hemodialise))
new_row.update(dict(ventilacao))
new_row["data"] = date_tmp
new_row["record_id"] = record_id
new_row["instituicao"] = institution
new_row["dias_desde_admissao"] = (date_tmp - hospitalization_date).days if pd.notna(hospitalization_date) else np.nan
date_of_outcome = date_of_death if pd.notna(date_of_death) else discharge_date
new_row["dias_antes_desfecho"] = (date_of_outcome - date_tmp).days if pd.notna(date_of_outcome) else np.nan
new_patient_rows.append(new_row)
new_rows.extend(new_patient_rows)
df_final = pd.DataFrame(new_rows)
# We need to calculate some dummy variables for the categorical data.
padrao_rsna_dummies = pd.get_dummies(df_final.padrao_imagem_rsna, prefix="padrao_rsna")
ventilacao_dummies = | pd.get_dummies(df_final.ventilacao, prefix="ventilacao") | pandas.get_dummies |
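# Illustrative continuation (not part of the original extract): the dummy columns would
# typically be joined back onto df_final before modelling, e.g.
#   df_final = pd.concat([df_final, padrao_rsna_dummies, ventilacao_dummies], axis=1)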
import sys
import os
import unittest
import numpy as np
from torch import nn
import pandas as pd
import torch
from sddr.utils.dataset import SddrDataset
from patsy import dmatrix
import statsmodels.api as sm
from sddr.utils import orthogonalize_spline_wrt_non_splines, get_info_from_design_matrix, df2lambda
from sddr.utils.family import Family
from sddr.utils.splines import spline, Spline
from sddr.utils import checkups
from sddr.utils.prepare_data import PrepareData
class TestSddrDataset(unittest.TestCase):
'''
Test SddrDataset for model with a linear part, splines and deep networks using the iris data set.
It is tested
- if get_list_of_feature_names() returns the correct list of feature names of the features from the input dataset.
- if get_feature(feature_name) returns the correct features (shape and value)
- if the structured part and the input to the deep network are in datadict are correct (shape and value)
- if the correct target (shape and value) are returned)
    We do not check network_info_dict and dm_info_dict here, as they are tested by Testparse_formulas.
'''
def __init__(self,*args,**kwargs):
super(TestSddrDataset, self).__init__(*args,**kwargs)
#define distribution
self.current_distribution = 'Poisson'
self.family = Family(self.current_distribution)
#define formulas and network shape
formulas = {'rate': '~1 + x1 + x2 + spline(x1, bs="bs",df=9) + spline(x2, bs="bs",df=9)+d1(x1)+d2(x2)'}
self.deep_models_dict = {
'd1': {
'model': nn.Sequential(nn.Linear(1,15)),
'output_shape': 15},
'd2': {
'model': nn.Sequential(nn.Linear(1,3),nn.ReLU(), nn.Linear(3,8)),
'output_shape': 8}
}
self.train_parameters = {
'batch_size': 1000,
'epochs': 2500,
'degrees_of_freedom': {'rate': 4}
}
# load data
self.data_path = '../data/test_data/x.csv'
self.ground_truth_path = '../data/test_data/y.csv'
self.data = pd.read_csv(self.data_path ,sep=None,engine='python')
self.target = pd.read_csv(self.ground_truth_path)
self.true_feature_names = ["x1", "x2", "x3", "x4"]
self.true_x2_11 = np.float32(self.data.x2[11])
self.true_target_11 = self.target.values[11]
# perform checks on given distribution name, parameter names and number of formulas given
self.formulas = checkups(self.family.get_params(), formulas)
self.prepare_data = PrepareData(self.formulas,
self.deep_models_dict,
self.train_parameters['degrees_of_freedom'])
def test_pandasinput(self):
"""
Test if SddrDataset correctly works with a pandas dataframe as input.
"""
# load data
data = pd.concat([self.data, self.target], axis=1, sort=False)
dataset = SddrDataset(data, self.prepare_data, "y")
feature_names = dataset.get_list_of_feature_names()
feature_test_value = dataset.get_feature('x2')[11]
linear_input_test_value = dataset[11]["datadict"]["rate"]["structured"].numpy()[2]
deep_input_test_value = dataset[11]["datadict"]["rate"]["d2"].numpy()[0]
target_test_value = dataset[11]["target"].numpy()
#test if outputs are equal to the true values in the iris dataset
self.assertEqual(feature_names, self.true_feature_names)
self.assertAlmostEqual(feature_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(linear_input_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(deep_input_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(target_test_value, self.true_target_11,places=4)
# test shapes of outputs
self.assertEqual(self.true_target_11.shape,target_test_value.shape)
self.assertEqual(self.true_x2_11.shape,linear_input_test_value.shape)
self.assertEqual(self.true_x2_11.shape,deep_input_test_value.shape)
self.assertEqual(self.true_x2_11.shape,feature_test_value.shape)
def test_pandasinputpandastarget(self):
"""
Test if SddrDataset correctly works with a pandas dataframe as input and target also given as dataframe.
"""
# load data
dataset = SddrDataset(self.data, self.prepare_data, self.target)
feature_names = dataset.get_list_of_feature_names()
feature_test_value = dataset.get_feature('x2')[11]
linear_input_test_value = dataset[11]["datadict"]["rate"]["structured"].numpy()[2]
deep_input_test_value = dataset[11]["datadict"]["rate"]["d2"].numpy()[0]
target_test_value = dataset[11]["target"].numpy()
#test if outputs are equal to the true values in the iris dataset
self.assertEqual(feature_names, self.true_feature_names)
self.assertAlmostEqual(feature_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(linear_input_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(deep_input_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(target_test_value, self.true_target_11,places=4)
# test shapes of outputs
self.assertEqual(self.true_target_11.shape,target_test_value.shape)
self.assertEqual(self.true_x2_11.shape,linear_input_test_value.shape)
self.assertEqual(self.true_x2_11.shape,deep_input_test_value.shape)
self.assertEqual(self.true_x2_11.shape,feature_test_value.shape)
def test_filepathinput(self):
"""
Test if SddrDataset correctly works with file paths as inputs.
"""
# create dataset
dataset = SddrDataset(self.data_path, self.prepare_data, self.ground_truth_path)
feature_names = dataset.get_list_of_feature_names()
feature_test_value = dataset.get_feature('x2')[11]
linear_input_test_value = dataset[11]["datadict"]["rate"]["structured"].numpy()[2]
deep_input_test_value = dataset[11]["datadict"]["rate"]["d2"].numpy()[0]
target_test_value = dataset[11]["target"].numpy()
#test if outputs are equal to the true values in the iris dataset
self.assertEqual(feature_names, self.true_feature_names)
self.assertAlmostEqual(feature_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(linear_input_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(deep_input_test_value, self.true_x2_11,places=4)
self.assertAlmostEqual(target_test_value, self.true_target_11,places=4)
# test shapes of outputs
self.assertEqual(self.true_target_11.shape,target_test_value.shape)
self.assertEqual(self.true_x2_11.shape,linear_input_test_value.shape)
self.assertEqual(self.true_x2_11.shape,deep_input_test_value.shape)
self.assertEqual(self.true_x2_11.shape,feature_test_value.shape)
class TestPrepareData(unittest.TestCase):
'''
Test parse_formulas function for different formulas with the iris dataset.
It is tested (for all parameters of the distribution)
- if in datadict
+ the structured part is correct and has correct shape
+ the inputs for the neural networks is correct (values and shape)
- if in network_info_dict
+ the penatly matrix is correct
+ struct_shape is correct
+ the deep shape is correct
- if in dm_info_dict
+ the correct spline slices are given
+ the correct spline input features are given
- if for smoothing splines:
+ the correct penaly matrix is computed
+ the correct regularization parameter lambda is computed
'''
def __init__(self,*args,**kwargs):
super(TestPrepareData, self).__init__(*args,**kwargs)
# load data
data_path = '../data/test_data/x.csv'
ground_truth_path = '../data/test_data/y.csv'
self.x = | pd.read_csv(data_path, sep=None, engine='python') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
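    # Numeric check (illustrative): with High=12, Low=8, Close=11 the intermediate term is
    # (2*11 - 8 - 12) / (12 - 8) = 0.5, i.e. ((C-L)-(H-C))/(H-L), a close-location value;
    # alpha2 is minus its one-day change.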
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = | pd.DataFrame(alpha) | pandas.DataFrame |
from datascience import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
plt.style.use("seaborn-muted")
def find_x_pos(widths):
cumulative_widths = [0]
cumulative_widths.extend(np.cumsum(widths))
half_widths = [i/2 for i in widths]
x_pos = []
for i in range(0, len(half_widths)):
x_pos.append(half_widths[i] + cumulative_widths[i])
return x_pos
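# Example (illustrative): centers of bars whose widths are stacked left to right.
#   find_x_pos([2, 4, 6])  ->  [1.0, 4.0, 9.0]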
def plot_group(selection, ESG_sorted):
selected_group = ESG_sorted.where("Group", selection)
width, height = selected_group.column("Capacity_MW"), selected_group.column("Total_Var_Cost_USDperMWH")
x_vals = find_x_pos(width)
# Make the plot
plt.figure(figsize=(9,6))
plt.bar(x_vals, height, width=width, edgecolor = "black")
# Add title and axis names
plt.title(selection)
plt.xlabel('Capacity_MW')
plt.ylabel('Variable Cost')
plt.show()
def price_calc(demand, sorted_table):
price = 0
sum_cap = 0
for i in range(0,len(sorted_table['Capacity_MW'])):
if sum_cap + sorted_table['Capacity_MW'][i] > demand:
price = sorted_table['Total_Var_Cost_USDperMWH'][i]
break
else:
sum_cap += sorted_table['Capacity_MW'][i]
price = sorted_table['Total_Var_Cost_USDperMWH'][i]
return price
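# Illustrative merit-order check (a plain dict of lists stands in for the sorted table):
#   price_calc(120, {"Capacity_MW": [50, 100], "Total_Var_Cost_USDperMWH": [10, 30]})  ->  30
# The first 50 MW clear at $10/MWh; the marginal unit needed to reach 120 MW costs $30/MWh.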
def price_line_plot(price):
plt.axhline(y=price, color='r', linewidth = 2)
print("Price: " + str(price))
def demand_plot(demand):
plt.axvline(x=demand, color='r', linewidth = 2)
print("Capacity: " + str(demand))
def all_groups_with_demand(demand, ESG_sorted):
width = ESG_sorted.column("Capacity_MW")
height = ESG_sorted.column("Total_Var_Cost_USDperMWH")
x_vals = find_x_pos(width)
energy_colors_dict = {}
count = 0
colors = ['#EC5F67', '#F29056', '#F9C863', '#99C794', '#5FB3B3', '#6699CC', '#C594C5']
for i in set(ESG_sorted['Group']):
energy_colors_dict[i] = colors[count]
count += 1
colors_mapped = list(pd.Series(ESG_sorted['Group']).map(energy_colors_dict))
ESG_sorted = ESG_sorted.with_column('Color', colors_mapped)
group_colors = ESG_sorted.group("Group", lambda x: x).select("Group", "Color")
group_colors["Color"] = group_colors.apply(lambda x: x[0], "Color")
price = price_calc(demand, ESG_sorted)
# Make the plot
plt.figure(figsize=(9,6))
plt.bar(x_vals, height, width=width, color=ESG_sorted['Color'], edgecolor = "black")
patches = []
for row in group_colors.rows:
patches += [mpatches.Patch(color=row.item("Color"), label=row.item("Group"))]
plt.legend(handles=patches, bbox_to_anchor=(1.1,1))
plt.title('All Energy Sources')
plt.xlabel('Capacity_MW')
plt.ylabel('Variable Cost')
price_line_plot(price)
demand_plot(demand)
def profit(sorted_table, price):
capacity_subset = sum(sorted_table.where("Total_Var_Cost_USDperMWH", are.below(price))["Capacity_MW"])
revenue = capacity_subset * price
cost = 0
for i in range(len(sorted_table.where("Total_Var_Cost_USDperMWH", are.below(price))["Total_Var_Cost_USDperMWH"])):
cost += sorted_table.where("Total_Var_Cost_USDperMWH", are.below(price))["Total_Var_Cost_USDperMWH"][i]\
* sorted_table.where("Total_Var_Cost_USDperMWH", are.below(price))["Capacity_MW"][i]
return revenue - cost
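# Worked example (hypothetical numbers): with units of 50 MW at $10/MWh and 100 MW at $30/MWh
# and a clearing price of $30, only the $10 unit bids strictly below the price, so
# profit = revenue - cost = 50*30 - 50*10 = 1000.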
def calc_profit(selection, demand, ESG_sorted):
price = price_calc(demand, ESG_sorted)
selected_group = ESG_sorted.where("Group", selection)
print("Your profit is ${:.2f}".format(profit(selected_group, price)))
def all_group_bids(demand, hour, sorted_joined_table):
def price_calc(demand, sorted_table):
price = 0
sum_cap = 0
for i in range(0,len(sorted_table['Capacity_MW'])):
if sum_cap + sorted_table['Capacity_MW'][i] > demand:
price = sorted_table['PRICE' + str(hour)][i]
break
else:
sum_cap += sorted_table['Capacity_MW'][i]
price = sorted_table['PRICE' + str(hour)][i]
return price
sorted_joined_table = sorted_joined_table.sort("PRICE" + str(hour))
width = sorted_joined_table.column("Capacity_MW")
height = sorted_joined_table.column('PRICE' + str(hour))
x_vals = find_x_pos(width)
energy_colors_dict = {}
count = 0
colors = ['#EC5F67', '#F29056', '#F9C863', '#99C794', '#5FB3B3', '#6699CC', '#C594C5']
for i in set(sorted_joined_table['Group']):
energy_colors_dict[i] = colors[count]
count += 1
colors_mapped = list(pd.Series(sorted_joined_table['Group']).map(energy_colors_dict))
sorted_joined_table = sorted_joined_table.with_column('Color', colors_mapped)
group_colors = sorted_joined_table.group("Group", lambda x: x).select("Group", "Color")
group_colors["Color"] = group_colors.apply(lambda x: x[0], "Color")
price = price_calc(demand, sorted_joined_table)
# Make the plot
plt.figure(figsize=(9,6))
plt.bar(x_vals, height, width=width, color=sorted_joined_table['Color'], edgecolor = "black")
patches = []
for row in group_colors.rows:
patches += [mpatches.Patch(color=row.item("Color"), label=row.item("Group"))]
plt.legend(handles=patches, bbox_to_anchor=(1.1,1))
plt.title('All Energy Sources')
plt.xlabel('Capacity_MW')
plt.ylabel('Price')
price_line_plot(price)
demand_plot(demand)
def your_portfolio_plot(selection, hour, demand, sorted_joined_table):
def price_calc(demand, sorted_table):
price = 0
sum_cap = 0
for i in range(0,len(sorted_table['Capacity_MW'])):
if sum_cap + sorted_table['Capacity_MW'][i] > demand:
price = sorted_table['PRICE' + str(hour)][i]
break
else:
sum_cap += sorted_table['Capacity_MW'][i]
price = sorted_table['PRICE' + str(hour)][i]
return price
your_source = sorted_joined_table.where("Group", selection)
width_yours = your_source.column("Capacity_MW")
height_yours = your_source.column('PRICE' + str(hour))
height_yours_marginal_cost = your_source.column("Total_Var_Cost_USDperMWH")
new_x_yours = find_x_pos(width_yours)
energy_colors_dict = {}
count = 0
colors = ['#EC5F67', '#F29056', '#F9C863', '#99C794', '#5FB3B3', '#6699CC', '#C594C5']
for i in set(sorted_joined_table['Group']):
energy_colors_dict[i] = colors[count]
count += 1
    colors_mapped = list(pd.Series(sorted_joined_table['Group']).map(energy_colors_dict))
#Modules to install via pip pandas,ipynb
import os
import sys
import time
from lib import trace_classification
sys.path.append('../')
import os
import pandas as pd
import numpy as np
import json
#Modules to install via pip pandas,ipynb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
from pprint import pprint
import os
#import import_ipynb
import sys
sys.path.append('../')
from pandas.plotting import scatter_matrix
from lib import trace_analysis
from node import *
import sklearn.metrics as sm
import pandas as pd
import matplotlib.pyplot as plt
import os
from node import *
from lib import plots_analysis
from sklearn.metrics import confusion_matrix
from sklearn.cluster import KMeans
import sklearn.metrics as sm
from sklearn.decomposition import PCA
import random
#Modules to install via pip pandas,ipynb
import sys
sys.path.append('../')
from lib import plots_analysis
from sklearn.cluster import KMeans
import pandas as pd
# scipy
import sklearn.metrics as sm
class node(object):
ip = ""
hop= 0
pkts=pd.DataFrame()
# The class "constructor" - It's actually an initializer
def __init__(self,ip,hop,pkts):
self.ip = ip
self.hop=hop
self.pkts=pkts
def make_node(ip,hop,pkts):
    n = node(ip, hop, pkts)
    return n
#######
#Plotting Graphs
#####
def saveFileFigures(fig,directory,namefile):
directory=directory+"figures/"
if not os.path.exists(directory):
os.makedirs(directory)
print(directory)
fig.savefig(directory+namefile+".pdf") # save the figure to file
#plt.show()
#Prints on a file the big matrix (asked by professor)
def printBigPlot(directory,data,figsize,namefile,colors,cases):
print("Printing Big Plot for "+directory)
fig, axs= plt.subplots(len(data),len(data[0]), figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
#print(i,j)
ax=axs[i][j]
d=data[i][j].pkts["rtt"]
ax.set_ylabel("Density")
ax.set_title("Node "+ str(data[i][j].ip) )
ax.set_xlabel("Time (ms)")
            if not d.empty and len(d) >= 2:
d.plot.kde(
ax=ax,
label="Case " +str(cases[i]),
color=colors[i]
)
d.hist(density=True,alpha=0.3,color=colors[i], ax=ax)
ax.legend()
#ax.set_xlim([-500, 8000])
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Print on a file density by Hop (asked by professor)
def printDensityByHop(directory,dataHop,hops,figsize,namefile,colors,cases):
print("Printing Density by Hop for "+directory)
#dataHop=hopPreparation(data)
fig, axs= plt.subplots(len(dataHop[0]),1, figsize=(15,20),sharey=True, )
#print(len(dataHop),len(dataHop[0]))
for i in range(len(dataHop)):
for j in range(len(dataHop[i])):
#print(i,j)
d=dataHop[i][j].pkts['rtt']
axs[j].set_xlabel("Time (ms)")
axs[j].set_title("Hop "+ str(j+1))
            if not d.empty and len(d) >= 2:
d.plot.kde(
ax=axs[j],
label=cases[i],color=colors[i]
)
d.hist(density=True,alpha=0.3, ax=axs[j],color=colors[i])
axs[j].legend()
#axs[j].set_xlim([-40, 6000])
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Print on a file density by Case (asked by professor)
def printDensityByCase(directory,data,hops,figsize,namefile,colors,cases):
print("Printing Density by case for "+directory)
#print(len(data),len(data[0]))
#data1=hopPreparation(data)
dataHopT=[*zip(*hops)]
#print(len(data1),len(data1[0]))
#print(len(dataHopT),len(dataHopT[0]))
fig, axs= plt.subplots(len(dataHopT[0]),1, figsize=(15,20),sharey=True, )
for i in range(len(dataHopT)):
for j in range(len(dataHopT[0])):
d=dataHopT[i][j]
axs[j].set_title(""+ cases[i])
axs[j].set_xlabel("Time (ms)")
axs[j].set_ylabel("Density")
            if not d.empty and len(d) >= 2:
#print(dataHopT[i][j])
#print(colors[i])
d=d["rtt"]
try:
d.plot.kde(
ax=axs[j],
label="Hop "+str(i),
color=colors[i]
)
d.hist(density=True,alpha=0.3, ax=axs[j],color=colors[i])
axs[j].legend()
except:pass
plt.tight_layout()
#axs[j].set_xlim([-40, 6000])
saveFileFigures(fig,directory,namefile)
#Print Density of delay without outliers in every node by Case
def densityOfDelayByCaseNoOutliers(directory,data,figsize,namefile,colors,cases):
print("Printing Density of delay without outliers in every node by Case for "+directory)
fig, axs= plt.subplots(len(data[0]),1, figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
out=getStdValues(data[i][j].pkts)
if not out.empty :
ax=axs[j]
out["rtt"].plot.kde(
ax=ax,
label=cases[i],
color=colors[i]
)
ax.set_ylabel("Density")
out["rtt"].hist(density=True,alpha=0.3, ax=ax, color=colors[i])
ax.set_title("Node "+ str(data[i][j].ip))
ax.set_xlabel("Time (ms)")
ax.legend()
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Density of outliers in every node by Case
def densityOutliersByCase(directory,data,figsize,namefile,colors,cases):
print("Printing Density of outliers in every node by Case for "+directory)
fig, axs= plt.subplots(len(data),len(data[0]), figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
out=getOutliers(data[i][j].pkts)
ax=axs[i][j]
ax.set_ylabel("Density")
ax.set_title("Node "+ str(data[i][j].ip))
ax.set_xlabel("Time (ms)")
            if not out.empty and len(out) >= 2:
out["rtt"].plot.kde(
ax=ax,
label=cases[i],
color=colors[i]
)
out["rtt"].hist(density=True,alpha=0.3, ax=ax, color=colors[i])
ax.legend()
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Distibution of the delay divided by Node in the differents Cases
def densityOfDelayByCase(directory,data,figsize,namefile,colors,cases):
print("Printing Density of delay in every node by Case for "+directory)
fig, axs= plt.subplots(len(data[0]),1, figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
d=data[i][j].pkts["rtt"]
axs[j].set_title("Node "+ str(data[i][j].ip))
axs[j].set_xlabel("Time (ms)")
axs[j].set_ylabel("Density")
            if not d.empty and len(d) >= 2:
try:
d.plot.kde(
ax=axs[j],
label=cases[i],color=colors[i]
)
d.hist(density=True,alpha=0.3, ax=axs[j],color=colors[i])
axs[j].legend()
except:
pass
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#RTT Graph
def RTTGraph(directory,data,figsize,namefile,colors,cases):
print("Printing RTT Graph for "+directory)
# fig, axs= plt.subplots(len(data[0]),1, figsize=figsize,sharey=True, )
# for i in range(len(data)):
# for j in range(len(data[i])):
# axs[j].plot(data[i][j].pkts["seq"],data[i][j].pkts["rtt"],label=cases[i],color=colors[i] )
# axs[j].set_title("Node "+ str(data[i][j].ip))
# axs[j].set_xlabel("Packet Number")
# axs[j].set_ylabel("Time (ms)")
# axs[j].legend()
# plt.tight_layout()
# saveFileFigures(fig,directory,namefile)
fig, axs= plt.subplots(len(data),len(data[0]), figsize=figsize,sharey=True, )
for i in range(len(data)):
for j in range(len(data[i])):
#print(i,j)
ax=axs[i][j]
d=data[i][j].pkts["rtt"]
ax.set_ylabel("Time (ms)")
ax.set_title("Node "+ str(data[i][j].ip))
ax.set_xlabel("Packet Number")
            if not d.empty and len(d) >= 2:
ax.plot(data[i][j].pkts["seq"],data[i][j].pkts["rtt"],label=cases[i]
#,color=colors[i]
)
ax.legend()
#ax.set_xlim([-500, 8000])
plt.tight_layout()
saveFileFigures(fig,directory,namefile)
#Not used anymore
def coojaJsonImporter(dir):
dataList=[]
for file in os.listdir(dir):
print("Importing "+ file)
with open(dir+"/" + file, 'r') as f:
dataList.append(json.load(f))
return dataList
###Function to create nodes, create a list of nodes
###
def createNodes(dict):
nodeList=[]
#dfList(pd.DataFrame(dict))
for ip in dict.keys():
pkts=pd.DataFrame(dict[ip]['pkts'])
        hop = 64 - int(pkts["ttl"].iloc[0])
pkts = pkts.drop(['ttl'], axis=1)
pkts=pkts.rename(columns={"pkt":"seq"})
#print(type(pkts[0:1]["ttl"]))
#print(pkts[0:1]["ttl"])
n=node(ip,hop,pkts)
nodeList.append(n)
return nodeList
def findMissingPackets(node):
#print(node.pkts["pkt"])
print("Executed")
maxP=-1
for el in node.pkts["seq"]:
if(el>maxP): maxP=int(el)
#print(maxP)
pkt=[None]*(maxP+1)
for i in range(len(node.pkts["seq"])):
index=int(node.pkts["seq"][i])
#print(index)
pkt[index]=node.pkts["rtt"][i]
#pkt[)]=node.pkts["pkt"][i]
return pkt
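# Sketch of the intended output (hypothetical values): for a node whose pkts frame
# contains seq [0, 1, 3] with rtt [12.0, 15.0, 20.0], the returned list is
# [12.0, 15.0, None, 20.0] -- missing sequence numbers show up as None so packet
# loss stays visible when the list is plotted or counted.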
def getIps(list):
ips=[]
for n in list:
ips.append(n.ip)
return ips
def MLPreparation(data):
# Calculate all the statistics
statistics = {} # <node_id, statistics of the node>
for network in data:
for node in network:
print(node.pkts["rtt"].describe())
def getOutliers(df):
df1=df["rtt"]
std=df1.std()
mean=df1.mean()
a1=df["rtt"]>mean+(2*std)
a2=df["rtt"]<mean-(2*std)
return(df[a1 | a2])
def get_IQR_Outliers(df):
df1 = df["rtt"]
lower = df1.quantile(.25)
upper = df1.quantile(.75)
a1 = df["rtt"]>upper
a2 = df["rtt"]<lower
return(df[a1 | a2])
def getStdValues(df):
df1=df["rtt"]
std=df1.std()
mean=df1.mean()
a1=df["rtt"]<mean+(2*std)
a2=df["rtt"]>mean-(2*std)
return(df[a1 & a2])
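# Note (illustrative, added for clarity): getOutliers and getStdValues split the
# same RTT series with a two-sigma rule. For twenty ~10 ms samples plus a single
# 1000 ms spike, only the spike lies above mean + 2*std, so getOutliers returns
# just that row while getStdValues keeps the other twenty.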
def getPings(data):
pings=[]
for i in range(len(data)):
packetN=-1
for j in range(len(data[i])):
if(len(data[i][j].pkts)>packetN): packetN=len(data[i][j].pkts)
pings.append(packetN)
return pings
#Prepare the hop data
def hopPreparation(data):
    df_a = pd.DataFrame()
    dataHop = []
#print("Hop Preparation")
#print(len(data),len(data[0]))
maxHopCase=[]
for i in range(len(data)):
maxHop=-1
for j in range(len(data[i])):
if(data[i][j].hop>maxHop):
maxHop=data[i][j].hop
maxHopCase.append(maxHop)
#print(maxHopCase)
for i in range(len(data)):
sublist = []
for j in range(maxHopCase[i]):
sublist.append((df_a))
dataHop.append(sublist)
#print (listoflists)
for i in range(len(data)):
col=[]
for j in range(len(data[i])):
hop=data[i][j].hop-1
dataHop[i][hop]= pd.concat([dataHop[i][hop],data[i][j].pkts],sort=True)
#print(len(dataHop),len(dataHop[0]))
return dataHop
def getPercentageMissingPackets(node,lenght):
missing=0
#print(len(node.pkts))
missing=lenght-len(node)
#print(lenght,missing)
if(missing!=0):
result=missing/lenght
else: result=0
#print(maxS/missing)
return result*100
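# Sketch (hypothetical numbers): if an experiment sent 200 pings and len(node) is
# 150 (the packets actually received for that node),
# getPercentageMissingPackets(node, 200) returns (200 - 150) / 200 * 100 = 25.0,
# i.e. a 25% loss rate.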
def accuracy_score_corrected(correction,labels):
#print(np.array(correction))
labels_alt=[]
sum_labels=0
sum_labels_alt=0
for el in labels:
if (el==0):
labels_alt.append(1)
sum_labels_alt+=1
elif el==1:
labels_alt.append(0)
sum_labels+=1
accuracy=sm.accuracy_score(correction, labels)
accuracy_alt=sm.accuracy_score(correction, labels_alt)
#print(correction)
if (sum_labels>sum_labels_alt):
#print(accuracy)
None
else:
#print(accuracy_alt)
labels=labels_alt
#print(np.array(labels))
confusionMatrix=sm.confusion_matrix(correction, labels)
#pprint(confusionMatrix)
return labels
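# Sketch (made-up labels): the helper compensates for the arbitrary 0/1 cluster
# numbering produced by k-means by flipping the labels whenever 0 is the majority
# label. With correction = [1, 1, 0, 1] and labels = [0, 0, 1, 0], the flipped
# labelling [1, 1, 0, 1] is returned, which here matches the ground truth exactly.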
def ReplaceMissingPackets(node):
#print(node.pkts["pkt"])
print("Executed")
maxP=-1
for el in node.pkts["seq"]:
if(el>maxP): maxP=int(el)
#print(maxP)
pkt=[None]*(maxP+1)
for i in range(len(node.pkts["seq"])):
index=int(node.pkts["seq"][i])
#print(index)
pkt[index]=node.pkts["rtt"][i]
#pkt[)]=node.pkts["pkt"][i]
return pkt
#Import from pings files to a dataframe
def import_nodes_Cooja_2(directory,tracemask,node_defaults):
#print(directory)
#print(tracemask)
files = []
# load all files and extract IPs of nodes
for file in os.listdir(directory):
try:
if file.startswith(tracemask) and file.index("routes"):
continue
except:
files.append(file)
    nodes = pd.DataFrame(columns=['node_id', 'rank'])
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
compare_models.py <baseline> <other_method>... [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph] [--top-k=<top_features>]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import os
import glob
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
sns.set_context("paper")
from models.models_utils import generate, stz_zero, get_important_pages
def correct_name(value):
if value == "new_data" or value == "old_data":
return "Categories (Pageviews+Pagecounts)"
elif value == "cyclerank":
return "CycleRank (Pageviews+Pagecounts)"
elif value == "pageviews":
return "Categories (Pageviews)"
elif value == "cyclerank_pageviews":
return "CycleRank (Pageviews)"
elif value == "pagerank":
return "PageRank (Pageviews+Pagecounts)"
else:
return "PageRank (Pageviews)"
def get_results_filename(basepath, country):
files = [f for f in glob.glob(basepath + "/*_information_{}.csv".format(country), recursive=True)]
season_years = os.path.basename(files[0]).split("_")[0]
return season_years
def generate_dictionary(f, model):
result = dict()
    unique_years = f.season.unique()
for y in unique_years:
f_tmp = f[f.season == y]
for index, row in f_tmp.iterrows():
page_name = str(row["page_name_"+model])
weigth = float(row["value_"+model])
if page_name in result:
result[page_name].append(weigth)
else:
result[page_name] = [weigth]
return result
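# Sketch (hypothetical rows): for a features frame with columns
# ["season", "page_name_cyclerank", "value_cyclerank"] where two seasons both rank
# the page "Influenza" with weights 0.7 and 0.5,
# generate_dictionary(f, "cyclerank") returns {"Influenza": [0.7, 0.5]},
# i.e. one list of per-season weights per page.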
if __name__ == "__main__":
# Read the command line arguments
args = docopt(__doc__)
# Read some config variables
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
top_features = int(args["--top-k"]) if args["--top-k"] else 5
# Get keywords coming from the various methods
print("")
keywords_standard = pd.read_csv(os.path.join("../data/keywords", "keywords_{}.txt".format(country)), header=None, names=["page_name"])
print("Standard keywords Size: {}".format(len(keywords_standard)))
# Get keywords coming from the various methods
keywords_cyclerank = pd.read_csv(os.path.join("../data/keywords", "keywords_cyclerank_{}.txt".format(country)), header=None, names=["page_name"])
print("Cyclerank keywords Size: {}".format(len(keywords_cyclerank)))
common_keywords = set.intersection(set(keywords_standard.page_name), set(keywords_cyclerank.page_name))
print("Common keywords Size: {}, {}, {}".format(len(common_keywords), len(common_keywords)/len(keywords_standard), len(common_keywords)/len(keywords_cyclerank)))
print("")
# Read the baseline results and merge them
baseline_results_path= os.path.join(base_dir, args["<baseline>"], future, country)
season_years = get_results_filename(baseline_results_path, country)
season_years_baseline = season_years
baseline_result_file = os.path.join(baseline_results_path, "{}_features_{}.csv".format(season_years, country))
baseline_results_df = pd.read_csv(baseline_result_file)[["season", "page_name", "value"]].rename(columns={"page_name": "page_name_{}".format(args["<baseline>"]), "value":"value_{}".format(args["<baseline>"])})
# Concat all the other results
other_results_df = None
for other_results in args["<other_method>"]:
other_results_path = os.path.join(base_dir, other_results, future, country)
season_years = get_results_filename(other_results_path, country)
other_result_file = os.path.join(other_results_path, "{}_features_{}.csv".format(season_years, country))
if other_results_df is None:
other_results_df = pd.read_csv(other_result_file)[["season", "page_name", "value"]]
other_results_df = other_results_df.rename(columns={"page_name": "page_name_{}".format(other_results), "value":"value_{}".format(other_results)})
else:
            current_other_results_df = pd.read_csv(other_result_file)
import warnings
warnings.filterwarnings("ignore")
import pickle
import json
import pandas as pd
import numpy as np
from pathlib import Path
from process_functions import adjust_names, aggregate_countries, moving_average, write_log
from pickle_functions import picklify, unpicklify
######################################
# Retrieve data
######################################
# Paths
path_UN = Path.cwd() / 'input' / 'world_population_2020.csv'
path_confirmed = Path.cwd() / 'input' / 'df_confirmed.csv'
path_deaths = Path.cwd() / 'input' / 'df_deaths.csv'
path_policy = Path.cwd() / 'input' / 'df_policy.csv'
#path_geo = Path.cwd() / 'input'/ 'countries.geojson'
# get data directly from github. The data source provided by Johns Hopkins University.
url_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
url_deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
url_policy = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
#df.to_csv(r'C:/Users/John\Desktop/export_dataframe.csv', index = None)
pop = pd.read_csv(path_UN)
#load old data
df_confirmed_backup = pd.read_csv(path_confirmed)
old_df_confirmed = df_confirmed_backup[['Province/State','Country/Region']]
df_deaths_backup = pd.read_csv(path_deaths)
old_df_deaths = df_deaths_backup[['Province/State','Country/Region']]
df_policy_backup = pd.read_csv(path_policy)
old_names_df_policy = set(df_policy_backup['CountryName'])
old_dates_df_policy = set(df_policy_backup['Date'])
#load new data
df_confirmed = pd.read_csv(url_confirmed, error_bad_lines=False)
new_df_confirmed = df_confirmed[['Province/State','Country/Region']]
df_deaths = pd.read_csv(url_deaths, error_bad_lines=False)
new_df_deaths = df_confirmed[['Province/State','Country/Region']]
df_policy = pd.read_csv(url_policy, error_bad_lines=False)
new_names_df_policy = set(df_policy['CountryName'])
new_dates_df_policy = set(df_policy['Date'])
#compute difference of rows and columns
confirmed_country_diff = new_df_confirmed[~new_df_confirmed.apply(tuple,1).isin(old_df_confirmed.apply(tuple,1))]
confirmed_date_diff = set(df_confirmed.columns).symmetric_difference(set(df_confirmed_backup.columns))
deaths_country_diff = new_df_deaths[~new_df_deaths.apply(tuple,1).isin(old_df_deaths.apply(tuple,1))]
deaths_date_diff = set(df_deaths.columns).symmetric_difference(set(df_deaths_backup.columns))
policy_country_diff = new_names_df_policy.symmetric_difference(old_names_df_policy)
policy_date_diff = new_dates_df_policy.symmetric_difference(old_dates_df_policy)
#write log and load the backup df if there are new countries until the next update
#for confirmed
write_log('--- confirmed cases file check'.upper())
if confirmed_country_diff.empty:
write_log('no new countries added')
else:
write_log('new countries added:\n' + str(confirmed_country_diff))
#df_confirmed = df_confirmed_backup
if len(confirmed_date_diff) > 1:
write_log('multiple new dates added: ' + str(confirmed_date_diff))
elif len(confirmed_date_diff) == 1:
write_log('new date added: ' + str(confirmed_date_diff))
else:
write_log('no new date added')
#for deaths
write_log('--- deaths file check'.upper())
if deaths_country_diff.empty:
write_log('no new countries added')
else:
write_log('new countries added:\n' + str(deaths_country_diff))
#df_deaths = df_deaths_backup
if len(deaths_date_diff) > 1:
write_log('multiple new dates added: ' + str(deaths_date_diff))
elif len(deaths_date_diff) == 1:
write_log('new date added: ' + str(deaths_date_diff))
else:
write_log('no new date added')
#for policy
write_log('--- policy file check'.upper())
if not bool(policy_country_diff):
write_log('no new countries added')
else:
write_log('new countries added:\n' + str(policy_country_diff))
#df_policy = df_policy_backup
if len(policy_date_diff) > 1:
write_log('multiple new dates added: ' + str(policy_date_diff))
elif len(policy_date_diff) == 1:
write_log('new date added: ' + str(policy_date_diff))
else:
write_log('no new date added')
df_confirmed.to_csv(path_confirmed, index = None)
df_deaths.to_csv(path_deaths, index = None)
df_policy.to_csv(path_policy, index = None)
#########################################################################################
# Data preprocessing for getting useful data and shaping data compatible to plotly plot
#########################################################################################
# List of EU28 countries
eu28 = ['Austria', 'Italy', 'Belgium', 'Latvia', 'Bulgaria', 'Lithuania', 'Croatia', 'Luxembourg',
'Cyprus', 'Czech Republic', 'Malta', 'Netherlands', 'Denmark', 'Poland', 'Estonia', 'Portugal', 'Finland', 'Romania',
'France', 'Slovakia', 'Germany', 'Slovenia', 'Greece', 'Spain', 'Hungary', 'Sweden', 'Ireland', 'United Kingdom']
#filter the countries' names to fit our list of names
df_confirmed = adjust_names(df_confirmed.copy())
df_deaths = adjust_names(df_deaths.copy())
df_confirmed = aggregate_countries(df_confirmed.copy(), graph = 'scatter')
df_deaths = aggregate_countries(df_deaths.copy(), graph = 'scatter')
# Create a dataframe for the world with the date as columns, keep the Province/State column to rename it below
df_world = df_confirmed[0:0].drop(columns = ['Country/Region', 'Lat', 'Long']).copy()
# Create dataframes for EU28 for each variable
df_EU28_confirmed = df_confirmed.set_index('Country/Region').loc[eu28].copy()
df_EU28_confirmed = df_EU28_confirmed.drop(columns = ['Lat', 'Long'])
df_EU28_deaths = df_deaths.set_index('Country/Region').loc[eu28].copy()
df_EU28_deaths = df_EU28_deaths.drop(columns = ['Lat', 'Long'])
# Sum variables to get aggregate EU28 values
df_confirmed_EU28 = df_EU28_confirmed.reset_index().drop(columns = ['Country/Region']).iloc[:, :].sum(axis=0)
df_deaths_EU28 = df_EU28_deaths.reset_index().drop(columns = ['Country/Region']).iloc[:, :].sum(axis=0)
# Drop columns
df_EU28 = df_EU28_confirmed[0:0].reset_index().drop(columns = ['Country/Region']).copy()
# Total cases
df_confirmed_total = df_confirmed.drop(columns = ['Country/Region', 'Lat', 'Long']).iloc[:, :].sum(axis=0)
df_deaths_total = df_deaths.drop(columns = ['Country/Region', 'Lat', 'Long']).iloc[:, :].sum(axis=0)
# Add the rows to the world dataframe by date
df_world = df_world.append([df_confirmed_total, df_deaths_total] , ignore_index=True)
df_EU28 = df_EU28.append([df_confirmed_EU28, df_deaths_EU28] , ignore_index=True)
#add a column to explicitly define the the row for confirmed cases and for deaths
df_EU28.insert(loc=0, column='cases', value=['confirmed', 'deaths'])
df_world.insert(loc=0, column='cases', value=['confirmed', 'deaths'])
# Compute the increment from the previous day for the latest available data
daily_confirmed_world = df_world.iloc[0, -1] - df_world.iloc[0, -2]
daily_deaths_world = df_world.iloc[1, -1] - df_world.iloc[1, -2]
daily_confirmed_EU28 = df_EU28.iloc[0, -1] - df_EU28.iloc[0, -2]
daily_deaths_EU28 = df_EU28.iloc[1, -1] - df_EU28.iloc[1, -2]
# Recreate required columns for map data
map_data = df_confirmed[["Country/Region", "Lat", "Long"]]
map_data['Confirmed'] = df_confirmed.loc[:, df_confirmed.columns[-1]]
map_data['Deaths'] = df_deaths.loc[:, df_deaths.columns[-1]]
#aggregate the data of countries divided in provinces
map_data = aggregate_countries(map_data , graph = 'map')
#adjust some names of countries in the population dataframe
pop = pop[['name', 'pop2019']]
pop['pop2019'] = pop['pop2019'] * 1000
pop.at[pop['name'] == 'United States','name'] = 'United States of America'
pop.at[pop['name'] == 'Ivory Coast','name'] = "Cote d'Ivoire"
pop.at[pop['name'] == 'Republic of the Congo','name'] = "Republic of Congo"
pop.at[pop['name'] == 'DR Congo','name'] = "Democratic Republic of the Congo"
pop.at[pop['name'] == 'Timor-Leste','name'] = "East Timor"
pop.at[pop['name'] == 'Vatican City','name'] = "Holy See"
pop.at[pop['name'] == 'Macedonia','name'] = "North Macedonia"
pop.at[pop['name'] == '<NAME>','name'] = "<NAME>"
pop.at[pop['name'] == '<NAME>','name'] = "<NAME>"
temp_pop_names = list(pop['name'])
#create a list with the names of countries in the cases df not present in the population df
not_matched_countries = []
for i in list(df_confirmed['Country/Region'].unique()):
if i not in temp_pop_names:
not_matched_countries.append(i)
#add the total world and eu28 population to the population df
world_population = pop.drop(columns = ['name']).iloc[:, :].sum(axis=0)
pop_EU28 = pop.set_index('name').loc[eu28].copy()
EU28_population = pop_EU28.reset_index().drop(columns = ['name']).iloc[:, :].sum(axis=0)
pop = pop.set_index('name')
pop_t = pop.T.astype(int)
pop_t['World'] = int(world_population)
pop_t['EU28'] = int(EU28_population)
#last 24 hours increase
#map_data['Deaths_24hr']=df_deaths.iloc[:,-1] - df_deaths.iloc[:,-2]
#map_data['Confirmed_24hr']=df_confirmed.iloc[:,-1] - df_confirmed.iloc[:,-2]
#map_data.sort_values(by='Confirmed', ascending=False, inplace=True)
#create utility df transposed without lat and lon
df_confirmed_t=df_confirmed.drop(['Lat','Long'],axis=1).T
df_deaths_t=df_deaths.drop(['Lat','Long'],axis=1).T
df_confirmed_t.columns = df_confirmed_t.iloc[0]
df_confirmed_t = df_confirmed_t.iloc[1:]
df_deaths_t.columns = df_deaths_t.iloc[0]
df_deaths_t = df_deaths_t.iloc[1:]
df_world_t = df_world.T
df_world_t.columns = df_world_t.iloc[0]
df_world_t = df_world_t.iloc[1:]
df_EU28_t = df_EU28.T
df_EU28_t.columns = df_EU28_t.iloc[0]
df_EU28_t = df_EU28_t.iloc[1:]
# Remove countries for which we lack population data from the UN
df_confirmed_t = df_confirmed_t.drop(not_matched_countries, axis = 1)
df_deaths_t = df_deaths_t.drop(not_matched_countries, axis = 1)
# Set the countries available as choices in the dropdown menu
available_indicators = ['World', 'EU28']
for i in list(df_confirmed_t):
available_indicators.append(i)
df_confirmed_t.index=pd.to_datetime(df_confirmed_t.index)
df_deaths_t.index = pd.to_datetime(df_deaths_t.index)
df_world_t.index = pd.to_datetime(df_world_t.index)
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
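# Usage sketch (mirrors how the tests below build their pipelines): the factory
# parameterizes the dataset by how many announcements out the loader should look,
# e.g.
#   dataset = QuartersEstimates(1)
#   Pipeline({c.name: c.latest for c in dataset.columns})
# asks the loader for the quarter one announcement away from each simulation day.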
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
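# Illustrative note (not part of the original test suite): for
# start_date=2015-01-05, sids=[0, 1] and a single tuple (0, 100.0, 2015-01-06),
# the helper returns a frame indexed by (at_date, knowledge_date) whose sid-0
# column is NaN through 2015-01-05 and 100.0 from 2015-01-06 onward (forward
# filled), while the sid-1 column is all NaN because no tuples mention it.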
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
        A simple DataFrame with two quarters of estimates for a single sid,
        using two estimate columns and no other data.
    Tests
    ------
    test_load_one_day()
        Tests that the latest value of each estimate column is loaded
        correctly for a single day.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous-quarter loader returns the most recently announced
    quarter's values when loading a single day with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next-quarter loader returns the upcoming quarter's values
    when loading a single day with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, | pd.Timestamp("2015-02-10") | pandas.Timestamp |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
with pytest.raises(TypeError):
s_1111 & 'a'
with pytest.raises(TypeError):
s_1111 & ['a', 'b', 'c', 'd']
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
with pytest.raises(TypeError):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
assert_series_equal(result, expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pandas-dev/pandas/issues/5284
with pytest.raises(TypeError):
d.__and__(s, axis='columns')
with pytest.raises(TypeError):
s & d
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
@pytest.mark.parametrize('op', [
operator.and_,
operator.or_,
operator.xor,
])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
dtype=bool)
result = op(ser, idx2)
assert_series_equal(result, expected)
@pytest.mark.parametrize("op, expected", [
(ops.rand_, pd.Index([False, True])),
(ops.ror_, pd.Index([False, True])),
(ops.rxor, pd.Index([])),
])
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
with pytest.raises(TypeError):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
with pytest.raises(TypeError):
t & v
def test_logical_ops_df_compat(self):
# GH#1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
assert_series_equal(s1 & s2, exp)
assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s3 & s4, exp)
assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
class TestSeriesComparisons(object):
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
assert_series_equal(s == s2, exp)
assert_series_equal(s2 == s, exp)
def test_categorical_comparisons(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
assert not (a == 'a').all()
assert ((a != 'a') == ~(a == 'a')).all()
assert not ('a' == a).all()
assert (a == 'a')[0]
assert ('a' == a)[0]
assert not ('a' != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert ((~(a == e) == (a != e)).all())
assert ((~(e == a) == (e != a)).all())
assert ((~(a == f) == (a != f)).all())
assert ((~(f == a) == (f != a)).all())
# non-equality is not comparable
with pytest.raises(TypeError):
a < b
with pytest.raises(TypeError):
b < a
with pytest.raises(TypeError):
a > b
with pytest.raises(TypeError):
b > a
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = ser[5]
f = getattr(operator, op)
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
with pytest.raises(TypeError):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
with pytest.raises(TypeError):
cat < "d"
with pytest.raises(TypeError):
cat > "d"
with pytest.raises(TypeError):
"d" < cat
with pytest.raises(TypeError):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
@pytest.mark.parametrize('pair', [
([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')],
[NaT, NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')],
[NaT, NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), NaT,
pd.Period('2011-03', freq='M')],
[NaT, NaT, pd.Period('2011-03', freq='M')]),
])
@pytest.mark.parametrize('reverse', [True, False])
@pytest.mark.parametrize('box', [Series, Index])
@pytest.mark.parametrize('dtype', [None, object])
def test_nat_comparisons(self, dtype, box, reverse, pair):
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
# Series, Index
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
assert tm.equalContents(ts.index != 5, expected)
assert tm.equalContents(~(ts.index == 5), expected)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
left == right
with pytest.raises(ValueError, match=msg):
left != right
with pytest.raises(ValueError, match=msg):
left < right
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
left.to_frame() == right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() != right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
class TestSeriesFlexComparisonOps(object):
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
assert_series_equal(left.gt(right, fill_value=0), exp)
class TestSeriesOperators(TestData):
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
assert np.isnan(result).all()
result = empty + Series([], index=Index([]))
assert len(result) == 0
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
tm.assert_series_equal(added[:-5], expected)
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
@pytest.mark.parametrize('op, equiv_op, fv', pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = | isna(a) | pandas.isna |
"""
Author: <NAME> (<EMAIL>)
Date: 2020-02-10
-----Description-----
This script provides a class and set of functions for bringing CSPP science variables into Python memory.
This is set up for recovered_cspp streams, but should also work for telemetered data.
Note that CTD, DOSTA, SPKIR, PAR, and VELPT are the only data sets that are telemetered. OPTAA and NUTNR data
packets are too large to transfer in a short surface window.
There are three general functions and one function for each CSPP data stream.
To make multiple data requests, submit each request before checking to see if the data is available.
-----Required Libraries-----
requests: For issuing and checking request status.
re: For parsing returned json for URLs that contain instrument data.
time: For pausing the script while checking a data request status.
pandas: For organizing data.
xarray: For opening remote NetCDFs.
-----Class-----
OOIM2M() <<< This is the overall class. This must prepend a function.
Example 1: url = OOIM2M.create_url(url,start_date,start_time,stop_date,stop_time)
request = OOIM2M.make_request(url,user,token)
nc = OOIM2M.get_location(request)
Example 2: THIS_EXAMPLE_IS_TOO_LONG = OOIM2M()
url = THIS_EXAMPLE_IS_TOO_LONG.create_url(url)
request = THIS_EXAMPLE_IS_TOO_LONG.make_request(url,user,token)
nc = THIS_EXAMPLE_IS_TOO_LONG.get_location(request)
-----General Functions-----
url = OOIM2M.create_url(url,start_date,start_time,stop_date,stop_time) <<< Function for generating a request URL for data between two datetimes. Returns a complete request URL. URL is the base request url for the data you want. Dates in YYYY-MM-DD. Times in HH:MM:SS.
request = OOIM2M.make_request(url,user,token) <<< Function for making the request from the URL created from create_url. User and token are found in your account information on OOINet. Returns a requests object.
nc = OOIM2M.get_location(request) <<< Function that gathers the remote locations of the requested data. Returns a list of URLs where the data is stored as netCDFs. This list includes data that is used in the creation of data products. Example: CTD data accompanies DOSTA data.
-----Instrument Functions-----
ctd = cspp_ctd(nc) <<< Returns a pandas dataframe that contains datetime, pressure, temperature, salinity, and density.
dosta = cspp_dosta(nc) <<< Returns a pandas dataframe that contains datetime, pressure, temperature, concentration, and estimated saturation. CTD data is also made available.
flort = cspp_flort(nc) <<< Returns pandas dataframe that contains datetime, pressure, chlorophyll-a, cdom, and optical backscatter.
nutnr = cspp_nutnr(nc) <<< Interpolates pressure for nitrate data using time and CTD pressure. Returns a pandas dataframe that contains datetime, pressure, and nitrate.
par = cspp_parad(nc) <<< Returns a pandas dataframe that contains datetime, pressure, bulk photosynthetically active radiation.
velpt = cspp_velpt(nc) <<< Returns a pandas dataframe that contains datetime, pressure, northward velocity, eastward velocity, upward velocity, heading, pitch, roll, soundspeed, and temperature measured by the aquadopp.
batt1, batt2 = cspp_batts(nc) <<< Returns two pandas dataframes that contain datetime and voltage for each CSPP battery.
compass = cspp_cpass(nc) <<< Returns a pandas dataframe that contains datetime, pressure, heading, pitch, and roll from the control can.
sbe50 = cspp_sbe50(nc) <<< Returns a pandas dataframe that contains datetime, pressure, and profiler velocity calculated from the SBE50 in the control can.
winch = cspp_winch(nc) <<< Returns a pandas dataframe that contains datetime, pressure, internal temperature of the winch, current seen by the winch, voltage seen by the winch, and the rope on the winch drum.
cspp_spkir(nc) <<< Under development.
cspp_optaa(nc) <<< Under development.
-----Extra Functions-----
find_site(nc) <<< Function that identifies the requested CSPP site and standard depth of that site. Used in removing bad pressure data. Called by data functions. Not generally called by the user.
-----Notes/Issues-----
Flort_sample is the stream name for CSPP fluorometer data.
However, when requests are made for this stream, only deployments 5 and greater are returned.
For deployments 1-4, the current stream is flort_dj_cspp_instrument_recovered.
OOI personnel are working to make flort_sample the stream that contains all data from all deployments.
NUTNR data does not have pressure data associated with it in the raw files produces by the CSPP.
The function provided in this script interpolates based on time.
Alternatively, the user can call the int_ctd_pressure variable.
The cspp_optaa function is in the works.
OOI ion-function for VELPT-J assumes data from the instrument is output in mm/s, when it is actually output in m/s.
https://github.com/oceanobservatories/ion-functions/blob/master/ion_functions/data/vel_functions.py
The simple fix now is to multiply returned velocity values by 1000 to get it back into to m/s.
"""
import requests, re, time, pandas as pd, numpy as np, xarray as xr
#CE01ISSP URLs
CE01ISSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE01ISSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE01ISSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE01ISSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE01ISSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
CE01ISSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE01ISSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE01ISSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE01ISSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE01ISSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE01ISSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE01ISSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE02SHSP URLs
CE02SHSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE02SHSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE02SHSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE02SHSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE02SHSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
CE02SHSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE02SHSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE02SHSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE02SHSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE02SHSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE02SHSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE02SHSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE06ISSP URLs
CE06ISSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE06ISSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE06ISSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE06ISSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE06ISSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
CE06ISSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE06ISSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE06ISSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE06ISSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE06ISSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE06ISSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE06ISSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE07SHSP URLs
CE07SHSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE07SHSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE07SHSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE07SHSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE07SHSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
CE07SHSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE07SHSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE07SHSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE07SHSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE07SHSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE07SHSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE07SHSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
class OOIM2M():
def __init__(self):
return
def create_url(url,start_date = '2014-04-04',start_time = '00:00:00',stop_date = '2035-12-31',stop_time = '23:59:59'): #Create a request URL.
timestring = "?beginDT=" + start_date + 'T' + start_time + ".000Z&endDT=" + stop_date + 'T' + stop_time + '.999Z' #Get the timespan into an OOI M2M format.
m2m_url = url + timestring #Combine the partial URL with the timespan to get a full url.
return m2m_url
def make_request(m2m_url, user ='OOIAPI-BCJPAYP2KUVXFX', token = '<KEY>O'): #Request data from UFRAME using the generated request URL.
request = requests.get(m2m_url,auth = (user,token))
if request.status_code == requests.codes.ok: #If the response is 200, then continue.
print('Request successful.')
return request
elif request.status_code == requests.codes.bad: #If the response is 400, then issue a warning to force the user to find an issue.
print(request)
print('Bad request. Check request URL, user, and token.')
return
elif request.status_code == requests.codes.not_found: #If the response is 404, there might not be data during the prescribed time period.
print(request)
print('Not found. There may be no data available during the requested time period.')
return
else: #If an error that is unusual is thrown, show this message.
print(request)
print('Unanticipated error code. Look up error code here: https://github.com/psf/requests/blob/master/requests/status_codes.py')
return
def get_location(request): #Check the status of the data request and return the remote location when complete.
data = request.json() #Return the request information as a json.
check = data['allURLs'][1] + '/status.txt' #Make a checker.
for i in range(60*30): #Given roughly half an hour...
r = requests.get(check) #check the request.
if r.status_code == requests.codes.ok: #If everything is okay.
print('Request complete.') #Print this message.
break
else:
print('Checking request...',end = " ")
print(i)
time.sleep(1) #If the request isn't complete, wait 1 second before checking again.
print("")
data_url = data['allURLs'][0] #This webpage provides all URLs for the request.
data_urls= requests.get(data_url).text #Convert the page to text.
data_nc = re.findall(r'(ooi/.*?.nc)',data_urls) #Find netCDF urls in the text.
for j in data_nc:
if j.endswith('.nc') == False: #If the URL does not end in .nc, toss it.
data_nc.remove(j)
for j in data_nc:
try:
float(j[-4]) == True #If the 4th to last value isn't a number, then toss it.
except:
data_nc.remove(j)
thredds_url = 'https://opendap.oceanobservatories.org/thredds/dodsC/' #This is the base url for remote data access.
fill = '#fillmismatch' #Applying fill mismatch prevents issues.
data_nc = np.char.add(thredds_url,data_nc) #Combine the thredds_url and the netCDF urls.
nc = np.char.add(data_nc,fill) #Append the fill.
return nc
def find_site(nc): #Function for finding the requested site and setting the standard depth.
df = pd.DataFrame(data = {'location':nc}) #Put the remote location in a dataframe.
url = df['location'].iloc[0] #Take the first URL...
banana = url.split("-") #Split it by the dashes.
site = banana[1] #The value in the second location is the site.
if site == 'CE01ISSP': #If the site is..
depth = 25 #This is the standard deployment depth.
elif site == 'CE02SHSP':
depth = 80
elif site == 'CE06ISSP':
depth = 29
elif site == 'CE07SHSP':
depth = 87
else:
depth = 87
return site,depth #Return the site and depth for use later.
def cspp_ctd(nc):
site,depth = OOIM2M.find_site(nc)
data = pd.DataFrame() #Create a placeholder dataframe.
for remote in nc: #For each remote netcdf location
dataset = xr.open_dataset(remote) #Open the dataset.
d = ({'datetime':dataset['profiler_timestamp'], #Pull the following variables.
'pressure':dataset['pressure'],
'temperature':dataset['temperature'],
'salinity':dataset['salinity'],
'density':dataset['density'],
'conductivity':dataset['conductivity']})
d = pd.DataFrame(data = d) #Put the variables in a dataframe.
data = pd.concat([data,d]) #Concatenate the new dataframe with the old dataframe.
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data[data.temperature > 0]
data = data[data.salinity > 2]
data = data[data.salinity < 42]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('CTD data for ' + site + ' available.')
print('CTD datetime in UTC.')
print('CTD pressure in dbars.')
print('CTD temperature in degC.')
print('CTD salinity in PSU.')
print('CTD density in kg m^-3.')
print('CTD conductivity in S m^-1.')
return data
def cspp_dosta(nc):
site,depth = OOIM2M.find_site(nc) #Determine the CSPP site and standard depth.
dfnc = pd.DataFrame(data = {'location':nc}) #The returned NetCDFs contain both DOSTA and CTDPF files.
dosta = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify the DOSTA files. (Files that do not (~) contain "cspp-ctdpf_j_cspp_instrument".)
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify the CTD file. CTD data accompanies DOSTA data because it is used in the computation of data products.
data = pd.DataFrame()
for remote in dosta['location']: #For each DOSTA remote location.
dataset = xr.open_dataset(remote) #Open the dataset.
d = ({'datetime':dataset['profiler_timestamp'], #Pull out these variables.
'pressure':dataset['pressure_depth'],
'temperature':dataset['optode_temperature'],
'concentration':dataset['dissolved_oxygen'],
'estimated_saturation':dataset['estimated_oxygen_saturation']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d]) #Concatenate it with the previous loop.
data = data[data.pressure < depth] #Remove bad values.
data = data[data.pressure > 0]
data = data[data.estimated_saturation > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('DOSTA data for ' + site + ' available.')
print('DOSTA datetime in UTC.')
print('DOSTA pressure in dbars.')
print('DOSTA temperature in degC.')
print('DOSTA concentration in umol kg^-1.')
print('DOSTA estimated_saturation in %.')
return data
def cspp_flort(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
flort = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in flort['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['time'],
'pressure':dataset['pressure_depth'],
'chla':dataset['fluorometric_chlorophyll_a'],
'cdom':dataset['fluorometric_cdom'],
'obs':dataset['optical_backscatter']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data[data.chla > 0]
data = data[data.cdom > 0]
data = data[data.obs > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('FLORT data for ' + site + ' available.')
print('FLORT datetime in UTC.')
print('FLORT pressure in dbars.')
print('FLORT chl in ug L^-1.')
print('FLORT cdom in ppb.')
print('FLORT obs in m^-1.')
return data
def cspp_par(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
par = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in par['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'par':dataset['parad_j_par_counts_output']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad pressures.
data = data[data.pressure > 0]
data = data[data.par > 0] #Remove obviously bad values.
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('PAR data for ' + site + ' available.')
print('PAR datetime in UTC.')
print('PAR pressure in dbars.')
print('PAR par in umol photons m^-2 s^-1.')
return data
def cspp_velpt(nc):
# OOI ion-function for VELPT-J assumes data from the instrument is output in mm/s, when it is actually output in m/s.
# https://github.com/oceanobservatories/ion-functions/blob/master/ion_functions/data/vel_functions.py
# The simple fix now is to multiply returned velocity values by 1000 to get it back into to m/s.
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
velpt = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in velpt['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'northward':dataset['velpt_j_northward_velocity'],
'eastward':dataset['velpt_j_eastward_velocity'],
'upward':dataset['velpt_j_upward_velocity'],
'heading':dataset['heading'],
'pitch':dataset['pitch'],
'roll':dataset['roll'],
'soundspeed':dataset['speed_of_sound'],
'temperature':dataset['temperature']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data.northward = data.northward * 1000
data.eastward = data.eastward * 1000
data.upward = data.upward *1000
data = data[data.roll < 90]
data = data[data.roll > -90]
data = data[data.pitch < 90]
data = data[data.pitch > -90]
data = data[data.heading < 360]
data = data[data.heading > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('VELPT data for ' + site + ' available.')
print('VELPT datetime in UTC.')
print('VELPT pressure in dbars.')
print('VELPT northward, eastward, and upward in m s^-1.')
print('VELPT heading, pitch, roll in degrees.')
print('VELPT sounds speed in m s^-1.')
print('VELPT temperature in degC.')
return data
def cspp_batts(nc): #Returns two dataframes, one for each battery.
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
batt = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in batt['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'voltage':dataset['battery_voltage_flt32'],
'battery_position':dataset['battery_number_uint8']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
batt1 = data.loc[data['battery_position'].astype('str').str.contains('1.0')]
batt2 = data.loc[data['battery_position'].astype('str').str.contains('2.0')]
batt1 = batt1.reset_index(drop=True)
batt2 = batt2.reset_index(drop=True)
print('Battery data for ' + site + ' available.')
print('Battery datetime in UTC.')
print('Battery voltage in volts.')
return batt1,batt2
def cspp_cpass(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
hmr = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in hmr['location']:
dataset = xr.open_dataset(remote)
d =({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'heading':dataset['heading'],
'pitch':dataset['pitch'],
'roll':dataset['roll']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data.dropna()
data = data.sort_values('datetime')
data = data.reset_index(drop = True)
print('Compass data for ' + site + ' available.')
print('Compass datetime in UTC.')
print('Compass pressure in dbars.')
print('Compass heading, pitch, and roll in degrees.')
return data
def cspp_sbe50(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
sbe50 = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in sbe50['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'velocity':dataset['velocity_flt32']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data.dropna()
data = data.sort_values('datetime')
data = data.reset_index(drop = True)
print('SBE50 data for ' + site + ' available.')
print('SBE50 datetime in UTC.')
print('SBE50 pressure in dbars.')
print('SBE50 velocity in m s^-1.')
return data
def cspp_winch(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
winch = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in winch['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'wm_temp':dataset['temperature'],
'wm_current':dataset['current_flt32'],
'wm_voltage':dataset['voltage_flt32'],
'rope_on_drum':dataset['rope_on_drum']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data.dropna()
data = data.sort_values('datetime')
data = data.reset_index(drop = True)
print('Winch data for ' + site + ' available.')
print('WM datetime in UTC.')
print('WM pressure in dbars.')
print('WM wm_temp in degC.')
print('WM wm_current in amps.')
print('WM wm_voltage in volts.')
print('WM rope_on_drum in meters.')
return data
def cspp_nutnr(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
nit = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #CTD data accompanies NUTNR data. Parse out the relevant URLs.
ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
nit_data = pd.DataFrame()
for nit_remote in nit['location']: #Pull nitrate data.
nit_dataset = xr.open_dataset(nit_remote)
n = ({'timestamp':nit_dataset['profiler_timestamp'],
'nitrate':nit_dataset['salinity_corrected_nitrate']})
n = pd.DataFrame(data = n)
nit_data = pd.concat([nit_data,n],sort = False)
nit_data = nit_data.sort_values('timestamp')
nit_data = nit_data[nit_data.nitrate > 0]
ctd_data = pd.DataFrame()
for ctd_remote in ctd['location']: #Pull CTD data.
ctd_dataset = xr.open_dataset(ctd_remote)
c = ({'timestamp':ctd_dataset['profiler_timestamp'], #Pull the following variables.
'ctdpressure':ctd_dataset['pressure']})
c = pd.DataFrame(data = c)
ctd_data = pd.concat([ctd_data,c],sort = False)
ctd_data = ctd_data[ctd_data.ctdpressure < depth] #Remove obviously bad values.
ctd_data = ctd_data[ctd_data.ctdpressure > 0]
ctd_data = ctd_data.sort_values('timestamp')
combo = pd.concat([nit_data,ctd_data],sort = True) #Combine nitrate and ctd data.
combo.index = combo['timestamp']
combo = combo[['timestamp','ctdpressure','nitrate']]
combo = combo.sort_index()
combo['pressure'] = combo['ctdpressure'].interpolate(method = 'time') #Interpolate pressure by time.
combo['datetime'] = combo.index
data = combo[['datetime','pressure','nitrate']]
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.dropna()
data = data.reset_index(drop = True) #Reset the index.
print('NUTNR data for ' + site + ' available.')
print('NUTNR datetime in UTC.')
print('NUTNR pressure is interpolated from CTD pressure and is in dbars.')
print('NUTNR nitrate in uMol L^-1')
return data
def cspp_spkir(nc):
site,depth = OOIM2M.find_site(nc) #Get the deployment site and standard depth.
dfnc = pd.DataFrame(data = {'location':nc})
spkir = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify remote locations of spkir data.
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame() #Create an empty dataframe for holding.
for remote in spkir['location']: #For each remote location.
dataset = xr.open_dataset(remote) #Open the dataset.
datetime = dataset['profiler_timestamp'].values #Pull datetime data and put it into a pandas array.
datetime = pd.DataFrame(data={'datetime':datetime})
pressure = dataset['pressure_depth'].values #Pull pressure data and put it into a pandas array.
pressure = pd.DataFrame(data = {'pressure':pressure})
vector = dataset['spkir_abj_cspp_downwelling_vector'].values #Pull vector data.
channels = pd.DataFrame(data = vector)
channels = channels.rename(columns={0:"412nm",1:"443nm",2:"490nm",3:"510nm",4:"555nm",5:"620nm",6:"683nm"}) #Assign values as shown by the dataset['spkir_abj_cspp_downwelling_vector'].attrs comment.
d = | pd.concat([datetime,pressure,channels],axis=1,sort=False) | pandas.concat |
#
# Source: https://github.com/ijmbarr/notes-on-causal-inference/blob/master/datagenerators.py
#
#
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
def generate_dataset_0(n_samples=500, set_X=None, show_z=False):
"""
Generate samples from the CSM:
Nodes: (X,Y,Z)
Edges: (Z -> X, Z-> Y, X -> Y)
All variables are binary.
Designed to generate simpson's paradox.
Args
----
n_samples: int, the number of samples to generate
set_X: array, values to set x
Returns
-------
samples: pandas.DateFrame
"""
p_z = 0.5
p_x_z = [0.9, 0.1]
p_y_xz = [0.2, 0.4, 0.6, 0.8]
z = np.random.binomial(n=1, p=p_z, size=n_samples)
if set_X is not None:
assert (len(set_X) == n_samples)
x = set_X
else:
p_x = np.choose(z, p_x_z)
x = np.random.binomial(n=1, p=p_x, size=n_samples)
p_y = np.choose(x + 2 * z, p_y_xz)
y = np.random.binomial(n=1, p=p_y, size=n_samples)
if show_z:
return pd.DataFrame({"x": x, "y": y, "z": z})
return pd.DataFrame({"x": x, "y": y})
def generate_dataset_1(n_samples=500, set_X=None):
"""
Generate samples from the CSM:
Nodes: (X,Y,Z)
Edges: (Z -> X, Z-> Y, X -> Y)
X is binary, Z and Y are continuous.
Args
----
n_samples: int, the number of samples to generate
set_X: array, values to set x
Returns
-------
samples: pandas.DateFrame
"""
z = np.random.uniform(size=n_samples)
if set_X is not None:
assert (len(set_X) == n_samples)
x = set_X
else:
p_x = np.minimum(np.maximum(z, 0.1), 0.9)
x = np.random.binomial(n=1, p=p_x, size=n_samples)
y0 = 2 * z
y1 = y0 - 0.5
y = np.where(x == 0, y0, y1) + 0.3 * np.random.normal(size=n_samples)
return pd.DataFrame({"x": x, "y": y, "z": z})
def generate_dataset_2(n_samples=500, set_X=None):
"""
Generate samples from the CSM:
Nodes: (X,Y,Z)
Edges: (Z -> X, Z-> Y, X -> Y)
X is binary, Z and Y are continuous.
Args
----
n_samples: int, the number of samples to generate
set_X: array, values to set x
Returns
-------
    samples: pandas.DataFrame
"""
z = np.random.uniform(size=n_samples)
if set_X is not None:
assert (len(set_X) == n_samples)
x = set_X
else:
p_x = np.minimum(np.maximum(z, 0.1), 0.8)
x = np.random.binomial(n=1, p=p_x, size=n_samples)
y0 = 2 * z
y1 = np.where(z < 0.2, 3, y0)
y = np.where(x == 0, y0, y1) + 0.3 * np.random.normal(size=n_samples)
    return pd.DataFrame({"x": x, "y": y, "z": z})
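# Illustrative sketch (added): how the set_X argument of the generators above can mimic an
# intervention do(X=x) and be compared with the naive observational contrast. Assumes
# generate_dataset_1 is importable from this module; all names below are illustrative.
def _demo_observational_vs_interventional(n_samples=100000):
    observational = generate_dataset_1(n_samples=n_samples)
    naive_effect = (observational.query("x == 1")["y"].mean()
                    - observational.query("x == 0")["y"].mean())
    # Force X for every sample while leaving the Z -> Y mechanism untouched.
    do_x1 = generate_dataset_1(n_samples=n_samples, set_X=np.ones(n_samples))
    do_x0 = generate_dataset_1(n_samples=n_samples, set_X=np.zeros(n_samples))
    interventional_effect = do_x1["y"].mean() - do_x0["y"].mean()
    return naive_effect, interventional_effect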
import pandas as pd
import geopandas as gpd
def _areal_weighting(
sources,
targets,
extensive,
intensive,
weights,
sid,
tid,
geoms=True,
all_geoms=False,
):
"""
A method for interpolating areal data based soley on
the geometric overlap between sources and targets.
For an 'extensive' variable, either the 'sum' or 'total' weight can
be specified.
For an 'intensive' variable, only the 'sum' weight can
be specified.
For mixed interpolations, this will only impact the
calculation of the extensive variables.
Based on :
https://cran.r-project.org/web/packages/areal/vignettes/areal-weighted-interpolation.html
Parameters
----------
sources : gpd.GeoDataFrame
GeoDataFrame containing variable(s) to be interpolated
targets : gpd.GeoDataFrame
GeoDataFrame where variables will be assigned
extensive : str or list
str or list of extensive variables e.g population counts
intensive : str or list
str list of intensive variables e.g. population density
sid : str
Column containing unique values
tid : str
Column containing unique values
weights : str
type of weights to be computed
geoms : bool (default False)
whether to return target geometries
all_geoms : bool (default False)
whether to return all target geoms
or only those that intersect sources
Return
------
type: pd.DataFrame or gpd.GeoDataFrame
targets containing interpolated values
"""
if extensive is not None and intensive is None:
        if isinstance(extensive, list):
raise ValueError(
"Multiple variables for areal weighting is not supported yet"
)
else:
return _areal_weighting_single(
sources,
targets,
extensive,
intensive,
weights,
sid,
tid,
geoms,
all_geoms,
)
elif extensive is None and intensive is not None:
        if isinstance(intensive, list):
raise ValueError(
"Multiple variables for areal weighting is not supported yet"
)
else:
return _areal_weighting_single(
sources,
targets,
extensive,
intensive,
weights,
sid,
tid,
geoms,
all_geoms,
)
else:
if extensive is not None and intensive is not None:
raise ValueError("Mixed areal interpolation is not yet supported")
def _areal_weighting_single(
sources,
targets,
extensive,
intensive,
weights,
sid,
tid,
geoms=False,
all_geoms=False,
):
"""
    A method for interpolating areal data based solely on
the geometric overlap between sources and targets.
This function only accepts single variables.
For an 'extensive' variable, either the 'sum' or 'total' weight can
be specified.
For an 'intensive' variable, only the 'sum' weight can
be specified.
For mixed interpolations, this will only impact the
calculation of the extensive variables.
Based on :
https://cran.r-project.org/web/packages/areal/vignettes/areal-weighted-interpolation.html
Parameters
----------
sources : gpd.GeoDataFrame
GeoDataFrame containing variable(s) to be interpolated
targets : gpd.GeoDataFrame
GeoDataFrame where variables will be assigned
extensive : str or list
str or list of extensive variables e.g population counts
intensive : str or list
str list of intensive variables e.g. population density
sid : str
Column containing unique values
tid : str
Column containing unique values
weights : str
type of weights to be computed
geoms : bool (default False)
whether to return target geometries
all_geoms : bool (default False)
whether to return all target geoms
or only those that intersect sources
Return
------
type: pd.DataFrame or gpd.GeoDataFrame
targets containing interpolated values
"""
if extensive is not None and intensive is not None:
raise ValueError(
"Use _areal_weighting_multi for mixed types - not yet supported"
)
if intensive is not None and weights != "sum":
raise ValueError(
"Areal weighting only supports 'sum' weights \
with use of intensive variables"
)
area = "Aj"
if extensive is not None:
var = extensive
if var in targets:
raise ValueError(f"{var} already in target GeoDataFrame")
sources[area] = sources.area
else:
var = intensive
if var in targets:
raise ValueError(f"{var} already in target GeoDataFrame")
targets[area] = targets.area
intersect = gpd.overlay(targets, sources, how="intersection")
intersect = intersect.sort_values(by=tid) # TO DO: remove afterwards
# Calculate weights based on area overlap
Ai = intersect.area
Aj = intersect[area]
Wij = Ai / Aj
# Estimate values by weighted intersected values
Vj = intersect[var].values
Ei = Vj * Wij
intersect[var] = Ei
# Summarize data for each target
Gk = intersect[[tid, var]].groupby(by=tid).sum()
if weights == "total":
w = sum(sources[var]) / sum(Gk[var])
Gk[var] = Gk[var] * w
if geoms is True:
if all_geoms is True:
            return pd.merge(targets, Gk, on=tid, how="outer")
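# Illustrative sketch (added): one possible call of the areal-weighting helper above on two
# toy GeoDataFrames. The geometries, column names and CRS are made up for the example, and
# the function body above is shown truncated, so only the geoms=True/all_geoms=True path is
# exercised here.
def _demo_areal_weighting():
    from shapely.geometry import box
    sources = gpd.GeoDataFrame(
        {"sid": [1, 2], "pop": [100, 200]},
        geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)],
        crs="EPSG:3857",
    )
    targets = gpd.GeoDataFrame(
        {"tid": [1]},
        geometry=[box(0.5, 0, 1.5, 1)],
        crs="EPSG:3857",
    )
    return _areal_weighting_single(
        sources, targets, extensive="pop", intensive=None, weights="sum",
        sid="sid", tid="tid", geoms=True, all_geoms=True,
    )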
from peloton import PelotonWorkout
import numpy as np
import pandas as pd
def get_all_workout_data():
variables = ['timestamp', 'fitness_discipline', 'title', 'duration', 'instructor', 'calories', 'distance']
workouts = PelotonWorkout.list()
    df = pd.DataFrame(columns=variables)
import io
import os
import json
import gc
import pandas as pd
import numpy as np
from datetime import date, timedelta
from fastapi import FastAPI, File, HTTPException
import lightgbm as lgb
from lightgbm import LGBMClassifier
import matplotlib.pyplot as plt
import joblib
app = FastAPI(
title="Home Credit Default Risk",
description="""Obtain information related to probability of a client defaulting on loan.""",
version="0.1.0",
)
def calculate_years(days):
"""
    Calculate the number of whole years between (today - the given number of days) and today.
    Parameters:
    -----------------
    days (int): Number of days to subtract from today (the sign is ignored)
    Returns:
    -----------------
    years (int): Number of whole years
"""
today = date.today()
initial_date = today - timedelta(abs(days))
years = today.year - initial_date.year - ((today.month, today.day) < (initial_date.month, initial_date.day))
return years
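# Illustrative sketch (added): the helper above turns a signed day offset (e.g. the DAYS_BIRTH
# column) into whole years before today; because of the abs() call the sign does not matter.
def _demo_calculate_years():
    return calculate_years(-20 * 365 - 5)  # roughly twenty years of days -> 20 in most cases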
########################################################
# Columns to read on CSVs
########################################################
COLUMNS = [
"SK_ID_CURR", "AMT_INCOME_TOTAL", "CODE_GENDER",
"DAYS_BIRTH", "DAYS_REGISTRATION", "DAYS_EMPLOYED",
"AMT_CREDIT", "AMT_GOODS_PRICE", "EXT_SOURCE_2",
"EXT_SOURCE_3",
]
########################################################
# Reading the csv
########################################################
df_clients_to_predict = pd.read_csv("datasets/df_clients_to_predict_20220221.csv")
df_current_clients = pd.read_csv("datasets/df_current_clients_20220221.csv")
df_current_clients["AGE"] = df_current_clients["DAYS_BIRTH"].apply(lambda x: calculate_years(x))
df_current_clients["YEARS_EMPLOYED"] = df_current_clients["DAYS_EMPLOYED"].apply(lambda x: calculate_years(x))
df_current_clients["EXT_SOURCE_2"] = df_current_clients["EXT_SOURCE_2"].round(3)
df_current_clients["EXT_SOURCE_3"] = df_current_clients["EXT_SOURCE_3"].round(3)
df_current_clients_by_target_repaid = df_current_clients[df_current_clients["TARGET"] == 0]
df_current_clients_by_target_not_repaid = df_current_clients[df_current_clients["TARGET"] == 1]
@app.get("/api/clients")
async def clients_id():
"""
EndPoint to get all clients id
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
return {"clientsId": clients_id}
@app.get("/api/clients/{id}")
async def client_details(id: int):
"""
EndPoint to get client's detail
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
if id not in clients_id:
raise HTTPException(status_code=404, detail="client's id not found")
else:
# Filtering by client's id
df_by_id = df_clients_to_predict[COLUMNS][df_clients_to_predict["SK_ID_CURR"] == id]
idx = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"]==id].index[0]
for col in df_by_id.columns:
globals()[col] = df_by_id.iloc[0, df_by_id.columns.get_loc(col)]
client = {
"clientId" : int(SK_ID_CURR),
"gender" : "Man" if int(CODE_GENDER) == 0 else "Woman",
"age" : calculate_years(int(DAYS_BIRTH)),
"antiquity" : calculate_years(int(DAYS_REGISTRATION)),
"yearsEmployed" : calculate_years(int(DAYS_EMPLOYED)),
"goodsPrice" : float(AMT_GOODS_PRICE),
"credit" : float(AMT_CREDIT),
"anualIncome" : float(AMT_INCOME_TOTAL),
"source2" : float(EXT_SOURCE_2),
"source3" : float(EXT_SOURCE_3),
"shapPosition" : int(idx)
}
return client
@app.get("/api/predictions/clients/{id}")
async def predict(id: int):
"""
EndPoint to get the probability honor/compliance of a client
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
if id not in clients_id:
raise HTTPException(status_code=404, detail="client's id not found")
else:
# Loading the model
model = joblib.load("models/model_20220220.pkl")
threshold = 0.135
# Filtering by client's id
df_prediction_by_id = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"] == id]
df_prediction_by_id = df_prediction_by_id.drop(df_prediction_by_id.columns[[0, 1]], axis=1)
# Predicting
result_proba = model.predict_proba(df_prediction_by_id)
y_prob = result_proba[:, 1]
result = (y_prob >= threshold).astype(int)
if (int(result[0]) == 0):
result = "Yes"
else:
result = "No"
return {
"repay" : result,
"probability0" : result_proba[0][0],
"probability1" : result_proba[0][1],
"threshold" : threshold
}
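# Illustrative sketch (added): how the prediction endpoint above could be exercised locally
# with FastAPI's TestClient. The client id below is a placeholder and may not exist in the
# CSV files; this block is an example only and is not part of the deployed service.
def _demo_predict_endpoint(client_id: int = 100001):
    from fastapi.testclient import TestClient
    test_client = TestClient(app)
    response = test_client.get(f"/api/predictions/clients/{client_id}")
    # Returns 404 for an unknown id, otherwise the repay decision, probabilities and threshold.
    return response.status_code, response.json()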
@app.get("/api/predictions/clients/shap/{id}")
async def client_shap_df(id: int):
"""
EndPoint to return a df with all client's data
"""
clients_id = df_clients_to_predict["SK_ID_CURR"].tolist()
if id not in clients_id:
raise HTTPException(status_code=404, detail="client's id not found")
else:
# Filtering by client's id
idx = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"]==id].index[0]
client = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"] == id].drop(columns=["SK_ID_CURR", "AMT_INCOME_TOTAL"])
client = client.to_json(orient="records")
return client
@app.get("/api/statistics/ages")
async def statistical_age():
"""
EndPoint to get some statistics - ages
"""
ages_data_repaid = df_current_clients_by_target_repaid.groupby("AGE").size()
ages_data_repaid = pd.DataFrame(ages_data_repaid).reset_index()
ages_data_repaid.columns = ["AGE", "AMOUNT"]
ages_data_repaid = ages_data_repaid.set_index("AGE").to_dict()["AMOUNT"]
ages_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("AGE").size()
ages_data_not_repaid = pd.DataFrame(ages_data_not_repaid).reset_index()
ages_data_not_repaid.columns = ["AGE", "AMOUNT"]
ages_data_not_repaid = ages_data_not_repaid.set_index("AGE").to_dict()["AMOUNT"]
return {"ages_repaid" : ages_data_repaid, "ages_not_repaid" : ages_data_not_repaid}
@app.get("/api/statistics/yearsEmployed")
async def statistical_years_employed():
"""
EndPoint to get some statistics - years employed
"""
years_employed_data_repaid = df_current_clients_by_target_repaid.groupby("YEARS_EMPLOYED").size()
years_employed_data_repaid = pd.DataFrame(years_employed_data_repaid).reset_index()
years_employed_data_repaid.columns = ["YEARS_EMPLOYED", "AMOUNT"]
years_employed_data_repaid = years_employed_data_repaid.set_index("YEARS_EMPLOYED").to_dict()["AMOUNT"]
years_employed_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("YEARS_EMPLOYED").size()
years_employed_data_not_repaid = pd.DataFrame(years_employed_data_not_repaid).reset_index()
years_employed_data_not_repaid.columns = ["YEARS_EMPLOYED", "AMOUNT"]
years_employed_data_not_repaid = years_employed_data_not_repaid.set_index("YEARS_EMPLOYED").to_dict()["AMOUNT"]
return {
"years_employed_repaid" : years_employed_data_repaid,
"years_employed_not_repaid" : years_employed_data_not_repaid
}
@app.get("/api/statistics/amtCredits")
async def statistical_amt_credit():
"""
EndPoint to get some statistics - AMT Credit
"""
amt_credit_data_repaid = df_current_clients_by_target_repaid.groupby("AMT_CREDIT").size()
    amt_credit_data_repaid = pd.DataFrame(amt_credit_data_repaid)
'''
Author: <NAME>
Date: 2021-06-16 07:51:48
LastEditTime: 2021-06-18 06:57:23
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /Binance_Futures_python/history/download_id.py
'''
import hmac
import hashlib
from urllib import parse
import time
from datetime import datetime, timedelta
import requests
import pandas as pd
import numpy as np
import os
id_path = "https://api2.binance.com/sapi/v1/futuresHistDataId"
link_path = "https://api2.binance.com/sapi/v1/downloadLink"
query_symbols = ['BTCUSDT','ETHUSDT','LTCUSDT','EOSUSDT']
from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
from binance_f.model.constant import *
g_api_key = '<KEY>'
g_secret_key = '<KEY>'
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
def geterate_periods(start='2020-01-01 00:00:00', end='2021-04-05 00:00:00', days=15):
if end:
endArray = time.strptime(end, "%Y-%m-%d %H:%M:%S")
end = int(time.mktime(endArray)) * 1000
else:
end = int(time.time()) * 1000
startArray = time.strptime(start, "%Y-%m-%d %H:%M:%S")
start = int(time.mktime(startArray)) * 1000
return range(start, end, days*24*60*60*1000)
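# Illustrative sketch (added): the helper above yields millisecond epoch boundaries; converting
# each boundary back to a pandas Timestamp is a convenient way to inspect them. This block is
# for illustration only and is not part of the original download script.
def _demo_periods(start='2020-01-01 00:00:00', end='2020-03-01 00:00:00', days=15):
    boundaries = list(geterate_periods(start=start, end=end, days=days))
    return [pd.Timestamp(ms, unit='ms') for ms in boundaries]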
def get_symbol_start(csv_file,symbol,data_type,days):
if not os.path.exists(csv_file):
time_start = int(time.mktime(datetime.strptime('2020-01-01','%Y-%m-%d').timetuple())) * 1000
else:
        df = pd.read_csv(csv_file)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
        xp = bdate_range('2000-1-1', periods=10, tz='UTC')
import os
from tkinter import filedialog
from tkinter import *
import pandas as pd
import datetime
# define year of measurements for naming
year = 2014
# Please navigate to folder containing measurement subfolders for specified year
root = Tk()
root.withdraw()
folder = filedialog.askdirectory()
# quick check to see all stations included in that year
#folder = "//igswztwwgszona/Gravity Data Archive/Relative Data/All American Canal/2019-05"
for folders in sorted(os.listdir(folder)):
print(folders)
name_array, time_array, corr_g = [], [], []
user_array, meter_array, date_array = [], [], []
for file in sorted(os.listdir(folder)):
abs_path = os.path.join(folder + '/' + file)
if abs_path[-3:] == 'xls' or abs_path[-3:] == 'XLS':
        data_xls = pd.read_excel(abs_path, 'results', index_col=None, usecols=7, dtype='object')
# -*- coding:utf-8 -*-
import sys
import time
import datetime
import pandas as pd
import numpy as np
import logging
# Show floats with three decimal places
pd.set_option('display.float_format', lambda x: '%.3f' % x)
# Show all columns
pd.set_option('display.max_columns', 1000)
# Show all rows
pd.set_option('display.max_rows', 1000)
# Show up to 100 characters per value (the default is 50)
pd.set_option('max_colwidth', 100)
# Only wrap console output once the line width exceeds 1000 characters
pd.set_option('display.width', 1000)
def quiet_logs(sc):
    # Do not print warning messages to the console
logger = sc._jvm.org.apache.log4j
logger.LogManager.getLogger("org").setLevel(logger.Level.ERROR)
logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR)
logger_py4j = logging.getLogger('py4j')
logger_py4j.setLevel(logging.ERROR)
def df_head(hc_df, lines=5):
if hc_df:
df = hc_df.toPandas()
return df.head(lines)
else:
return None
class Py4jHdfs:
"""
    Python helpers for working with HDFS.
"""
def __init__(self, sc):
self.sc = sc
self.filesystem = self.get_file_system()
def path(self, file_path):
"""
        Create a Hadoop Path object.
        :param file_path: absolute file path on HDFS
        :return: an org.apache.hadoop.fs.Path object
"""
path_class = self.sc._gateway.jvm.org.apache.hadoop.fs.Path
return path_class(file_path)
def get_file_system(self):
"""
        Create a FileSystem object from the SparkContext's Hadoop configuration.
        :return: an org.apache.hadoop.fs.FileSystem object
"""
filesystem_class = self.sc._gateway.jvm.org.apache.hadoop.fs.FileSystem
hadoop_configuration = self.sc._jsc.hadoopConfiguration()
return filesystem_class.get(hadoop_configuration)
def ls(self, path, is_return=False):
"""
        List the files under a path, similar to the `hadoop fs -ls` command.
        :param path: absolute HDFS path
        :return: file_list, a list of (name, size, modification time, type) tuples
"""
def file_or_dir(is_file, is_dir):
if is_file:
return 'is_file:True'
elif is_dir:
return 'is_directory:True'
else:
return 'unknow'
filesystem = self.get_file_system()
status = filesystem.listStatus(self.path(path))
try:
file_index = str(status[0].getPath()).index(path) + len(path)
except:
print([])
file_list = [(str(m.getPath())[file_index:],
str(round(m.getLen() / 1024.0 / 1024.0, 2)) + ' MB',
str(datetime.datetime.fromtimestamp(m.getModificationTime() / 1000)),
str(file_or_dir(m.isFile(), m.isDirectory()))) for m in status]
if file_list and not is_return:
for f in file_list:
print(f)
if not file_list:
print([])
if is_return:
return file_list
def exists(self, path):
return self.filesystem.exists(self.path(path))
def mkdir(self, path):
return self.filesystem.mkdirs(self.path(path))
def mkdirs(self, path, mode="755"):
return self.filesystem.mkdirs(self.path(path))
def set_replication(self, path, replication):
return self.filesystem.setReplication(self.path(path), replication)
def mv(self, path1, path2):
return self.filesystem.rename(self.path(path1), self.path(path2))
def rm(self, path, recursive=True, print_info=True):
"""
        Delete a file or directory immediately (cannot be recovered!).
        :param path: file or directory to delete
        :param recursive: whether to delete recursively, defaults to True
"""
try:
result = self.filesystem.delete(self.path(path), recursive)
if result:
if print_info:
print('[Info]: Remove File Successful!')
return True
else:
if print_info:
print('[Error]: Remove File Failed!')
return result
except Exception as e:
if print_info:
print('[Error]: %s' % e)
def safe_rm(self, path, trash_path='.Trash/Current'):
"""
        Remove a file or directory by moving it to the trash, so it can be recovered.
        :param path: absolute path of the file or directory to remove
"""
try:
self.filesystem.rename(self.path(path), self.path(trash_path + path))
print('[Info]: Safe Remove File Successful!')
except:
try:
self.rm(self.path(trash_path + path))
self.filesystem.rename(self.path(path), self.path(trash_path + path))
print('[Info]: Safe Remove File Successful!')
except Exception as e:
print('[Error]: %s' % e)
print('[Error]: Remove File Failed!')
return True
def exists(self, path):
return self.filesystem.exists(self.path(path))
def chmod(self, path, mode):
self.filesystem.setPermission(self.path(path), mode)
def chown(self, path, owner, group):
self.filesystem.setOwner(self.path(path), owner, group)
# def get(self, src, dst, del_src=False,use_raw_local_file_system=True):
# self.filesystem.copyToLocalFile(del_src, self.path(src), dst, use_raw_local_file_system)
# def put(self, src, dst, del_src=False, overwrite=True):
# self.filesystem.copyFromLocalFile(del_src, src, dst, overwrite)
def run_time_count(func):
"""
        Measure a function's run time.
        Use as a decorator: @run_time_count
"""
def run(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print("Function [{0}] run time is {1} second(s).".format(func.__name__, round(time.time() - start, 4)))
return result
return run
@run_time_count
def write(self, path, contents, encode='utf-8', overwrite_or_append='overwrite'):
"""
        Write contents to an HDFS file.
        :param path: absolute HDFS path
        :param contents: file contents, either a string or a list of strings, e.g. rdd.collect() like ['str0,str1,str2','str3,str4,str5']
        :param encode: output encoding
        :param overwrite_or_append: write mode, 'overwrite' or 'append'
"""
try:
filesystem = self.get_file_system()
if overwrite_or_append == 'overwrite':
out = filesystem.create(self.path(path), True)
elif overwrite_or_append == 'append':
out = filesystem.append(self.path(path))
if isinstance(contents, list):
for content in contents:
out.write(bytearray(content + '\r\n', encode))
elif sys.version_info.major == 3 and isinstance(contents, str):
out.write(bytearray(contents, encode))
elif sys.version_info.major == 2 and (isinstance(contents, str) or isinstance(contents, unicode)):
out.write(bytearray(contents, encode))
else:
print('[Error]: Input data format is not right!')
return False
out.flush()
out.close()
print('[Path]: %s' % path)
print('[Info]: File Saved!')
return True
except Exception as e:
print('[Error]: %s' % e)
return False
@run_time_count
def read(self, path, sep=',', header=None, nrows=None):
"""
        Read a single utf-8 encoded csv/txt file stored on HDFS into a pandas.DataFrame.
        :param path: HDFS path of the file
        :param sep: field separator
        :param header: set to 0 to use the first row as the column names
        :param nrows: number of rows to read
"""
filesystem = self.get_file_system()
file = filesystem.open(self.path(path))
# print(file)
data = []
line = True
nrow = 0
if not nrows:
nrows_ = np.inf
else:
nrows_ = nrows
while line and nrow <= nrows_:
try:
nrow = nrow + 1
line = file.readLine()
data.append(line.encode('raw_unicode_escape').decode('utf-8').split(sep))
except Exception as e:
print('[Info]: %s' % str(e))
break
file.close()
if header == 0:
data = pd.DataFrame(data[1:], columns=data[0])
elif header:
data = pd.DataFrame(data, columns=header)
else:
data = pd.DataFrame(data)
return data
@run_time_count
def read_hdfs(self, path, sep=',', header=None, nrows=None):
"""
        Read the part-files stored under an HDFS path into a pandas.DataFrame.
        :param path: HDFS path of the directory
        :param sep: field separator
        :param header: set to 0 to use the first row as the column names
        :param nrows: number of rows to read per file
"""
filesystem = self.get_file_system()
files = self.ls(path, is_return=True)
files = list(map(lambda x: x[0], files))
file_flag = '/_SUCCESS'
files = list(filter(lambda x: x != file_flag, files))
files = list(map(lambda x: path + x, files))
print('[Info]: Num of need to read files is %s' % len(files))
# if file_flag in files:
# files = list(filter(lambda x: x != file_flag, files))
# files = list(map(lambda x: path + x, files))
# print('[Info]: Num of need to read files is %s' % len(files))
# else:
# print("[Error]: File format is incorrect! Try to use 'read()' replace 'read_hdfs()'.")
# return False
data = []
for file_path in files:
file = filesystem.open(self.path(file_path))
print('[Info]: Reading file %s' % file_path)
line = True
nrow = 0
if not nrows:
nrows_ = np.inf
else:
nrows_ = nrows
while line and nrow <= nrows_:
try:
nrow = nrow + 1
line = file.readLine()
if line is not None:
data.append(line.encode('raw_unicode_escape').decode('utf-8').split(sep))
except Exception as e:
print('[Error]: %s' % str(e))
break
file.close()
if header == 0:
            data = pd.DataFrame(data[1:], columns=data[0])
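# Illustrative sketch (added): typical use of the (truncated) Py4jHdfs helper above from an
# existing SparkContext. The paths and contents are placeholders; this block is an example,
# not part of the original module.
def _demo_py4j_hdfs(sc, base_path='/tmp/py4j_hdfs_demo'):
    hdfs = Py4jHdfs(sc)
    hdfs.write(base_path + '/demo.csv', ['a,b,c', '1,2,3'], overwrite_or_append='overwrite')
    hdfs.ls(base_path)
    df = hdfs.read(base_path + '/demo.csv', sep=',', header=0)
    hdfs.rm(base_path)
    return df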
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost.sklearn import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.dummy import DummyClassifier
from sklearn import model_selection
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from sklearn.utils import resample
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import TomekLinks
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import NearMiss
from iso3166 import countries
import matplotlib.pyplot as plt
import pycountry_convert as pc
import pycountry
def my_confusion_matrix(y_actual, y_predicted):
""" This method finds the number of True Negatives, False Positives,
True Positives and False Negative between the hidden movies
and those predicted by the recommendation algorithm
"""
cm = metrics.confusion_matrix(y_actual, y_predicted)
return cm[0][0], cm[0][1], cm[1][1], cm[1][0]
def get_metrics(y_test, y_predicted):
tn, fp, tp, fn = my_confusion_matrix(y_test, y_predicted)
print(tn, fp, tp, fn)
    G_mean = np.sqrt((tp / (tp + fn)) * (tn / (tn + fp)))  # geometric mean of sensitivity and specificity
print('G-mean: %.4f' % G_mean)
print('Balanced_Accuracy: %.4f' % metrics.balanced_accuracy_score(y_test, y_predicted))
print('F1: %.4f' % metrics.f1_score(y_test, y_predicted, average="micro"))
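# Illustrative sketch (added): a toy call of the metric helpers above on hand-made labels,
# useful as a sanity check of the confusion-matrix unpacking. Not part of the original pipeline.
def _demo_get_metrics():
    y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
    y_pred = np.array([0, 1, 1, 1, 0, 0, 1, 0])
    get_metrics(y_true, y_pred)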
def split_train_test(data, sampling=None):
# Implement UnderSampling
if sampling == 'undersample':
dfs = []
for i in range(0, 2):
curr_df = data[data['top_k'] == i]
dfs.append(resample(curr_df, replace=False, n_samples=1000, random_state=0))
        data = pd.concat(dfs)
import logging
from typing import Optional, Tuple
import click
import numpy as np
import pandas as pd
from tqdm import tqdm
from food_ke.entailment.custom_typing import PathLike
logging.basicConfig(level=logging.INFO)
def _link_entities_lexmapr(entity_queries: list) -> pd.Series:
logging.info("Linking entities with LexMapr")
pass
def _link_entities_ols(entity_queries: list) -> Tuple[pd.Series, pd.Series]:
logging.info("Linking entities with OLS")
entity_queries_unique = np.unique(entity_queries)
from ebi.ols.api.client import OlsClient
client = OlsClient()
out_ids = {}
out_names = {}
for query in tqdm(entity_queries_unique):
query_results = client.search(query=query, ontology="foodon")
if len(query_results) > 0:
logging.debug(query)
logging.debug(query_results[0])
out_ids[query] = query_results[0].obo_id
out_names[query] = query_results[0].name
else:
logging.warn("No results for query: {}".format(query))
out_ids[query] = None
out_names[query] = None
    out_ids = pd.Series(entity_queries)
import math
from lxml import etree
from scipy.spatial import distance
import scipy
import re
import fnmatch
import numpy as np
import pandas as pd
import os
from numpy import dot
from numpy.linalg import norm
import sparse_coding as sc
import evaluation as eval
from evaluation import Level
DOCS = dict()
DATA_PATH = "data/Single/Source/DUC/"
SUMMARY_PATH = "data/Single/Summ/Extractive/"
def __content_processing(content):
content = content.replace("\n", " ").replace("(", " ").replace(")", " "). \
replace(" ", " ").replace(" ", " ")
sentences = re.split("\.|\?|\!", content)
while sentences.__contains__(''):
sentences.remove('')
while sentences.__contains__(' \n'):
sentences.remove(' \n')
for sentence in sentences:
words = sentence.split(" ")
while words.__contains__(''):
words.remove('')
if len(words) < 2:
sentences.remove(sentence)
words = list(set(map(lambda x: x.strip(), content.replace("?", " ").replace("!", " ").replace(".", " ").
replace("؟", " ").replace("!", " ").replace("،", " ").split(" "))))
if words.__contains__(''):
words.remove('')
return sentences, words
def read_document(doc_name):
file_path = DATA_PATH + doc_name
with open(file_path) as fp:
doc = fp.readlines()
content = ""
for line in doc:
content += line
fp.close()
return __content_processing(content)
def read_documents(directory_path):
# directory_path = "data/Multi/Track1/Source/D91A01/"
directory = os.fsencode(directory_path)
contents = ""
for file in os.listdir(directory):
filename = os.fsdecode(file)
content = etree.parse(directory_path + filename)
memoryElem = content.find('TEXT')
DOCS[filename] = memoryElem.text
contents += memoryElem.text
return __content_processing(contents)
def make_term_frequency(sentences, words):
term_frequency = dict()
for sentence in sentences:
vector = list()
for i in range(0, len(words)):
word = words[i]
vector.append(sentence.count(word))
if norm(vector) != 0:
term_frequency[sentence] = vector
# term_frequency[sentence] = vector / norm(vector, ord=1)
return term_frequency
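# Illustrative sketch (added): a tiny call of the term-frequency helper above; the two toy
# sentences are placeholders and are not taken from the DUC corpus.
def _demo_term_frequency():
    sentences = ["the cat sat", "the dog sat"]
    words = ["the", "cat", "dog", "sat"]
    tf = make_term_frequency(sentences, words)
    return pd.DataFrame(tf, index=words)  # rows = words, columns = sentences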
def __avg_sent_2_vec(words, model):
M = []
for w in words:
try:
M.append(model[w])
except:
continue
M = np.array(M)
v = M.mean(axis=0)
return v / np.sqrt((v ** 2).sum())
def read_word2vec_model():
w2v = dict()
print("waiting to load word2vec model...")
with open('twitt_wiki_ham_blog.fa.text.100.vec', 'r', encoding='utf-8') as infile:
first_line = True
for line in infile:
if first_line:
first_line = False
continue
tokens = line.split()
w2v[tokens[0]] = [float(el) for el in tokens[1:]]
if len(w2v[tokens[0]]) != 100:
print('Bad line!')
print("model loaded")
return w2v
def make_word_2_vec(data, model):
word2vec = dict() # final dictionary containing sentence as the key and its representation as value
DocMatix = np.zeros((len(data), 100))
for i in range(len(data)):
words = list(map(lambda x: x.strip(), data[i].replace("?", " ").replace("!", " ").replace(".", " ").
replace("؟", " ").replace("!", " ").replace("،", " ").split(" ")))
if words.__contains__(''):
words.remove('')
result = __avg_sent_2_vec(words, model)
if not (np.isnan(result).any()):
DocMatix[i] = result
word2vec[data[i]] = DocMatix[i]
print("features calculated")
# print(word2vec)
    train_df = pd.DataFrame(DocMatix)
from onecodex.exceptions import OneCodexException
class VizMetadataMixin(object):
def plot_metadata(
self,
rank="auto",
haxis="Label",
vaxis="simpson",
title=None,
xlabel=None,
ylabel=None,
return_chart=False,
plot_type="auto",
label=None,
sort_x=None,
):
"""Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) to be plotted on
the horizontal axis.
vaxis : `string`, optional
Data to be plotted on the vertical axis. Can be any one of the following:
- A metadata field: the name of a metadata field containing numerical data
- {'simpson', 'chao1', 'shannon'}: an alpha diversity statistic to calculate for each sample
- A taxon name: the name of a taxon in the analysis
- A taxon ID: the ID of a taxon in the analysis
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
plot_type : {'auto', 'boxplot', 'scatter'}
By default, will determine plot type automatically based on the data. Otherwise, specify
one of 'boxplot' or 'scatter' to set the type of plot manually.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
sort_x : `callable`, optional
Function will be called with a list of x-axis labels as the only argument, and must
return the same list in a user-specified order.
Examples
--------
Generate a boxplot of the abundance of Bacteroides (genus) for samples grouped by whether the
individuals are allergic to dogs, cats, both, or neither.
>>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides')
"""
# Deferred imports
import altair as alt
import pandas as pd
from onecodex.viz import boxplot
if rank is None:
raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
if plot_type not in ("auto", "boxplot", "scatter"):
raise OneCodexException("Plot type must be one of: auto, boxplot, scatter")
# alpha diversity is only allowed on vertical axis--horizontal can be magically mapped
df, magic_fields = self._metadata_fetch([haxis, "Label"], label=label)
if vaxis in ("simpson", "chao1", "shannon"):
df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)
magic_fields[vaxis] = vaxis
else:
# if it's not alpha diversity, vertical axis can also be magically mapped
vert_df, vert_magic_fields = self._metadata_fetch([vaxis])
# we require the vertical axis to be numerical otherwise plots get weird
if (
pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]])
or pd.api.types.is_categorical_dtype(vert_df[vert_magic_fields[vaxis]])
or pd.api.types.is_object_dtype(vert_df[vert_magic_fields[vaxis]])
or not pd.api.types.is_numeric_dtype(vert_df[vert_magic_fields[vaxis]])
): # noqa
raise OneCodexException("Metadata field on vertical axis must be numerical")
df = pd.concat([df, vert_df], axis=1).dropna(subset=[vert_magic_fields[vaxis]])
magic_fields.update(vert_magic_fields)
# plots can look different depending on what the horizontal axis contains
if pd.api.types.is_datetime64_any_dtype(df[magic_fields[haxis]]):
category_type = "T"
if plot_type == "auto":
plot_type = "boxplot"
elif "date" in magic_fields[haxis].split("_"):
df.loc[:, magic_fields[haxis]] = df.loc[:, magic_fields[haxis]].apply(
pd.to_datetime, utc=True
)
category_type = "T"
if plot_type == "auto":
plot_type = "boxplot"
elif (
| pd.api.types.is_bool_dtype(df[magic_fields[haxis]]) | pandas.api.types.is_bool_dtype |
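# Standalone sketch of the dtype dispatch used above (toy metadata frame, not One
# Codex data): datetime and boolean/object columns lead to boxplots, numeric
# columns to scatter plots.
import pandas as pd

_toy_meta = pd.DataFrame({
    "collection_date": pd.to_datetime(["2020-01-01", "2020-02-01"]),
    "allergy_dogs": [True, False],
    "ph": [6.8, 7.2],
})
for _col in _toy_meta.columns:
    if pd.api.types.is_datetime64_any_dtype(_toy_meta[_col]):
        _kind = "temporal -> boxplot"
    elif pd.api.types.is_bool_dtype(_toy_meta[_col]) or pd.api.types.is_object_dtype(_toy_meta[_col]):
        _kind = "categorical -> boxplot"
    elif pd.api.types.is_numeric_dtype(_toy_meta[_col]):
        _kind = "numeric -> scatter"
    else:
        _kind = "unsupported"
    print(_col, _kind)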
""" module of functions that allow you to create per-cell / per-sample summary tables """
import numpy as np
import math
import pandas as pd
def mutations_df_fill_in(GOI, GOI_df, mutationsDF_):
""" creates a cell-wise dataframe with mutations to each GOI """
mutName = GOI + '_mut'
for i in range(0,len(mutationsDF_.index)):
currCell = mutationsDF_['cell'][i]
rightIndex = GOI_df['cell'] == currCell
rightRow = GOI_df[rightIndex]
rightCell = rightRow['cell']
rightCell = str(rightCell).split()[1]
rightMut = rightRow['mutations']
rightMut = str(rightMut).split()[1]
mutationsDF_[mutName][i] = rightMut
def remove_extra_characters_mutations_df(GOI, mutationsDF_):
""" converting df cols from lists to strings """
mutName = GOI + '_mut'
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace("'", "", regex=False) # remove quotes
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace("[", "", regex=False) # remove opening brackets (literal, not a regex character class)
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace("]", "", regex=False) # remove closing brackets
mutationsDF_[mutName] = mutationsDF_[mutName].str.replace(" ", "", regex=False) # remove whitespace
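# Sketch of the same stripping on a throwaway Series: regex=False makes pandas treat
# "[" and "]" as literal characters rather than (invalid) regular expressions.
import pandas as pd

_raw = pd.Series(["['E746_A750del']", "['L858R']"])  # invented example values
_clean = (_raw.str.replace("'", "", regex=False)
              .str.replace("[", "", regex=False)
              .str.replace("]", "", regex=False))
print(_clean.tolist())  # ['E746_A750del', 'L858R']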
def generic_summary_table_fill_in(metaField, summaryField, summaryTable_, patientMetadata_):
""" fills in a given metadata field in summaryTable_ """
for i in range(0,len(summaryTable_.index)):
currCell = summaryTable_['cell'].iloc[i]
currPlate = currCell.split('_')[1]
index_to_keep = patientMetadata_['plate'] == currPlate
keepRow = patientMetadata_[index_to_keep]
try:
currField = list(keepRow[metaField])[0]
summaryTable_[summaryField][i] = currField
except IndexError:
continue
#print('ERROR: plate not found') # these are just the plates we're NOT
# including in the analysis
def fusions_fill_in(fusionsDF_, summaryTable_):
""" takes the existing fusionsDF and populates summaryTable_ with this shit """
for i in range(0, len(summaryTable_.index)):
currCell = summaryTable_['cell'].iloc[i]
for col in fusionsDF_.columns:
if currCell in list(fusionsDF_[col]):
summaryTable_['fusions_found'][i] = col
def translated_muts_fill_in(GOI, summaryTable_):
""" converts 'raw' mutation calls to something that more closely resembles
those reported in our clinical cols. general """
colName = 'mutations_found_' + GOI
for i in range(0,len(summaryTable_.index)):
translatedList = []
currCell = summaryTable_['cell'].iloc[i]
currMuts = summaryTable_[colName].iloc[i]
currMuts_split = currMuts.split(',')
for item in currMuts_split:
if item != '' and '?' not in item:
translatedList.append(GOI + ' ' + item)
summaryTable_['mutations_found_translated'][i] = summaryTable_['mutations_found_translated'][i] + translatedList
def translated_muts_fill_in_egfr(summaryTable_):
""" converts 'raw' mutation calls to something that more closely resembles
those reported in our clinical cols. egfr, specificially """
for i in range(0,len(summaryTable_.index)):
translatedList = []
currCell = summaryTable_['cell'].iloc[i]
currMuts_egfr = summaryTable_['mutations_found_EGFR'].iloc[i]
currMuts_egfr_split = currMuts_egfr.split(',')
for item in currMuts_egfr_split:
if 'delELR' in item:
translatedList.append('EGFR del19')
elif '745_' in item:
translatedList.append('EGFR del19')
elif '746_' in item:
translatedList.append('EGFR del19')
elif 'ins' in item:
translatedList.append('EGFR ins20')
elif item != '':
translatedList.append('EGFR ' + item)
summaryTable_['mutations_found_translated'][i] = translatedList
def translated_muts_fill_in_fusions(summaryTable_):
""" converts 'raw' mutation calls to something that more closely resembles
those reported in our clinical cols. for fusions """
for i in range(0,len(summaryTable_.index)):
currCell = summaryTable_['cell'].iloc[i]
currFus = summaryTable_['fusions_found'].iloc[i]
if not | pd.isnull(currFus) | pandas.isnull |
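# Condensed restatement of the EGFR translation rules in translated_muts_fill_in_egfr()
# above, as a small standalone helper; the raw calls passed in are invented examples.
def _translate_egfr(raw_call):
    if "delELR" in raw_call or "745_" in raw_call or "746_" in raw_call:
        return "EGFR del19"
    if "ins" in raw_call:
        return "EGFR ins20"
    return "EGFR " + raw_call

print([_translate_egfr(c) for c in ["E746_A750del", "D770insNPG", "L858R"]])
# ['EGFR del19', 'EGFR ins20', 'EGFR L858R']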
import json
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import pandas as pd
import pathlib as pl
from utils import read_data_cfg
def count_people(json_path):
#output_path = 'K:/dataset/flir_output dataset/'
#category_dict_path = 'K:/dataset/flir dataset/train/thermal_annotations.json'
with open(json_path) as json_file:
data = json.load(json_file)
categories = ['person','car','bicycle','dog']
cat = pd.DataFrame(data['categories']).rename(columns={'id':'category_id','name':'category'})
#for c in categories:
annotations = pd.DataFrame(data['annotations'])
images = pd.DataFrame(data['images']).rename(columns={'id':'image_id'})
df = annotations.merge(cat,how='left',on=['category_id'])
#df['category'] = df['category'].fillna('empty')
#g = df.groupby(['image_id','category']).size().reset_index(name='count').groupby(['count','category']).size().reset_index(name='count_c')
#g['count_c'] = g[['count_c']].apply(lambda x: x/x.sum()*100)
return(df)
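# Sketch of the per-image histogram hinted at by the commented-out groupby above,
# run on an invented annotations frame rather than the FLIR thermal_annotations.json.
import pandas as pd

_toy_ann = pd.DataFrame({
    "image_id": [1, 1, 1, 2, 2, 3],
    "category": ["person", "person", "car", "person", "car", "dog"],
})
_per_image = _toy_ann.groupby(["image_id", "category"]).size().reset_index(name="count")
_histogram = _per_image.groupby(["count", "category"]).size().reset_index(name="count_c")
print(_histogram)  # how many images contain N annotations of each category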
def count_kaist_people(data_path):
options = read_data_cfg(data_path)
train_file = options['train']
#output_path = 'K:/dataset/flir_output dataset/'
#category_dict_path = 'K:/dataset/flir dataset/train/thermal_annotations.json'
data = []
with open(train_file) as tf:
images = tf.readlines()
for i in images:
labpath = i.replace('images', 'labels').replace('.jpg', '.txt').replace('.jpeg', '.txt').replace('.png','.txt').replace('.tif', '.txt')
txt = pl.Path(labpath.rstrip())
if txt.exists():
with txt.open('r') as t:
size = len(t.readlines())
data.append({'file_name':i.rstrip(),'size':size})
df = pd.DataFrame(data).groupby('size').count()
df.plot(grid=True,marker='o',markevery=2,ylabel='number of annotations',xlabel='annotations per image')
plt.xlabel('annotations per image')
plt.ylabel('number of annotations')
plt.legend(['person'])
plt.show()
# # Close file
# rd.close()
# data = json.load(json_file)
# categories = ['person','car','bicycle','dog']
# cat = pd.DataFrame(data['categories']).rename(columns={'id':'category_id','name':'category'})
# #for c in categories:
# annotations = pd.DataFrame(data['annotations'])
# images = pd.DataFrame(data['images']).rename(columns={'id':'image_id'})
# df = annotations.merge(cat,how='left',on=['category_id'])
# #df['category'] = df['category'].fillna('empty')
# #g = df.groupby(['image_id','category']).size().reset_index(name='count').groupby(['count','category']).size().reset_index(name='count_c')
# #g['count_c'] = g[['count_c']].apply(lambda x: x/x.sum()*100)
# return(df)
def square_mean_loss(annotation_json,detection_json):
with open(annotation_json) as ann_file:
ann_data = json.load(ann_file)
with open(detection_json) as det_file:
det_data = json.load(det_file)
category_dict = {
1:"person",
2:"bicycle",
3:"car",
17:"dog"
}
# cat = pd.DataFrame(ann_data['categories']).rename(columns={'id':'category_id','name':'category'})
images = pd.DataFrame(ann_data['images']).rename(columns={'id':'image_id'})
ann_df = | pd.DataFrame(ann_data['annotations']) | pandas.DataFrame |
# import os
# os.chdir('C:/Users/ali_m/AnacondaProjects/PhD/Semiology-Visualisation-Tool/')
from .Bayes_rule import Bayes_All
from pandas.testing import assert_series_equal, assert_frame_equal
import pandas as pd
from pathlib import Path
from collections import defaultdict
from mega_analysis.Bayesian.Bayes_rule import Bayes_rule, renormalised_probabilities
# --------------Load----------------------------
directory = Path(__file__).parent.parent.parent/'resources' / 'Bayesian_resources'
marginal_folder = 'SemioMarginals_fromSS_GIFmarginals_from_TS'
prob_S_given_GIFs_norm = pd.read_csv(directory / 'prob_S_given_GIFs_norm.csv', index_col=0)
p_S_norm = pd.read_csv(directory / marginal_folder / 'p_S_norm_SS.csv', index_col=0)
p_GIF_norm = pd.read_csv(directory / marginal_folder / 'p_GIF_norm_TS_granular.csv', index_col=0)
prob_S_given_GIFs_notnorm = pd.read_csv(directory / 'prob_S_given_GIFs_notnorm.csv', index_col=0)
p_S_notnorm = pd.read_csv(directory / marginal_folder / 'p_S_notnorm_SS.csv', index_col=0)
p_GIF_notnorm = | pd.read_csv(directory / marginal_folder / 'p_GIF_notnorm_TS_granular.csv', index_col=0) | pandas.read_csv |
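# Generic worked example of the Bayes update being tested here (toy numbers, not the
# mega_analysis implementation): posterior P(GIF | S) is proportional to
# P(S | GIF) * P(GIF), renormalised across GIF parcellations.
import pandas as pd

_p_S_given_GIF = pd.DataFrame({"GIF_1": [0.6, 0.4], "GIF_2": [0.2, 0.8]},
                              index=["aura", "dystonia"])
_p_GIF = pd.Series({"GIF_1": 0.3, "GIF_2": 0.7})
_posterior = _p_S_given_GIF.mul(_p_GIF, axis=1)              # P(S|GIF) * P(GIF)
_posterior = _posterior.div(_posterior.sum(axis=1), axis=0)  # renormalise over GIFs
print(_posterior)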
import os
import pandas as pd
import camelot
from tkinter import Tk
from tkinter import filedialog
from glob import glob
root = Tk().withdraw()
def select_file():
folder = filedialog.askdirectory(title="Select Folder with pdf Files")
files=sorted([f for f in glob(f'{folder}/*.pdf')]) #<-----pdf folder path
return folder, files
def data(dfs):
dfs_list=[]
for i in dfs:
tables = camelot.read_pdf(i, flavor='stream', row_tol=8, table_areas=['12,563,577,115'], split_text=True)
data=tables[0].df
df=pd.DataFrame(data)
df.columns=df.loc[0]
df.columns=[' '.join(x.split('\n')) for x in df.columns]
df=df.drop([0])
df=df.drop(columns=['FECHA VALOR','ORIG','REFERENCIA'])
last_saldo=(df.loc[df['CONCEPTO'].str.contains('Saldo'),'CONCEPTO']==True).index[-1]
df=df.drop(df.index[last_saldo:],axis=0)
df.columns=['FECHA OPER', 'CONCEPTO', 'DESCRIPCION', 'CARGO', 'ABONO', 'SALDO']
df=df.append( | pd.Series(dtype='object') | pandas.Series |
import glob
import json
import multiprocessing
import ntpath
import traceback
from datetime import date, datetime
from multiprocessing import Pool
import numpy as np
import pandas as pd
from dateutil import rrule
from requests import Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from tqdm.auto import tqdm
from config import *
# Default headers for Coinmarketcap
headers = {
"accept": "application/json, text/plain, */*",
"accept-language": "en-US,en;q=0.9,vi-VN;q=0.8,vi;q=0.7",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
}
class HODL:
def __init__(self, alpha, n_coins, cap):
self.alpha = alpha
self.n_coins = n_coins
self.cap = cap
def list_binance(self):
"""
Get all the trading pairs with quote asset as USDT
and match with CoinMarketCap
"""
session = Session()
session.headers.update(headers)
try:
pairs = session.get("https://api.binance.com/api/v3/exchangeInfo").json()
pairs = [
p["symbol"]
for p in pairs["symbols"]
if ((p["quoteAsset"] == "USDT") & (p["status"] == "TRADING"))
]
bnb_coins = [c.replace("USDT", "").lower() for c in pairs]
cmc_coins_ = session.get(
"https://web-api.coinmarketcap.com/v1/cryptocurrency/listings/latest",
params={
"aux": "circulating_supply,max_supply,total_supply",
"convert": "USD",
"cryptocurrency_type": "coins",
"limit": "100",
"sort": "market_cap",
"sort_dir": "desc",
"start": "1",
},
).json()
cmc_coins = [c["symbol"].lower() for c in cmc_coins_["data"]]
coins = [c for c in cmc_coins if c in bnb_coins]
tmp = dict()
for c in cmc_coins_["data"]:
if c["symbol"].lower() in coins:
tmp[c["slug"]] = c["symbol"].lower()
return tmp
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
return None
def weighted_market_cap(self):
"""
Calculate exponential weighted moving average market cap
"""
all_data = sorted(glob.glob("./data/processed/*.csv"))
for path in tqdm(all_data):
df = | pd.read_csv(path) | pandas.read_csv |
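# Sketch of an exponentially weighted moving-average market cap with pandas; the
# 'market_cap' column name and the values are assumptions, not the schema of the
# processed CSVs read above, and self.alpha would play the role of alpha here.
import pandas as pd

_toy = pd.DataFrame({"market_cap": [100.0, 110.0, 90.0, 120.0]})
_alpha = 0.3
_toy["ewm_market_cap"] = _toy["market_cap"].ewm(alpha=_alpha).mean()
print(_toy)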
def getMetroStatus():
import http.client, urllib.request, urllib.parse, urllib.error, base64, time
headers = {
# Request headers
'api_key': '6b700f7ea9db408e9745c207da7ca827',}
params = urllib.parse.urlencode({})
try:
conn = http.client.HTTPSConnection('api.wmata.com')
conn.request("GET", "/StationPrediction.svc/json/GetPrediction/All?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
conn.close() #close the connection before returning (a close placed after return would never run)
return str(data) #returns the data as a string rather than raw bytes
except Exception as e:
print("Request failed: {0}".format(e)) #a generic Exception may not carry errno/strerror
def JSONfromMetro(trainString): #converts the string into a dictionary file
import json, re
fixSlash=re.compile(r'\\') #this line and the next remove triple-slashes, which screw up the json module
fixedTrainString=fixSlash.sub('',trainString)
trainJSON=json.loads(fixedTrainString[2:-2]+"}") #slightly adjusts the string to put it in json form
if isinstance(trainJSON,dict) and 'Trains' in trainJSON.keys():
return trainJSON['Trains']
else:
return None
def saveWMATASQL(trainData, engine): #saves the current WMATA data to open engine
import datetime, pandas as pd
#the line below creates a table name starting with WMATA and then containing the date and time information, with each day/hour/minute/second taking two characters
if not isinstance(trainData, list):
return None
DTstring=str(datetime.datetime.now().month)+str(datetime.datetime.now().day).rjust(2,'0')+str(datetime.datetime.now().hour).rjust(2,'0')+str(datetime.datetime.now().minute).rjust(2,'0')+str(datetime.datetime.now().second).rjust(2,'0')
trainFrame=pd.DataFrame('-', index=range(len(trainData)), columns=['DT','Car','Loc','Lin','Des','Min','Gro']) #creates trainFrame, the DataFrame to send to the SQL server
for iter in range(len(trainData)): #for all the trains in trainData
trainFrame.loc[iter]['DT']=DTstring
for colName in ['Car','LocationCode','Line','DestinationCode','Min','Group']: #select the six relevant fields
trainFrame.loc[iter][colName[:3]]=trainData[iter][colName] #and fill in the relevant data
trainFrame.to_sql('WMATAFull', engine, if_exists='append') #send trainFrame to the SQL server
return trainFrame
def lineNextDF(line, destList, arrData):
import pandas as pd
timeString=arrData.DT.iloc[0]
rowName=pd.to_datetime('2016-'+timeString[0]+'-'+timeString[1:3]+' '+timeString[3:5]+':'+timeString[5:7]+':'+timeString[7:])
# names the row as a timestamp with the month day hour minute second
lineStat=pd.DataFrame('-',index=[rowName],columns=line)
for station in line: #repeat the below process for every station on the line
trains2consider=arrData.loc[lambda df: df.Loc==station].loc[lambda df: df.Des.isin(destList)] #pull out the trains at that station heading toward the destinations
if len(trains2consider.index)>0: #If you found a train
if trains2consider.Des.iloc[0] in ['A11','B08','E01','K04']: #the next few lines set the station status to the color and ETA of the first arriving train
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0].lower()+':'+trains2consider.Min.iloc[0] #if the train is terminating early (at Grovesnor, Silver Spring or Mt Vernon), use lowercase
elif trains2consider.Des.iloc[0]=='E06':
lineStat.loc[rowName,station]='Yl:'+trains2consider.Min.iloc[0]
elif trains2consider.Des.iloc[0]=='A13':
lineStat.loc[rowName,station]='Rd:'+trains2consider.Min.iloc[0]
else:
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0]+':'+trains2consider.Min.iloc[0] #otherwise use upper
return lineStat
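# Usage sketch for lineNextDF() with a hand-made arrivals frame; the station,
# line and destination codes below are illustrative, and a real frame comes from
# saveWMATASQL() / the WMATAFull table.
import pandas as pd

_toy_arrivals = pd.DataFrame({
    "DT":  ["704123045", "704123045"],  # month(1) day(2) hour(2) minute(2) second(2)
    "Loc": ["C01", "C02"],
    "Lin": ["OR", "SV"],
    "Des": ["D13", "D13"],
    "Min": ["BRD", "5"],
})
_status = lineNextDF(["C01", "C02", "C03"], ["D13"], _toy_arrivals)
print(_status)  # one row: C01 -> 'OR:BRD', C02 -> 'SV:5', C03 stays '-'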
def allLNtoNE(arrData, surgeNum): #all of the lines to the North and East during Surge 4
import pandas as pd
LNlist=[]
for num in range(len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][num], NEdestList[surgeNum][num], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def allLNtoSW(arrData, surgeNum): #all of the lines to the South and West during Surge 4
import pandas as pd
LNlist=[]
for num in range(1,1+len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][-num][::-1], SWdestList[surgeNum][-num][::-1], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def WMATAtableSQL(timeMin,intervalSec, surgeNum): #records for timeMin minutes, about ever intervalSec seconds
import time, pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@team<EMAIL>:5432/WmataData') #opens the engine to WmataData
#creates a list of the table we're creating to add to the index
isStart=True
startTime=time.time()
while time.time()<(startTime+60*timeMin): #runs for timeMin minutes
stepStart=time.time()
WMATAdf=saveWMATASQL(JSONfromMetro(getMetroStatus()),engine) #save the current train data and appends the name to tableList
if isinstance(WMATAdf,pd.DataFrame) and len(WMATAdf.index)>0: #if you got data back
if isStart: #and it's the first row
allLN2NE=allLNtoNE(WMATAdf,surgeNum) #set allLNtoNE equal to the all LineNext to NE data
allLN2SW=allLNtoSW(WMATAdf,surgeNum) #set allLNtoSW equal to the all LineNext to SW data
isStart=False #and the next row will not be the first row
else: #for other rows
allLN2NE=allLN2NE.append(allLNtoNE(WMATAdf,surgeNum)) #append the data
allLN2SW=allLN2SW.append(allLNtoSW(WMATAdf,surgeNum))
stepTime=time.time()-stepStart #calculates the time this step took
if stepTime<intervalSec: #if intervalSec seconds have not passed,
time.sleep(intervalSec-stepTime) #wait until a total of intervalSec have passed
engine.connect().close()
return [allLN2NE, allLN2SW]
def lineNextSQL(line, timeString,destList, engine): #reads the next train to arrive at the stations in line heading toward destList and returns it as a Data Frame
import pandas as pd
from sqlalchemy import create_engine
isEngineNone=(engine is None)
if isEngineNone: #if there's not an engine, make one
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT"='+"'"+timeString+"';"
arrData=pd.read_sql(query,engine)
if isEngineNone:
engine.connect().close()
return lineNextDF(line, destList, arrData)
def lineNextTableSQL(line, firstTime, lastTime, destList): #saves the next train arrivals for a line and destList over time
import time, pandas as pd
from sqlalchemy import create_engine
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!<EMAIL>:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData=pd.read_sql(query,engine)
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
if len(arrData.index)==0:
return None
timesPD=arrData.DT.value_counts().sort_index().index #pull out each time and call it timesPD
lineStats=lineNextDF(line, destList, arrData.loc[lambda df: df.DT==timesPD[0]]) #save the first status
for num in range(1,len(timesPD)): #for each time
lineStats=lineStats.append(lineNextDF(line, destList, arrData.loc[lambda df: df.DT==timesPD[num]])) #add the data for that time
engine.connect().close()
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
return lineStats
def allLNtoNEtable(firstTime, lastTime, surgeNum): #saves the next train arrivals for a line and destList over time
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@<EMAIL>:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData=pd.read_sql(query,engine)
if len(arrData.index)==0: #if you didn't get any data,
return None #return nothing
timesPD=arrData.DT.value_counts().sort_index().index #pull out each time and call it timesPD
lineStats=allLNtoNE(arrData.loc[lambda df: df.DT==timesPD[0]],surgeNum) #save the first status
for num in range(1,len(timesPD)): #for each time
lineStats=lineStats.append(allLNtoNE(arrData.loc[lambda df: df.DT==timesPD[num]],surgeNum)) #add the data for that time
engine.connect().close()
return lineStats
def allLNtoSWtable(firstTime, lastTime, surgeNum): #saves the next train arrivals for a line and destList over time
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData=pd.read_sql(query,engine)
if len(arrData.index)==0: #if you didn't get any data,
return None #return nothing
timesPD=arrData.DT.value_counts().sort_index().index #pull out each time and call it timesPD
lineStats=allLNtoSW(arrData.loc[lambda df: df.DT==timesPD[0]],surgeNum) #save the first status
for num in range(1,len(timesPD)): #for each time
lineStats=lineStats.append(allLNtoSW(arrData.loc[lambda df: df.DT==timesPD[num]],surgeNum)) #add the data for that time
engine.connect().close()
return lineStats
def trainBuild(lineStat,startTime): #determines how long it took the train arriving after startTime to reach every station and returns it as one row data frame
import pandas as pd
timeRow=list(lineStat.index).index(startTime) #finds the row number from lineStat labeled startTime and calls it timeRow
specTrain=pd.concat([pd.DataFrame('-',index=[startTime],columns=['Col']),pd.DataFrame(0,index=[startTime],columns=list(lineStat.columns))], axis=1, join='outer')
while timeRow<len(lineStat.index)-1 and (not isinstance(lineStat.iloc[timeRow][0], str) or len(lineStat.iloc[timeRow][0])<6 or lineStat.iloc[timeRow][0][-3:]!='BRD'): #while timeRow is in bounds and no train is boarding,
timeRow+=1 #go to the next line
skipRows=timeRow-list(lineStat.index).index(startTime) #skipRows is the number of rows to skip the next time it looks for a train
if timeRow>=len(lineStat.index): #if you get to the end,
return [specTrain, skipRows] #just return what you have
specTrain.loc[startTime,'Col']=lineStat.iloc[timeRow][0][:2] #fills in the color, which is stored as the first two letters in the status
timeDif=lineStat.index[timeRow]-startTime #set timeDif to the diffence between arrival at this station and startTime
specTrain.loc[startTime,lineStat.columns[0]]=timeDif.seconds #store timeDif as seconds
for stationNum in range(1,len(lineStat.columns)): #this fills in the difference arrival time for every station
isTrainBoarding=False
while timeRow<(len(lineStat.index)-1) and not isTrainBoarding: #while timeRow is in bounds and the train is not boarding
#The line below says that a train is boarding if either it has status "BRD" or it has status "ARR" and 20 seconds later the station is waiting for a different train
isTrainBoarding=lineStat.iloc[timeRow][stationNum]==(specTrain.loc[startTime,'Col']+":BRD") or (lineStat.iloc[timeRow][stationNum]==(specTrain.loc[startTime,'Col']+":ARR") and (lineStat.iloc[timeRow+1][stationNum][:2]!=specTrain.loc[startTime,'Col']))
timeRow+=1 #go to the next line
if timeRow>=len(lineStat.index)-1: #if you get to the end,
return [specTrain, skipRows] #just return what you have
timeDif=lineStat.index[timeRow]-startTime #set timeDif to the diffence between arrival at this station and startTime
specTrain.loc[startTime,lineStat.columns[stationNum]]=timeDif.seconds #store timeDif as seconds
if stationNum<len(lineStat.columns)-1: #if you found a trains, go down a certain number of rows before checking the next station
if lineStat.columns[stationNum] in minDist.keys() and lineStat.columns[stationNum+1] in minDist.keys(): #if both stations are in minDist
timeRow+=minDist[lineStat.columns[stationNum]][lineStat.columns[stationNum+1]]['weight'] #go down the number of rows recorded in minDist
else:
timeRow+=2 #if the connection isn't in minDist, go down two rows
if (specTrain.loc[startTime,'Col'].islower() and lineStat.columns[stationNum] in ['A11','B08','E01','K04']) or (specTrain.loc[startTime,'Col']=='Yl' and lineStat.columns[stationNum]=='E05') or (specTrain.loc[startTime,'Col']=='Rd' and lineStat.columns[stationNum]=='A13'):
break
return [specTrain, skipRows]
def trainTable(lineStat): #returns a table listing the trains by start time, color and the time they took to reach a given station
import pandas as pd
[masterTable,rowNum]=trainBuild(lineStat,lineStat.index[0]) #builds the first row and lets it now how many rows to go forward to get to the next train arrival
currentColor=masterTable.iloc[0][0] #record the color of the first train as currentColor
newTrain=masterTable #newTrain just needs to be something for when it's referenced in the if statement
while rowNum<len(lineStat.index):# and newTrain.iloc[0][-1]!=0: #keep going as long as there's data to analyze and each train gets to the end
while rowNum<len(lineStat.index)-1 and lineStat.iloc[rowNum][0]==currentColor+':BRD': #while the train (with currentColor) is boarding,
rowNum+=1 #go to the next row
[newTrain, skipRows]=trainBuild(lineStat,lineStat.index[rowNum]) #once you've gotten to a new train arrival, record it as newTrain and note the rows to skip
masterTable=masterTable.append(newTrain) #append newTrain to the masterTable
currentColor=masterTable.iloc[-1][0] #change currentColor to the color of the train that just boarded
rowNum+=skipRows+1 #skip ahead to the next train
return masterTable
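# Downstream sketch: summarising a trainTable()-style result into average travel
# times per line colour; the station codes and second counts below are invented.
import pandas as pd

_toy_trains = pd.DataFrame(
    {"Col": ["OR", "SV", "OR"],
     "K01": [0, 0, 0],
     "K02": [150, 160, 145],
     "K03": [310, 330, 300]},
    index=pd.to_datetime(["2016-07-04 12:00:05", "2016-07-04 12:06:40",
                          "2016-07-04 12:12:15"]))
print(_toy_trains.groupby("Col").mean())  # mean seconds for each colour to reach each station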
def lastBRDtime(newTrainBRDtime, lineStat, stationNum): #finds the last time a train boarded at a given station before newTrainBRDtime
import pandas as pd
timeRow=list(lineStat.index).index(newTrainBRDtime)-2 #start with a time two rows before the train reaches the station
isTrainBoarding=False # the next few lines just say keep moving backwards in time until you get to a train board
while timeRow>0 and not isTrainBoarding: #if you haven't hit the beginning and a train isn't boarding
isTrainBoarding=isinstance(lineStat.iloc[timeRow,stationNum], str) and len(lineStat.iloc[timeRow, stationNum])==6 and lineStat.iloc[timeRow,stationNum][-3:]=='BRD' #a train is boarding if it's a string of length 6 with BRD as the last three letters
timeRow-=1
return lineStat.index[timeRow] #return that time
def trainTableIntermediate(lineStat, stationList): #returns a table listing the trains by start time, color and the time they took to reach a given station, with the possibility that a train started at an intermediary station
import pandas as pd
staNumList=[]
for station in stationList: #turn the list of stations into a list of numbers corresponding to the stations' location in lineStat's columns
staNumList.append(list(lineStat.columns).index(station))
[masterTable,rowNum]=trainBuild(lineStat,lineStat.index[0]) #builds the first row and lets it now how many rows to go forward to get to the next train arrival
currentColor=masterTable.iloc[0][0] #record the color of the first train as currentColor
newTrain=masterTable #newTrain just needs to be something for when it's referenced in the if statement
while rowNum<len(lineStat.index):# and newTrain.iloc[0][-1]!=0: #keep going as long as there's data to analyze and each train gets to the end
while rowNum<len(lineStat.index)-1 and lineStat.iloc[rowNum,0]==currentColor+':BRD': #while the train (with currentColor) is boarding,
rowNum+=1 #go to the next row
[newTrain, skipRows]=trainBuild(lineStat,lineStat.index[rowNum]) #once you've gotten to a new train arrival, record it as newTrain and note the rows to skip
for staNum in staNumList: #for all the intermediary stations in stationList
mostRecentBRDtime=lastBRDtime(newTrain.index[0]+pd.to_timedelta(newTrain.iloc[0,staNum],unit='s'), lineStat, staNum) #find the last train to board at this station
if mostRecentBRDtime>=masterTable.index[-1]+ | pd.to_timedelta(masterTable.iloc[-1,staNum]+42,unit='s') | pandas.to_timedelta |