prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
import pandas as pd
'''
Data pipeline for ingestion of 311-data datasets
General sections:
1. ACQUIRE: Download data from source
2. CLEAN: Perform data cleaning and organization before entering into SQL
3. INGEST: Add data set to SQL database
These workflows can be abstracted/encapsulated in order to better generalize
across tasks if necessary.
'''
### 1. ACQUIRE ###
# Code for automated data download goes here
### 2. CLEAN ###
# Load data file from TSV/CSV
### NOTE: This workflow can be encapsulated and reapplied for each data set (a sketch follows at the end of this block)
dfb = pd.read_table('311data2019.tsv',sep='\t') # For now assume data in this folder
# Format dates as datetime (Time intensive)
dfb['CreatedDate'] = pd.to_datetime(dfb['CreatedDate'])
dfb['ClosedDate'] = pd.to_datetime(dfb['ClosedDate'])
dfb['ServiceDate'] = pd.to_datetime(dfb['ServiceDate'])
# Compute service time
# New columns: closed_created, service_created
dfb['closed_created'] = dfb.ClosedDate-dfb.CreatedDate
dfb['service_created'] = dfb.ServiceDate-dfb.CreatedDate
# Drop NA values, then express closed_created in units of days
dfb = dfb[~dfb.closed_created.isna()]
# New column: closed_created in units of days
dfb['closed_createdD'] = dfb.closed_created / pd.Timedelta(days=1)
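# A minimal sketch of the encapsulation suggested in the NOTE above (the file path
# and column names are taken from this snippet; everything else is an assumption):
def clean_311_file(path, date_cols=('CreatedDate', 'ClosedDate', 'ServiceDate')):
    """Load a 311 TSV export, parse its date columns, and add duration columns."""
    df = pd.read_table(path, sep='\t')
    for col in date_cols:
        df[col] = pd.to_datetime(df[col])
    df['closed_created'] = df.ClosedDate - df.CreatedDate
    df['service_created'] = df.ServiceDate - df.CreatedDate
    df = df[~df.closed_created.isna()]
    df['closed_createdD'] = df.closed_created / pd.Timedelta(days=1)
    return df
# e.g. dfb = clean_311_file('311data2019.tsv')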
import argparse
import warnings
import logging
import flywheel
import pandas as pd
from fw_heudiconv.backend_funcs.query import get_seq_info
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('fw-heudiconv-tabulator')
def tabulate_bids(client, project_label, path=".", subject_labels=None,
session_labels=None, dry_run=False, unique=True):
"""Writes out a tabular form of the Seq Info objects
Args:
client (Client): The flywheel sdk client
project_label (str): The label of the project
heuristic_path (str): The path to the heuristic file or the name of a
known heuristic
subject_code (str): The subject code
session_label (str): The session label
dry_run (bool): Print the changes, don't apply them on flywheel
"""
logger.info("Querying Flywheel server...")
project_obj = client.projects.find_first('label="{}"'.format(project_label))
assert project_obj, "Project not found! Maybe check spelling...?"
logger.debug('Found project: %s (%s)', project_obj['label'], project_obj.id)
sessions = client.get_project_sessions(project_obj.id)
assert sessions, "No sessions found!"
# filters
if subject_labels:
sessions = [s for s in sessions if s.subject['label'] in subject_labels]
if session_labels:
sessions = [s for s in sessions if s.label in session_labels]
logger.debug('Found sessions:\n\t%s',
"\n\t".join(['%s (%s)' % (ses['label'], ses.id) for ses in sessions]))
# Find SeqInfos to apply the heuristic to
seq_infos = get_seq_info(client, project_label, sessions)
seq_info_dicts = [seq._asdict() for seq in seq_infos]
    df = pd.DataFrame.from_dict(seq_info_dicts)
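    # The remaining steps are not shown above; a minimal continuation under the same
    # assumptions (the output filename is illustrative, not the package's actual choice):
    if unique:
        df = df.drop_duplicates()
    if dry_run:
        logger.info("Dry run: would write %d rows to %s", len(df), path)
    else:
        df.to_csv("{}/seqinfo.csv".format(path), index=False)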
"""
Copyright (C) 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from zipfile import ZipFile
import pandas as pd
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler, MaxAbsScaler
import torch
from torch.utils.data import Dataset
from kaggle.api.kaggle_api_extended import KaggleApi
def split_datetime(data):
tmp = data.copy()
tmp['year'] = pd.Series(data.index.year.values, index=data.index)
tmp['month'] = pd.Series(data.index.month.values, index=data.index)
    tmp['day'] = pd.Series(data.index.day.values, index=data.index)
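    # The original snippet ends here; a plausible continuation (assumed, not taken
    # from the source) adds the remaining time components and returns the copy:
    tmp['hour'] = pd.Series(data.index.hour.values, index=data.index)
    tmp['weekday'] = pd.Series(data.index.weekday.values, index=data.index)
    return tmp

# Usage sketch (hypothetical frame): split_datetime expects a DataFrame indexed by
# a DatetimeIndex, e.g.
#   df = pd.DataFrame({'load': [1.0, 2.0]},
#                     index=pd.date_range('2021-01-01', periods=2, freq='H'))
#   df = split_datetime(df)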
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split (by date_received):
dataset3: 20160701~20160731 (113640 rows), features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446 rows), features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303 rows), features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance, merchant_min_distance, merchant_max_distance of purchases that used a coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
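# Note: each label window (dataset1/2/3) is paired with a feature window that ends
# before the label window starts, so the extracted features cannot leak information
# from the period being predicted.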
############# other feature #############
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
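# Worked example (hypothetical value): for s = '20160710-20160701:20160715',
# get_day_gap_before(s) == 9 (nearest earlier reception of the same coupon) and
# get_day_gap_after(s) == 5 (nearest later reception); -1 means no such reception exists.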
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
    s = str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
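# Worked example (hypothetical values): a man-jian coupon '200:20' (spend 200, save 20)
# yields calc_discount_rate -> 0.9, get_discount_man -> 200, get_discount_jian -> 20,
# is_man_jian -> 1; a plain rate such as '0.95' yields 0.95, 'null', 'null', 0.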
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
dataset2.to_csv('data/coupon2_feature.csv',index=None)
#dataset1
dataset1['day_of_week'] = dataset1.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset1['day_of_month'] = dataset1.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset1['days_distance'] = dataset1.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,4,13)).days)
dataset1['discount_man'] = dataset1.discount_rate.apply(get_discount_man)
dataset1['discount_jian'] = dataset1.discount_rate.apply(get_discount_jian)
dataset1['is_man_jian'] = dataset1.discount_rate.apply(is_man_jian)
dataset1['discount_rate'] = dataset1.discount_rate.apply(calc_discount_rate)
d = dataset1[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset1 = pd.merge(dataset1,d,on='coupon_id',how='left')
dataset1.to_csv('data/coupon1_feature.csv',index=None)
############# merchant related feature #############
"""
1.merchant related:
total_sales. sales_use_coupon. total_coupon
coupon_rate = sales_use_coupon/total_sales.
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance, merchant_min_distance, merchant_max_distance of purchases that used a coupon
"""
#for dataset3
merchant3 = feature3[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant3[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant3[merchant3.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant3[merchant3.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant3_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t2,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t3,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t5,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t6,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t7,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t8,on='merchant_id',how='left')
merchant3_feature.sales_use_coupon = merchant3_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature['merchant_coupon_transfer_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_coupon
merchant3_feature['coupon_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_sales
merchant3_feature.total_coupon = merchant3_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature.to_csv('data/merchant3_feature.csv',index=None)
#for dataset2
merchant2 = feature2[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant2[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant2[merchant2.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant2[merchant2.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant2_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t2,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t3,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t5,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t6,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t7,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t8,on='merchant_id',how='left')
merchant2_feature.sales_use_coupon = merchant2_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature['merchant_coupon_transfer_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_coupon
merchant2_feature['coupon_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_sales
merchant2_feature.total_coupon = merchant2_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature.to_csv('data/merchant2_feature.csv',index=None)
#for dataset1
merchant1 = feature1[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant1[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant1[merchant1.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant1[merchant1.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant1_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t2,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t3,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t5,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t6,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t7,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t8,on='merchant_id',how='left')
merchant1_feature.sales_use_coupon = merchant1_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature['merchant_coupon_transfer_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_coupon
merchant1_feature['coupon_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_sales
merchant1_feature.total_coupon = merchant1_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature.to_csv('data/merchant1_feature.csv',index=None)
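# The three merchant blocks above differ only in their input frame and output path.
# A minimal sketch of a reusable helper (assumed refactoring, not part of the original
# script; the distance aggregates are omitted for brevity):
def build_merchant_features(feature_df, out_path):
    m = feature_df[['merchant_id', 'coupon_id', 'date']]
    base = m[['merchant_id']].drop_duplicates()
    total_sales = m[m.date != 'null'].groupby('merchant_id').size().rename('total_sales')
    sales_use_coupon = (m[(m.date != 'null') & (m.coupon_id != 'null')]
                        .groupby('merchant_id').size().rename('sales_use_coupon'))
    total_coupon = m[m.coupon_id != 'null'].groupby('merchant_id').size().rename('total_coupon')
    out = (base.join(total_sales, on='merchant_id')
               .join(sales_use_coupon, on='merchant_id')
               .join(total_coupon, on='merchant_id'))
    out.sales_use_coupon = out.sales_use_coupon.fillna(0)
    out['merchant_coupon_transfer_rate'] = out.sales_use_coupon.astype('float') / out.total_coupon
    out['coupon_rate'] = out.sales_use_coupon.astype('float') / out.total_sales
    out.total_coupon = out.total_coupon.fillna(0)
    out.to_csv(out_path, index=None)
    return out
# e.g. build_merchant_features(feature3, 'data/merchant3_feature.csv')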
############# user related feature #############
"""
3.user related:
count_merchant.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
buy_use_coupon/buy_total
user_date_datereceived_gap
"""
def get_user_date_datereceived_gap(s):
s = s.split(':')
return (date(int(s[0][0:4]),int(s[0][4:6]),int(s[0][6:8])) - date(int(s[1][0:4]),int(s[1][4:6]),int(s[1][6:8]))).days
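# Example (hypothetical value): get_user_date_datereceived_gap('20160520:20160515') == 5,
# i.e. the purchase ('date') happened 5 days after the coupon was received ('date_received').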
#for dataset3
user3 = feature3[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user3[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user3[user3.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user3[user3.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user3[user3.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user3[(user3.date_received!='null')&(user3.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user3_feature = pd.merge(t,t1,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t3,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t4,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t5,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t6,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t7,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t8,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t9,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t11,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t12,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t13,on='user_id',how='left')
user3_feature.count_merchant = user3_feature.count_merchant.replace(np.nan,0)
user3_feature.buy_use_coupon = user3_feature.buy_use_coupon.replace(np.nan,0)
user3_feature['buy_use_coupon_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.buy_total.astype('float')
user3_feature['user_coupon_transfer_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.coupon_received.astype('float')
user3_feature.buy_total = user3_feature.buy_total.replace(np.nan,0)
user3_feature.coupon_received = user3_feature.coupon_received.replace(np.nan,0)
user3_feature.to_csv('data/user3_feature.csv',index=None)
#for dataset2
user2 = feature2[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user2[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user2[user2.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user2[user2.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user2[user2.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user2[(user2.date_received!='null')&(user2.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user2_feature = pd.merge(t,t1,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t3,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t4,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t5,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t6,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t7,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t8,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t9,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t11,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t12,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t13,on='user_id',how='left')
user2_feature.count_merchant = user2_feature.count_merchant.replace(np.nan,0)
user2_feature.buy_use_coupon = user2_feature.buy_use_coupon.replace(np.nan,0)
user2_feature['buy_use_coupon_rate'] = user2_feature.buy_use_coupon.astype('float') / user2_feature.buy_total.astype('float')
user2_feature['user_coupon_transfer_rate'] = user2_feature.buy_use_coupon.astype('float') / user2_feature.coupon_received.astype('float')
user2_feature.buy_total = user2_feature.buy_total.replace(np.nan,0)
user2_feature.coupon_received = user2_feature.coupon_received.replace(np.nan,0)
user2_feature.to_csv('data/user2_feature.csv',index=None)
#for dataset1
user1 = feature1[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user1[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user1[user1.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user1[(user1.date!='null')&(user1.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user1[(user1.date!='null')&(user1.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user1[user1.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user1[user1.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user1[(user1.date_received!='null')&(user1.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user1_feature = pd.merge(t,t1,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t3,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t4,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t5,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t6,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t7,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t8,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t9,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t11,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t12,on='user_id',how='left')
user1_feature = pd.merge(user1_feature,t13,on='user_id',how='left')
user1_feature.count_merchant = user1_feature.count_merchant.replace(np.nan,0)
user1_feature.buy_use_coupon = user1_feature.buy_use_coupon.replace(np.nan,0)
user1_feature['buy_use_coupon_rate'] = user1_feature.buy_use_coupon.astype('float') / user1_feature.buy_total.astype('float')
user1_feature['user_coupon_transfer_rate'] = user1_feature.buy_use_coupon.astype('float') / user1_feature.coupon_received.astype('float')
user1_feature.buy_total = user1_feature.buy_total.replace(np.nan,0)
user1_feature.coupon_received = user1_feature.coupon_received.replace(np.nan,0)
user1_feature.to_csv('data/user1_feature.csv',index=None)
################## user_merchant related feature #########################
"""
4.user_merchant:
times_user_buy_merchant_before.
"""
#for dataset3
all_user_merchant = feature3[['user_id','merchant_id']]
all_user_merchant.drop_duplicates(inplace=True)
t = feature3[['user_id','merchant_id','date']]
t = t[t.date!='null'][['user_id','merchant_id']]
t['user_merchant_buy_total'] = 1
t = t.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t.drop_duplicates(inplace=True)
t1 = feature3[['user_id','merchant_id','coupon_id']]
t1 = t1[t1.coupon_id!='null'][['user_id','merchant_id']]
t1['user_merchant_received'] = 1
t1 = t1.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t1.drop_duplicates(inplace=True)
t2 = feature3[['user_id','merchant_id','date','date_received']]
t2 = t2[(t2.date!='null')&(t2.date_received!='null')][['user_id','merchant_id']]
t2['user_merchant_buy_use_coupon'] = 1
t2 = t2.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t2.drop_duplicates(inplace=True)
t3 = feature3[['user_id','merchant_id']]
t3['user_merchant_any'] = 1
t3 = t3.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t3.drop_duplicates(inplace=True)
t4 = feature3[['user_id','merchant_id','date','coupon_id']]
t4 = t4[(t4.date!='null')&(t4.coupon_id=='null')][['user_id','merchant_id']]
t4['user_merchant_buy_common'] = 1
t4 = t4.groupby(['user_id','merchant_id']).agg('sum').reset_index()
t4.drop_duplicates(inplace=True)
user_merchant3 = pd.merge(all_user_merchant,t,on=['user_id','merchant_id'],how='left')
user_merchant3 = pd.merge(user_merchant3,t1,on=['user_id','merchant_id'],how='left')
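# The snippet is truncated here; by the pattern established above, the remaining
# user-merchant aggregates would presumably be merged the same way, e.g.
# user_merchant3 = pd.merge(user_merchant3,t2,on=['user_id','merchant_id'],how='left')
# user_merchant3 = pd.merge(user_merchant3,t3,on=['user_id','merchant_id'],how='left')
# user_merchant3 = pd.merge(user_merchant3,t4,on=['user_id','merchant_id'],how='left')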
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                rng - delta
            with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with | tm.assertRaises(TypeError) | pandas.util.testing.assertRaises |
#
# Face detection and attribute analysis WebAPI call example
# Before running: fill in the Appid, APIKey, APISecret and the image path
# How to run: run main directly
# Result: the result information is printed to the console
#
# API documentation (must read): https://www.xfyun.cn/doc/face/xf-face-detect/API.html
#
from datetime import datetime
from wsgiref.handlers import format_date_time
from time import mktime
import hashlib
import base64
import hmac
from urllib.parse import urlencode
import os
import traceback
import pandas as pd
import json
import requests
class AssembleHeaderException(Exception):
def __init__(self, msg):
self.message = msg
class Url:
def __init__(self, host, path, schema):
self.host = host
self.path = path
self.schema = schema
pass
# SHA-256 hash the data and base64-encode the digest
def sha256base64(data):
sha256 = hashlib.sha256()
sha256.update(data)
digest = base64.b64encode(sha256.digest()).decode(encoding='utf-8')
return digest
def parse_url(requset_url):
stidx = requset_url.index("://")
host = requset_url[stidx + 3:]
schema = requset_url[:stidx + 3]
edidx = host.index("/")
if edidx <= 0:
raise AssembleHeaderException("invalid request url:" + requset_url)
path = host[edidx:]
host = host[:edidx]
u = Url(host, path, schema)
return u
def assemble_ws_auth_url(requset_url, method="GET", api_key="", api_secret=""):
u = parse_url(requset_url)
host = u.host
path = u.path
now = datetime.now()
date = format_date_time(mktime(now.timetuple()))
# print(date)
# date = "Thu, 12 Dec 2019 01:57:27 GMT"
signature_origin = "host: {}\ndate: {}\n{} {} HTTP/1.1".format(host, date, method, path)
# print(signature_origin)
signature_sha = hmac.new(api_secret.encode('utf-8'), signature_origin.encode('utf-8'),
digestmod=hashlib.sha256).digest()
signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')
authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
api_key, "hmac-sha256", "host date request-line", signature_sha)
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
# print(authorization_origin)
values = {
"host": host,
"date": date,
"authorization": authorization
}
return requset_url + "?" + urlencode(values)
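# Illustrative sketch, not part of the original script: how the signed URL is
# typically produced before the POST request below. The credential strings here
# are placeholders, not real keys.
def _example_signed_url():
    # Returns something of the form:
    #   http://api.xf-yun.com/v1/private/s67c9c78c?host=...&date=...&authorization=...
    return assemble_ws_auth_url(
        "http://api.xf-yun.com/v1/private/s67c9c78c",
        method="POST",
        api_key="your-api-key",
        api_secret="your-api-secret",
    )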
def gen_body(appid, img_path, server_id):
with open(img_path, 'rb') as f:
img_data = f.read()
body = {
"header": {
"app_id": appid,
"status": 3
},
"parameter": {
server_id: {
"service_kind": "face_detect",
                # "detect_points": "1",  # detect facial landmark points
                "detect_property": "1",  # detect face attributes
"face_detect_result": {
"encoding": "utf8",
"compress": "raw",
"format": "json"
}
}
},
"payload": {
"input1": {
"encoding": "jpg",
"status": 3,
"image": str(base64.b64encode(img_data), 'utf-8')
}
}
}
return json.dumps(body)
def run(appid, apikey, apisecret, img_path, server_id='s67c9c78c'):
url = 'http://api.xf-yun.com/v1/private/{}'.format(server_id)
request_url = assemble_ws_auth_url(url, "POST", apikey, apisecret)
headers = {'content-type': "application/json", 'host': 'api.xf-yun.com', 'app_id': appid}
# print(request_url)
response = requests.post(request_url, data=gen_body(appid, img_path, server_id), headers=headers)
resp_data = json.loads(response.content.decode('utf-8'))
# print(resp_data)
if not resp_data.get('payload'):
return None
return base64.b64decode(resp_data['payload']['face_detect_result']['text']).decode()
def run_once(path):
text = json.loads(run(
appid='a4f4c658',
apisecret='<KEY>',
apikey='28ab5c28403ce4501a87c72bb28057e4',
img_path=path
))
    if not text or text.get('ret') != 0 or not text.get('face_1'):
        print(text.get('ret') if text else 'no result')
        return None
return text['face_1']['property']
# Fill in the APPID, APISecret and APIKey obtained from the console, and the path of the image to detect
if __name__ == '__main__':
dir_ = "../img_align_celeba"
img_list = sorted(os.listdir(dir_))
d = dict({'name': [], 'gender': [], 'glass': [], 'beard': [], 'hair': [], 'mask': [], 'expression': []})
keys = list(d.keys())
df = pd.DataFrame.from_dict(d)
filename = '../partition/label.txt'
# df.to_csv(filename, header=True, index=False, columns=keys, mode='w')
# 199, 1707, 1864, 1884, 1919, 2407, 2432, 2548, 2657, 3347, 5590, 5735, 5842, 7132, 13043
for i, img in enumerate(img_list):
if i <= 96398:
continue
print("{}/{}: {} ".format(i, len(img_list), img), end="")
try:
res = run_once(dir_ + '/' + img)
except TypeError:
continue
except ConnectionResetError:
continue
if not res:
continue
print("success!")
res['name'] = img
df = | pd.DataFrame(res, index=[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the previously defined values should
        # remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = | strings.str_contains(values, pat) | pandas.core.strings.str_contains |
"""
utils4text.py is the script file storing many useful functions for processing the comment dataframes from the subreddits.
That is, it is mainly used for text EDA.
Made by <NAME>.
"""
import numpy as np
import pandas as pd
import multiprocess as mp
import re
import nltk
import contractions
import string
from emoji import UNICODE_EMOJI
from itertools import repeat
from collections import Counter
from nltk import pos_tag, word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords, wordnet
from joblib import Parallel, delayed
from profanity_check import predict_prob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def build_convs(df):
"""
    Applied to a single post (and its comments) at a time, typically via parallel computing.
    Reconstruct the dataframe into a more conversation-like dataframe.
Arg:
df: A given dataframe scraped from a certain subreddit.
Return:
df_convs: A more conversation-like dataframe with the columns such as
conversation ID, subreddit, post title, author, dialog turn, and text.
"""
# initialize conversation dataframe
df_convs = pd.DataFrame(columns = ['subreddit', 'post title', 'author', 'dialog turn', 'text'])
# consider each post
df_link_id = df.reset_index().drop('index', axis = 1)
row_list = []
convs_turn = 0
# add post from df_link_id
post_row = df_link_id.loc[0, :]
convs_turn += 1
row_list.append({'subreddit': post_row['subreddit'], 'post title': post_row['title'],
'author': post_row['post_author'], 'dialog turn': convs_turn, 'text': post_row['post_content']})
# iterate over each comment from df_link_id
for i, row in df_link_id.iterrows():
convs_turn += 1
row_list.append({'subreddit': row['subreddit'], 'post title': row['title'],
'author': row['comment_author'], 'dialog turn': convs_turn, 'text': row['comment_content']})
df_convs = df_convs.append(pd.DataFrame(row_list))
# change data types
df_convs['dialog turn'] = df_convs['dialog turn'].astype('int32')
return df_convs
def apply_parallel(grouped_df, func):
"""
    Parallelize the 'build_convs' function by applying it to each post group
    (a post and its comments), then concatenate the results into one dataframe.
    Arg:
        grouped_df: A pandas groupby object whose groups are processed in parallel.
Return:
pd.concat(retLst): A complete dataframe with the conversation sets between posts and comments.
"""
retLst = Parallel(n_jobs = mp.cpu_count())(delayed(func)(group) for id, group in grouped_df)
return pd.concat(retLst)
def build_concise_convs_df(df_convs, njobs = mp.cpu_count()):
"""
Using the functions, build_convs and apply_parallel, a dataframe with conversation sets
can be easily built. Also the id for each conversation is added.
Arg:
df_convs: The original dataframe consisting of posts and comments parsed from the text files.
Return:
df_convs_concise: The concise version of a dataframe with conversation sets.
"""
df_convs_concise = apply_parallel(df_convs.groupby(df_convs.link_id), build_convs)
df_convs_concise['conversation id'] = (df_convs_concise.groupby(['post title']).cumcount() == 0).astype(int)
df_convs_concise['conversation id'] = df_convs_concise['conversation id'].cumsum()
df_convs_concise = df_convs_concise[['conversation id', 'subreddit', 'post title', 'author', 'dialog turn', 'text']]
df_convs_concise = df_convs_concise.reset_index().drop('index', axis = 1)
return df_convs_concise
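# Hypothetical usage sketch (the file name and column layout are assumptions, not
# part of the original module): the helpers above are meant to be chained on a raw
# scraped dataframe that carries a `link_id` column plus post/comment fields.
def _example_build_concise(path="some_subreddit_dump.csv"):
    df_raw = pd.read_csv(path)             # one comment per row, post fields repeated
    return build_concise_convs_df(df_raw)  # conversation-shaped dataframe with ids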
def remove_marks(text):
"""
Remove those unnecessary marks inside texts.
Arg:
text: A string that could be either posts or comments.
Return:
new_text: A string which is a clean sentence.
"""
# remove HTML tags
new_text = re.sub('<.*?>', '', text)
# remove URL
new_text = re.sub('http\S+', '', new_text)
# replace number with <NUM> token
new_text = re.sub('\d+', ' NUM ', new_text)
return new_text
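# Quick illustration of remove_marks (expected output inferred from the regexes
# above; exact spacing around the NUM token may vary):
#   remove_marks("<p>Sold 3 items</p>")  ->  "Sold NUM items"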
def get_wordnet_pos(tag):
"""
Transform a positional tag to its corresponding WordNet format.
Arg:
tag: A positional tag from pos_tag function.
Return:
The associated wordnet format given by a tag.
"""
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
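# Small sanity illustration of the tag mapping above (pure function, safe to keep
# at module level; tag strings chosen for illustration):
_POS_TAG_EXAMPLES = {
    'VBG': get_wordnet_pos('VBG'),  # verb tag      -> wordnet.VERB
    'JJR': get_wordnet_pos('JJR'),  # adjective tag -> wordnet.ADJ
    'XYZ': get_wordnet_pos('XYZ'),  # unknown tag falls back to wordnet.NOUN
}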
def token_lemmatize(token, lemmatizer):
"""
Lemmatize a token to convert a token back to its root form.
When dealing with punctuation marks or emojis, simply return them as usual.
Arg:
token: A word in the string type.
lemmatizer: The object from wordnet lemmatizer.
Return:
token in its root form.
"""
if token == 'NUM':
# keep NUM token as usual
return token
elif token in string.punctuation:
# keep punctuation marks as usual
return token
elif token in UNICODE_EMOJI:
# keep emojis
return token
elif token.isalpha():
# consider English words
token, tag = pos_tag([token])[0][0], pos_tag([token])[0][1]
return lemmatizer.lemmatize(token, get_wordnet_pos(tag))
# else:
# # transform those nonwords as the token NOWORD
# token = 'NONWORD'
# return token
def text_lemmatize(text, lemmatizer):
"""
Apply lemmatization on the raw texts to convert the words in texts back to their root
forms. Before lemmatization, remove unnecessary marks and stopwords to keep only the
meaningful words.
Arg:
text: A string text.
lemmatizer: An object of WordNetLemmatizer.
Return:
lem_words: A list of lemmatized words.
"""
# remove unnecessary marks and tokenize
tokens = word_tokenize(remove_marks(text))
# remove stopwords
filtered_tokens = [word for word in tokens if word not in stopwords.words('english')]
# lemmatize the tokenized texts
lem_words = []
lem_words += list(map(token_lemmatize, filtered_tokens, repeat(lemmatizer)))
return lem_words
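# Hypothetical one-off call of text_lemmatize (the sentence is invented for
# illustration; the expected output is approximate):
def _example_text_lemmatize():
    lemmatizer = WordNetLemmatizer()
    # roughly ['The', 'cat', 'sit', 'NUM', 'mat', '.'] after stopword filtering
    return text_lemmatize("The cats were sitting on 2 mats.", lemmatizer)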
def compute_tokens(subreddit_convs_concise):
"""
Given the text data from a subreddit, lemmatize and compute the word tokens using the defined function, text_lemmatize.
    Before that, remove newline characters and expand English contractions.
    The progress_bar option is set to False because of Google Colab's memory limitation.
    If that is not an issue on your local machine, you can simply set it to True to check the processing status.
Arg:
subreddit_convs_concise: A conversation dataframe from a subreddit.
Return:
subreddit_tokens: A series with each row containing a list of word tokens from either post or comment.
"""
# copy the text column from original dataframe
subreddit_text = subreddit_convs_concise['text'].copy()
# expanding contraction
subreddit_text = subreddit_text.swifter.progress_bar(False).apply(lambda text: text.replace('\n', ' '))\
.swifter.progress_bar(False).apply(lambda text: ' '.join([contractions.fix(word) for word in text.split()]))
# lemmatize
lemmatizer = WordNetLemmatizer()
subreddit_tokens = subreddit_text.swifter.progress_bar(False).apply(lambda text: text_lemmatize(text, lemmatizer))
return subreddit_tokens
def compute_turn_distribution(df):
"""
Given a conversation dataframe from a subreddit (note that the dataframe is in the concise format indicated by Supervisor),
find out the dialog turn distribution.
Arg:
df: A conversation dataframe from a subreddit.
Return:
turn_dist: A series about dialog turn distribution.
"""
turn_dist = df.groupby('conversation id').size().value_counts().sort_index()
turn_dist = | pd.DataFrame(turn_dist) | pandas.DataFrame |
import datetime
import functools
import os
from urllib.parse import urljoin
import arcgis
import geopandas
import numpy
import pandas
import requests
from airflow import DAG
from airflow.hooks.base_hook import BaseHook
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.utils.email import send_email
from arcgis.gis import GIS
API_BASE_URL = "https://api2.gethelp.com/v1/"
FACILITIES_ID = "dd618cab800549358bac01bf218406e4"
STATS_ID = "9db2e26c98134fae9a6f5c154a1e9ac9"
TIMESERIES_ID = "bd17014f8a954681be8c383acdb6c808"
COUNCIL_DISTRICTS = (
"https://opendata.arcgis.com/datasets/"
"76104f230e384f38871eb3c4782f903d_13.geojson"
)
def download_council_districts():
r = requests.get(COUNCIL_DISTRICTS)
fname = "/tmp/council-districts.geojson"
with open(fname, "wb") as f:
f.write(r.content)
return fname
def coerce_integer(df):
"""
Loop through the columns of a df, if it is numeric,
convert it to integer and fill nans with zeros.
This is somewhat heavy-handed in an attempt to force
Esri to recognize sparse columns as integers.
"""
# Numeric columns to not coerce to integer
EXCEPT = ["latitude", "longitude", "zipCode"]
def numeric_column_to_int(series):
return (
series.fillna(0).astype(int)
if pandas.api.types.is_numeric_dtype(series) and series.name not in EXCEPT
else series
)
return df.transform(numeric_column_to_int, axis=0)
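# Minimal sketch of coerce_integer on a toy frame (column names and values are
# invented for illustration):
def _example_coerce_integer():
    df = pandas.DataFrame({"beds": [1.0, numpy.nan], "latitude": [34.05, 34.10]})
    # "beds" is filled with 0 and cast to int; "latitude" is left untouched
    # because it is listed in EXCEPT inside coerce_integer.
    return coerce_integer(df)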
def upload_to_esri(df, layer_id, filename="/tmp/df.csv"):
"""
A quick helper function to upload a data frame
to ESRI as a featurelayer backed CSV
recommend: no geometries, lat/long columns
remember ESRI is UTC only.
"""
df.to_csv(filename, index=False)
# Login to ArcGIS
arcconnection = BaseHook.get_connection("arcgis")
arcuser = arcconnection.login
arcpassword = arcconnection.password
gis = GIS("http://lahub.maps.arcgis.com", username=arcuser, password=arcpassword)
gis_item = gis.content.get(layer_id)
gis_layer_collection = arcgis.features.FeatureLayerCollection.fromitem(gis_item)
gis_layer_collection.manager.overwrite(filename)
os.remove(filename)
return True
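# Hypothetical call sketch (requires valid ArcGIS credentials in the "arcgis"
# Airflow connection; layer id constants are defined at the top of this file):
#   upload_to_esri(coerce_integer(facilities_df), FACILITIES_ID, "/tmp/facilities.csv")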
def make_get_help_request(api_path, token, params={}, paginated=True):
"""
Makes an API request to the GetHelp platform.
Also handles depagination of long responses.
Parameters
==========
api_path: string
The path to query
token: string
The OAuth bearer token
params: dict
Any additional query parameters to pass
paginated: boolean
Whether the response is expected to be a list of paginated results
with a "content" field. In this case, the function will depaginate
the results. If false, it will return the raw JSON.
Returns
=======
The depaginated JSON response in the "content" field, or the raw JSON response.
"""
endpoint = urljoin(API_BASE_URL, api_path)
if paginated:
content = []
page = 0
while True:
r = requests.get(
endpoint,
headers={"Authorization": f"Bearer {token}"},
params=dict(page=page, **params),
)
res = r.json()
content = content + res["content"]
if res["last"] is True:
break
else:
page = page + 1
return content
else:
r = requests.get(
endpoint, headers={"Authorization": f"Bearer {token}"}, params=params,
)
return r.json()
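# Hedged usage sketch: the same depaginated call that get_facilities() below
# performs, shown in isolation. `token` must be a valid GetHelp OAuth bearer token.
def _example_list_facilities(token):
    return make_get_help_request("facility-groups/1/facilities", token)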
def get_facilities():
"""
Get the current facilties and their status.
Returns
=======
A dataframe with the current facilities.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request("facility-groups/1/facilities", TOKEN)
df = pandas.io.json.json_normalize(res)
df = pandas.concat(
[df, df.apply(lambda x: get_client_stats(x["id"]), axis=1)], axis=1,
)
df = pandas.concat(
[df, df.apply(lambda x: get_facility_program_status(x["id"]), axis=1)], axis=1,
)
council_districts = geopandas.read_file(
download_council_districts(), driver="GeoJSON"
)[["geometry", "District"]]
df = geopandas.GeoDataFrame(
df,
geometry=geopandas.points_from_xy(df.longitude, df.latitude),
crs={"init": "epsg:4326"},
)
df = df.assign(
district=df.apply(
lambda x: council_districts[council_districts.contains(x.geometry)]
.iloc[0]
.District,
axis=1,
)
).drop(columns=["geometry"])
return df
def get_client_stats(facility_id):
"""
Given a facility ID, get the current client status.
Parameters
==========
facility_id: int
The facility ID
Returns
=======
A pandas.Series with the client statistics for the facility.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(
f"facilities/{facility_id}/client-statistics", TOKEN, paginated=False,
)
return (
pandas.Series({**res, **res["genderStats"], **res["clientEvents"]})
.drop(["genderStats", "clientEvents"])
.astype(int)
)
def get_program_client_stats(facility_id, program_id):
"""
Given a facility ID and a program ID, get the current client status.
Parameters
==========
facility_id: int
The facility ID
program_id: int
The program ID
Returns
=======
A pandas.Series with the client statistics for the facility program.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(
f"facilities/{facility_id}/facility-programs/{program_id}/client-statistics",
TOKEN,
paginated=False,
)
return (
pandas.Series({**res, **res["genderStats"], **res["clientEvents"]})
.drop(["genderStats", "clientEvents"])
.astype(int)
)
def agg_facility_programs(facility_id, program_list, match, prefix):
"""
Aggregate the current bed occupancy data for a list of programs,
filtering by program name.
Parameters
==========
facility_id: int
The facility id.
program_list: list
A list of programs of the shape returned by the GetHelp
facility-programs endpoint.
match: str
A string which is tested for inclusion in a program name
to decide whether to include a program in the statistics.
prefix:
A string to prefix series labels with.
Returns
=======
A pandas.Series with the aggregated statistics for the matching facility programs.
"""
# A sentinel timestamp which is used to determine whether
# any programs actually matched.
sentinel = pandas.Timestamp("2020-01-01T00:00:00Z")
last_updated = functools.reduce(
lambda x, y: (
max(x, pandas.Timestamp(y["lastUpdated"]))
if match in y["name"].lower()
else x
),
program_list,
sentinel,
)
if last_updated == sentinel:
# No programs matched, return early
return None
occupied = functools.reduce(
lambda x, y: x
+ (y["bedsOccupied"] + y["bedsPending"] if match in y["name"].lower() else 0),
program_list,
0,
)
total = functools.reduce(
lambda x, y: x + (y["bedsTotal"] if match in y["name"].lower() else 0),
program_list,
0,
)
available = total - occupied
client_stats = functools.reduce(
lambda x, y: x.add(
get_program_client_stats(facility_id, y["id"]), fill_value=0,
)
if match in y["name"].lower()
else x,
program_list,
pandas.Series(),
)
return pandas.Series(
{
prefix + "occupied": occupied,
prefix + "available": available,
prefix + "last_updated": last_updated,
}
).append(client_stats.rename(lambda x: prefix + x))
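# Hypothetical call sketch: aggregate only the shelter-bed programs of a facility,
# prefixing the resulting fields (mirrors the calls in get_facility_program_status
# below):
#   agg_facility_programs(facility_id, program_list, "shelter bed", "shelter_beds_")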
def get_facility_program_status(facility_id):
"""
Get the most recent status for a facility, broken
up into shelter beds, trailers, and safe parking.
Parameters
==========
facility_id: int
The facility ID.
Returns
=======
A pandas.Series with program statistics for shelter beds, safe
parking, and trailer beds.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(f"facilities/{facility_id}/facility-programs", TOKEN)
shelter_beds = agg_facility_programs(
facility_id, res, "shelter bed", "shelter_beds_"
)
isolation = agg_facility_programs(facility_id, res, "isolation", "isolation_")
trailers = agg_facility_programs(facility_id, res, "trailer", "trailers_")
safe_parking = agg_facility_programs(facility_id, res, "parking", "safe_parking_")
return pandas.concat([shelter_beds, isolation, trailers, safe_parking])
def get_facility_history(facility_id, start_date=None, end_date=None):
"""
Get the history stats of a given facility by ID.
Parameters
==========
facility_id: int
The ID of the facility.
start_date: datetime.date
The start date of the history (defaults to April 8, 2020)
end_date: datetme.date
The end date of the history (defaults to the present day)
Returns
=======
A dataframe with the history for the given facility.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
start_date = start_date or datetime.date(2020, 4, 8)
end_date = end_date or pandas.Timestamp.now(tz="US/Pacific").date()
# Get the shelter bed program ID
res = make_get_help_request(f"facilities/{facility_id}/facility-programs", TOKEN)
programs = pandas.io.json.json_normalize(res)
history = | pandas.DataFrame() | pandas.DataFrame |
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import lib_plot
from lib_db import DBClient, NodeClassification
from lib_fmt import fmt_barplot, fmt_thousands
from lib_agent import agent_name, go_ipfs_version, go_ipfs_v08_version
def main(client: DBClient):
sns.set_theme()
def plot_agent(results, plot_name):
results_df = | pd.DataFrame(results, columns=['agent_version', 'count']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import os
import sys
import argparse
import pandas as pd
from scipy import stats
sys.path.append(os.path.abspath(".."))
from survey._app import CODE_DIR, app
from core.models.metrics import gain_mean, rejection_ratio, gain
from utils import get_con_and_dfs, get_all_con_and_dfs
import metrics
STATS_FUNCTIONS = {}
# overwritten by a command line flag. If true, percentage will be generated instead of frequency
USE_PERCENTAGE = None
USE_LABELS = None
SELECTION = None
AI_FEEDBACK_ACCURACY_SCALAS = {
"ai_much_worse": "AI much worse than PROPOSER",
"ai_worse": "AI worse",
"ai_sligthly_worse": "AI slighly worse",
"ai_equal_to_proposer": "AI equal to PROPOSER",
"ai_slighly_better": "AI slighly better",
"ai_better": "AI better",
"ai_much_better": "AI much better than the PROPOSER",
}
AI_FEEDBACK_SCALAS = {
1: "strongly_disagree",
2: "disagree",
3: "slightly_disagree",
4: "neutral",
5: "slightly_agree",
6: "agree",
7: "strongly_agree"
}
def get_parser():
parser = argparse.ArgumentParser(description='Generate statistics for a given treatment')
parser.add_argument('--use-percentage', help='Generate percentages instead of frequencies', action='store_true')
parser.add_argument('--use-latex', help='Print results as latex table', action='store_true')
parser.add_argument('--use-labels', help='Print results using description labels', action='store_true')
parser.add_argument('--output-dir', help='Output directory where csv files were exported')
parser.add_argument('--selection', help='Whether to restrict the stats to responder or proposers', choices=['prop', 'resp'])
parser.add_argument('treatments', help='Comma separated treatments')
return parser
ALL_CONS, ALL_DFS = get_all_con_and_dfs()
def mark_for_stats(label=None):
def _mark_for_stats(function, label=label):
if label is None:
label = function.__name__[4:]
STATS_FUNCTIONS[label] = function
return function
return _mark_for_stats
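# Note on the registry pattern above (comment only): decorating a function such as
#
#   @mark_for_stats()
#   def get_count(...): ...
#
# strips the leading "get_" from its name and registers it as
# STATS_FUNCTIONS["count"], which is how the statistics below are collected by label.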
def get_count_participants(treatment, con, dfs=None, use_percentage=None, use_labels=None):
sql = f"""
select * from result__{treatment}_survey
where worker_id not in (
select worker_id from main__txx where worker_code == 'dropped'
)
"""
if SELECTION == "resp":
sql = f"""
select worker_id from result__{treatment}_resp
"""
elif SELECTION == "prop":
sql = f"""
select worker_id from result__{treatment}_prop
"""
else:
sql = f"""
select * from (
select worker_id from result__{treatment}_resp
union
select worker_id from result__{treatment}_prop
)
"""
df = pd.read_sql(sql, con)
return df.shape[0]
@mark_for_stats()
def get_count(treatment, con, dfs=None, use_percentage=None, use_labels=None):
sql = f"""
select * from result__{treatment}_survey
where worker_id in (
select worker_id from main__txx where worker_code != 'dropped'
)
"""
if SELECTION == "resp":
sql += f"""and
worker_id in (
select worker_id from result__{treatment}_resp
)
"""
elif SELECTION == "prop":
sql += f"""and
worker_id in (
select worker_id from result__{treatment}_prop
)
"""
df = pd.read_sql(sql, con)
count_stats = df.shape[0]
count_all = get_count_participants(treatment, con, dfs, use_percentage, use_labels)
return {"count": f"{count_stats} ({count_all}*)"}
RESPONDERS = {
"t00": "t00",
"t10a": "t10a",
"t10b": "t10a",
"t11a": "t11a",
"t11b": "t11a",
"t11c": "t11a",
"t20": "t20a",
"t20a": "t20a",
}
PROPOSERS = {
"t00": "t00",
"t10a": "t10a",
"t11a": "t11a",
"t12a": "t10a",
"t13a": "t10a",
"t20a": "t10a",
}
def get_prop_resp(treatment):
if SELECTION == "prop":
df_prop = ALL_DFS[f"result__{treatment}_prop"].copy()
if treatment in {"t20", "t20a"}:
df_prop["offer"] = df_prop["ai_offer"]
df_prop["offer_dss"] = df_prop["ai_offer"]
df_prop["offer_final"] = df_prop["ai_offer"]
df_resp = ALL_DFS[f"result__{RESPONDERS[treatment]}_resp"].copy()
elif SELECTION == "resp":
df_resp = ALL_DFS[f"result__{treatment}_resp"].copy()
df_prop = ALL_DFS[f"result__{PROPOSERS[treatment]}_prop"].copy()
if "offer_dss" not in df_prop.columns:
df_prop["offer_dss"] = df_prop["offer"]
df_prop["offer_final"] = df_prop["offer"]
if "min_offer_dss" not in df_resp.columns:
df_resp["min_offer_dss"] = df_resp["min_offer"]
df_resp["min_offer_final"] = df_resp["min_offer"]
size = min(df_prop.shape[0], df_resp.shape[0])
df_prop = df_prop.head(size)
df_resp = df_resp.head(size)
return df_prop, df_resp
def _get_prop_vs_prop_dss_score(treatment, con, dfs=None, use_percentage=None, use_labels=None, metric=None, as_percentage=None):
df_prop, df_resp = get_prop_resp(treatment)
if as_percentage:
res = {
"Proposer": f'{round(100 * metric(df_resp["min_offer"], df_prop["offer"]), 2)} %',
"Proposer + DSS": f'{round(100 * metric(df_resp["min_offer"], df_prop["offer_dss"]), 2)} %',
"Auto DSS": f'{round(100 * metric(df_resp["min_offer"], df_prop["ai_offer"]), 2)} %',
}
else:
res = {
"Proposer": f'{round(metric(df_resp["min_offer"], df_prop["offer"]), 2)}',
"Proposer + DSS": f'{round(metric(df_resp["min_offer"], df_prop["offer_dss"]), 2)}',
"Auto DSS": f'{round(metric(df_resp["min_offer"], df_prop["ai_offer"]), 2)}',
}
return res
# @mark_for_stats()
def get_rejection_ratio(treatment, con, dfs=None, use_percentage=None, use_labels=None):
return _get_prop_vs_prop_dss_score(treatment, con, dfs, use_percentage, use_labels, metric=rejection_ratio, as_percentage=True)
@mark_for_stats()
def get_rel_responder_min_offer(treatment, con, dfs=None, use_percentage=None, use_labels=None):
if SELECTION != "resp":
return
df_prop, df_resp = get_prop_resp(treatment)
df_prop[df_resp.columns] = df_resp
_, df_resp_t00 = get_prop_resp("t00")
resp_stat = stats.ttest_rel(df_resp["min_offer"], df_resp["min_offer_final"])
print("REF to t00", stats.ttest_ind(get_prop_resp("t00")[0]["min_offer"], df_resp["min_offer_final"]))
resp_stat_t00 = stats.ttest_ind(df_resp["min_offer_final"], df_resp_t00["min_offer"])
resp_wc_stat = stats.wilcoxon(df_resp["min_offer"], df_resp["min_offer_final"])
res = {
"n": df_resp["min_offer"].shape[0],
"mean (initial)": metrics.get_mean(df_resp["min_offer"]),
"mean": metrics.get_mean(df_resp["min_offer_final"]),
"median": df_resp["min_offer_final"].median(),
"mode": df_resp["min_offer_final"].mode()[0],
"standard deviation": metrics.get_std(df_resp["min_offer"]),
"standard deviation": metrics.get_std(df_resp["min_offer_final"]),
# "rejection_ratio": rejection_ratio(df_prop)
"stat": resp_stat[0],
"p-value": resp_stat[1],
"stat-t00": resp_stat_t00[0],
"p-value-t00": resp_stat_t00[1],
"stat-wc": resp_wc_stat[0],
"p-value-wc": resp_wc_stat[1]
}
return {k: (f"{v:.3f}" if | pd.notnull(v) | pandas.notnull |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.pandas.typedef.typehints import (
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.sql.types import BooleanType
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BooleanOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([True, True, False])
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def float_pser(self):
return pd.Series([1, 2, 3], dtype=float)
@property
def float_psser(self):
return ps.from_pandas(self.float_pser)
@property
def other_pser(self):
return | pd.Series([False, False, True]) | pandas.Series |
import datetime
import logging
import random
import re
import time
from typing import Iterator, List, Union, Dict
from urllib.parse import quote
import pandas as pd
import requests
from bs4 import BeautifulSoup
from .conn_postgresql import ConnPostgreSQL
log = logging.getLogger(__name__)
class HhParser:
"""Парсер hh.ru."""
def __init__(self, area: int, search_period: int, search_text: str, search_regex: str) -> None:
"""
:param area: Регион поиска (1 - Москва)
:param search_period: Период поиска в днях
:param search_text: Поисквовый запрос
:param search_regex: Уточняющая регулярка для названия вакансии
"""
self.__area = area
self.__search_period = search_period
self.__search_text = search_text
self.__search_regex = search_regex
self.__base_url = 'https://hh.ru/search/vacancy'
self.__url_params = {
'search_period': self.__search_period,
'clusters': 'true',
'area': self.__area,
'text': quote(self.__search_text),
'enable_snippets': 'true',
'page': 0
}
self.__session = requests.Session()
self.__headers = {
'accept': '*/*',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/76.0.3809.100 Safari/537.36'
}
area = property(lambda self: self.__area)
search_period = property(lambda self: self.__search_period)
search_text = property(lambda self: self.__search_text)
search_regex = property(lambda self: self.__search_regex)
class HhParserResults:
"""Результаты парсинга hh."""
def __init__(self, data: pd.DataFrame) -> None:
"""
:param data: Данные парсинга
"""
self.__data = data
self.area = None
self.search_period = None
self.search_text = None
self.search_regex = None
self.parse_duration = None
self.df_parsing_results = None
self.df_current_jobs = None
self.df_unique_jobs = None
self.df_unique_closed_jobs = None
data = property(lambda self: self.__data)
@staticmethod
def _get_url_with_params(url: str, params: dict) -> str:
"""
Сформируй URL с параметрами.
:param url: URL
:param params: Параметры URL
"""
return f'{url}?' + '&'.join([f'{k}={v}' for k, v in params.items()])
def _get_urls_pages_with_vacancies(self) -> Iterator[str]:
"""Получи URL страниц с вакансиями."""
start_url = self._get_url_with_params(self.__base_url, self.__url_params)
urls = [start_url]
response = self.__exponential_backoff(start_url)
if response is not False:
result = BeautifulSoup(response.content, 'lxml')
pages = result.find_all('a', attrs={'data-qa': 'pager-page'})
page_count = int(pages[-1].text)
url_params = self.__url_params
for i in range(page_count - 1):
url_params['page'] = i + 1
urls.append(self._get_url_with_params(self.__base_url, url_params))
log.info(f'Found {len(urls)} pages with "{self.__search_text}" vacancies')
yield from urls
else:
log.error(f'Start request failed')
raise RuntimeError('Request failed')
def run(self) -> HhParserResults:
"""Запусти парсер."""
time_start = time.monotonic()
log.info(f'Looking for "{self.__search_text}" vacancies on hh.ru...')
vacancies_pages_urls = self._get_urls_pages_with_vacancies()
raw_vacancies_data = []
url_counter = 1
for url in vacancies_pages_urls:
log.info(f'Parsing page {url_counter}...')
response = self.__exponential_backoff(url)
if response is not False:
result = BeautifulSoup(response.content, 'lxml')
vacancies_divs = result.find_all('div', attrs={
'data-qa': 'vacancy-serp__vacancy'
})
premium_vacancies_divs = result.find_all('div', attrs={
'data-qa': 'vacancy-serp__vacancy vacancy-serp__vacancy_premium'
})
vacancies_data = self._get_data_from_divs(vacancies_divs)
premium_vacancies_data = self._get_data_from_divs(premium_vacancies_divs)
raw_vacancies_data += vacancies_data + premium_vacancies_data
else:
log.error(f'Request failed')
raise RuntimeError('Request failed')
url_counter += 1
df = pd.DataFrame(raw_vacancies_data)
if len(df) == 0:
log.error(f'No results found for settings: area={self.__area}, period={self.__search_period}, '
f'text={self.__search_text}, specifying_regex={self.__search_regex}')
raise RuntimeError('No results found')
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
df = df[['date', 'title', 'salary', 'company', 'href']].sort_values(by='date', ascending=False)
parse_duration = round(time.monotonic() - time_start, 2)
log.info(f'Found {len(df)} vacancies in {parse_duration} seconds')
results = self.HhParserResults(df)
results.parse_duration = parse_duration
results.area = self.__area
results.search_period = self.__search_period
results.search_text = self.__search_text
results.search_regex = self.__search_regex
return results
def _vacancy_name_check(self, title: str) -> bool:
"""
Проверь название вакансии уточняющим регулярным выражением.
:param title: Название вакансии
"""
if re.search(self.__search_regex, title, flags=re.IGNORECASE):
return True
return False
@staticmethod
def _process_date(raw_date: str) -> str:
"""
Преобразуй дату публикации вакансии.
:param raw_date: Дата из вакансии
"""
date_dict = {
'января': '01',
'февраля': '02',
'марта': '03',
'апреля': '04',
'мая': '05',
'июня': '06',
'июля': '07',
'августа': '08',
'сентября': '09',
'октября': '10',
'ноября': '11',
'декабря': '12'
}
date_arr = raw_date.split(' ')
for i in range(len(date_arr)):
try:
date_arr[i] = date_dict[date_arr[i]]
except KeyError:
pass
# Добавляем год к дате
date_arr.append(str(datetime.datetime.now().year))
if datetime.datetime.strptime('.'.join(date_arr), '%d.%m.%Y') > datetime.datetime.now():
date_arr[-1] = str(datetime.datetime.now().year - 1)
return '.'.join(date_arr)
def _get_data_from_divs(self, divs: List) -> List[dict]:
"""
Получи данные из блоков с вакансиями.
:param divs: Блоки с вакансиями
"""
results = []
for div in divs:
title = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-title'}).text
if not self._vacancy_name_check(title):
continue
company_data = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-employer'})
company = company_data.text if company_data else 'Не определено'
href = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-title'}).get('href')
date = self._process_date(
div.find('span', attrs={'class': 'vacancy-serp-item__publication-date'}).text.replace('\xa0', ' ')
)
salary_data = div.find('span', attrs={'data-qa': 'vacancy-serp__vacancy-compensation'})
salary = salary_data.text.replace('\xa0', '') if salary_data else 'Не указано'
results.append({'title': title, 'company': company, 'salary': salary, 'date': date, 'href': href})
return results
def __exponential_backoff(self, url: str) -> Union[requests.Response, bool]:
"""
Экспоненциальная выдержка для 403, 500 и 503 ошибки.
:param url: URL запроса
:return: Ответ сервера или False при ошибке
"""
for n in range(0, 5):
log.debug(f'GET request to URL {url}')
response = self.__session.get(url, headers=self.__headers)
if response.status_code in [403, 500, 503]:
log.debug(f'HTTP error: {response.status_code}. Trying again. Attempt {n + 1}')
time.sleep((2 ** n) + random.random())
elif response.status_code == 200:
return response
else:
log.error(f'HTTP error {response.status_code} during requesting URL: {url}')
return False
log.error(f'Failed request URL {url} in 5 attempts')
return False
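# Illustrative usage sketch (not part of the original module): HhParser.run() performs live
# HTTP requests against hh.ru, so the example stays commented out; the parameter values and
# the search regex below are assumptions chosen only for illustration.
#
# parser = HhParser(area=1, search_period=7, search_text='python developer', search_regex=r'python')
# results = parser.run()
# print(results.data.head())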
class HhParserResultsProcessor:
"""Обработка результатов парсинга."""
def __init__(self, hh_parsed_data: HhParser.HhParserResults, pg_conn=ConnPostgreSQL) -> None:
"""
:param hh_parsed_data: Результаты парсинга
:param pg_conn: Активное подключение к PostgreSQL
"""
self.__hh_parsed_data = hh_parsed_data
self.__df = hh_parsed_data.data
self.__parsing_duration = hh_parsed_data.parse_duration
self.__pg_conn = pg_conn
hh_parsed_data = property(lambda self: self.__hh_parsed_data)
report_folder = property(lambda self: self.__report_folder)
def run(self) -> HhParser.HhParserResults:
"""Запусти обработку результатов парсинга."""
self._get_parsing_results_df()
self._get_current_jobs_df()
self._get_unique_jobs_df()
self._get_unique_closed_jobs_df()
return self.__hh_parsed_data
def _find_jobs_without_salary(self) -> Dict[str, Union[int, float]]:
"""Найди % вакансий без указания зарплаты."""
unknown_salary_count = self.__df.loc[self.__df['salary'] == 'Не указано']['salary'].count()
unknown_salary_percent = round((unknown_salary_count / len(self.__df)) * 100, 2)
log.info(f'Jobs without salary: {unknown_salary_percent}%')
return {'jobs_without_salary': unknown_salary_percent}
def _find_salary_mean_and_median(self) -> Dict[str, Union[int, float]]:
"""Найди медианную, среднюю, среднюю максимальную и средней минимальную зарплаты."""
salaries_min = []
salaries_max = []
for i in range(len(self.__df)):
# Указана зарплата "от"
if self.__df.loc[i, 'salary'].split()[0] == 'от':
salaries_min.append(int(self.__df.loc[i, 'salary'].split()[1]))
# Указана зарплата "до"
elif self.__df.loc[i, 'salary'].split()[0] == 'до':
salaries_max.append(int(self.__df.loc[i, 'salary'].split()[1]))
# Указана вилка зарплаты
elif len(self.__df.loc[i, 'salary'].split()[0].split('-')) == 2:
fork = self.__df.loc[i, 'salary'].split()[0].split('-')
salaries_min.append(int(fork[0]))
salaries_max.append(int(fork[1]))
# Зарплата не указана
elif self.__df.loc[i, 'salary'] == 'Не указано':
pass
# Указана фиксированная зарплата
else:
salaries_min.append(int(self.__df.loc[i, 'salary'].split()[0]))
salaries_max.append(int(self.__df.loc[i, 'salary'].split()[0]))
salaries_all = salaries_min + salaries_max
salary_mean = round(pd.Series(salaries_all).mean())
salary_median = round(pd.Series(salaries_all).median())
min_salary_mean = round(pd.Series(salaries_min).mean())
max_salary_mean = round(pd.Series(salaries_max).mean())
log.info(f'Mean salary: {salary_mean}, median salary: {salary_median}, mean min salary: {min_salary_mean}, '
f'mean max salary: {max_salary_mean}')
return {'salary_mean': salary_mean,
'salary_median': salary_median,
'min_salary_mean': min_salary_mean,
'max_salary_mean': max_salary_mean}
def _get_parsing_results_df(self) -> None:
"""Сформируй датафрейм для таблицы "parsing_results"."""
data_for_update = {}
data_for_update.update(self._find_jobs_without_salary())
data_for_update.update(self._find_salary_mean_and_median())
data_for_update.update({'jobs_count': len(self.__df),
'date': datetime.datetime.now().strftime("%Y-%m-%d"),
'time_parse': self.__parsing_duration})
df = pd.DataFrame([data_for_update])
df['date'] = pd.to_datetime(df['date'])
self.__hh_parsed_data.df_parsing_results = df
log.info(f'DataFrame for "parsing_results" table generated')
def _get_current_jobs_df(self) -> None:
"""Сформируй датафрейм для таблицы "current_jobs"."""
min_salary = []
max_salary = []
df = self.__df.copy().reset_index(drop=True)
for i in range(len(df)):
# Указана зарплата "от"
if df.loc[i, 'salary'].split()[0] == 'от':
min_salary.append(int(df.loc[i, 'salary'].split()[1]))
max_salary.append(int(df.loc[i, 'salary'].split()[1]))
# Укащана зарплата "до"
elif df.loc[i, 'salary'].split()[0] == 'до':
min_salary.append(0)
max_salary.append(int(df.loc[i, 'salary'].split()[1]))
# Указана вилка зарплаты
elif len(df.loc[i, 'salary'].split()[0].split('-')) == 2:
fork = df.loc[i, 'salary'].split()[0].split('-')
min_salary.append(int(fork[0]))
max_salary.append(int(fork[1]))
# Зарплата не указана
elif df.loc[i, 'salary'] == 'Не указано':
min_salary.append(0)
max_salary.append(0)
# Указана фиксированная зарплата
else:
min_salary.append(int(df.loc[i, 'salary'].split()[0]))
max_salary.append(int(df.loc[i, 'salary'].split()[0]))
df['min_salary'] = min_salary
df['max_salary'] = max_salary
df['mean_salary'] = (df['min_salary'] + df['max_salary']) / 2
df = df.sort_values(['mean_salary', 'max_salary', 'min_salary'], ascending=False).reset_index(drop=True)
df['row'] = list(range(1, len(df) + 1))
self.__hh_parsed_data.df_current_jobs = df[['row', 'date', 'title', 'company', 'salary', 'href']]
log.info(f'DataFrame for "current_jobs" table generated')
def _get_unique_jobs_merged_df(self) -> pd.DataFrame:
"""Получи сджойненый датафрейм уникальных вакансий из Postgres и результатов парсинга."""
pg_unique_jobs_raw = self.__pg_conn.get_table(table_name='unique_jobs')
pg_unique_jobs = self._get_df_from_pgtable(pg_unique_jobs_raw)
if pg_unique_jobs is None or pg_unique_jobs.empty:
pg_unique_jobs = pd.DataFrame.from_dict({'date': [], 'href': []})
pg_unique_jobs['date'] = pd.to_datetime(pg_unique_jobs['date'])
pg_unique_jobs['href'] = pg_unique_jobs['href'].astype(str)
r = | pd.merge(pg_unique_jobs, self.__df[['date', 'href']], on='href', how='outer') | pandas.merge |
import argparse
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib as mpl
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
from processing_helpers import *
#from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
today = datetime.today()
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"--cdph_date",
type=str,
help="i.e 20201026",
default=None
)
return parser.parse_args()
def plot_emresource(ems_list, scale= '', channels = None, palette = None, add_grid = True):
if channels == None:
channels = ['ICU conf', 'non ICU', 'CLI admissions']
if palette == None:
palette = ('#913058', "#F6851F", "#00A08A", "#D61B5A", "#5393C3", "#F1A31F", "#98B548", "#8971B3", "#969696")
ref_df = load_ref_df(ems_nr=list(ems_list))
ref_df = ref_df.sort_values(['covid_region', 'date'])
ref_df = ref_df[(ref_df['date'] >= first_plot_day) & (ref_df['date'] <= last_plot_day)]
ref_df = ref_df.rename(columns={
'confirmed_covid_deaths_prev_24h' : 'deaths',
'confirmed_covid_icu' : 'ICU conf',
'confirmed_covid_on_vents' : 'vents conf',
'suspected_and_confirmed_covid_icu' : 'ICU conf+susp',
'covid_non_icu' : 'non ICU',
'inpatient': 'CLI admissions',
'new_confirmed_cases': 'Confirmed cases (public)'
})
ref_df = ref_df[['date', 'covid_region'] + channels]
if len(ems_list) == 2:
fig = plt.figure(figsize=(12, 5))
fig.subplots_adjust(right=0.97, left=0.07, hspace=0.15, top=0.95, bottom=0.01)
axes = [fig.add_subplot(1, 2, x + 1) for x in range(len(ems_list))]
else:
fig = plt.figure(figsize=(14, 12))
fig.subplots_adjust(right=0.97, wspace=0.2, left=0.1, hspace=0.5, top=0.95, bottom=0.07)
axes = [fig.add_subplot(4, 3, x + 1) for x in range(len(ems_list))]
for ei, ems in enumerate(ems_list):
ax = axes[ei]
if add_grid:
ax.grid(b=True, which='major', color='#999999', linestyle='-', alpha=0.3)
df = ref_df[ref_df['covid_region'] == ems]
for (c, name) in enumerate(channels):
df['moving_ave'] = df[name].rolling(window=7, center=True).mean()
ax.plot(df['date'].values, df['moving_ave'], color=palette[c], label=name)
ax.scatter(df['date'].values, df[name], s=10, linewidth=0, color=palette[c], alpha=0.7, label='')
ax.set_title('covid region %d' % ems)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b-%d'))
#ax.set_ylim(0, df[name].max())
if scale == 'log':
ax.set_yscale('log')
if ei+1 == len(ems_list):
ax.legend(loc='upper left', shadow=False, ncol=1)
regions = '-'.join(map(str, ems_list))
    fig.savefig(os.path.join(plot_path, f'EMResource_and_CLI_by_covidregion_{regions}.png'))
    fig.savefig(os.path.join(plot_path, f'EMResource_and_CLI_by_covidregion_{regions}.pdf'), format='PDF')
if __name__ == '__main__' :
first_plot_day = pd.to_datetime(date(2021, 2, 1))
last_plot_day = | pd.to_datetime(today) | pandas.to_datetime |
from copy import deepcopy
import numpy as np
import pandas as pd
import torch as t
import torch.nn as nn
from scipy import constants, linalg
from pyhdx.fileIO import dataframe_to_file
from pyhdx.models import Protein
from pyhdx.config import cfg
# TORCH_DTYPE = t.double
# TORCH_DEVICE = t.device('cpu')
class DeltaGFit(nn.Module):
def __init__(self, deltaG):
super(DeltaGFit, self).__init__()
#self.deltaG = deltaG
self.register_parameter(name='deltaG', param=nn.Parameter(deltaG))
def forward(self, temperature, X, k_int, timepoints):
"""
# inputs, list of:
temperatures: scalar (1,)
X (N_peptides, N_residues)
k_int: (N_peptides, 1)
"""
pfact = t.exp(self.deltaG / (constants.R * temperature))
uptake = 1 - t.exp(-t.matmul((k_int / (1 + pfact)), timepoints))
return t.matmul(X, uptake)
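# Illustrative sketch (not part of pyhdx): calling DeltaGFit with small dummy tensors.
# The sizes (2 peptides, 3 residues, 4 timepoints) and the shape conventions below are
# assumptions chosen so that the matrix products in forward() are conformable.
def _demo_deltag_forward():
    deltaG = t.zeros(3, 1, dtype=t.float64)                               # one deltaG per residue
    model = DeltaGFit(deltaG)
    temperature = t.tensor(300.0, dtype=t.float64)                        # scalar temperature in K
    X = t.ones(2, 3, dtype=t.float64) / 3                                 # (N_peptides, N_residues) coverage
    k_int = t.ones(3, 1, dtype=t.float64)                                 # intrinsic exchange rates
    timepoints = t.tensor([[10.0, 30.0, 60.0, 120.0]], dtype=t.float64)   # (1, N_timepoints)
    return model(temperature, X, k_int, timepoints)                       # -> (N_peptides, N_timepoints)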
def estimate_errors(hdxm, deltaG):
"""
Calculate covariances and uncertainty (perr, experimental)
Parameters
----------
hdxm : :class:`~pyhdx.models.HDXMeasurement`
deltaG : :class:`~numpy.ndarray`
Array with deltaG values.
Returns
-------
"""
dtype = t.float64
joined = | pd.concat([deltaG, hdxm.coverage['exchanges']], axis=1, keys=['dG', 'ex']) | pandas.concat |
"""Exemplo de python."""
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
def plot_matplotlib(var):
fig = plt.figure(figsize=(16, 9))
plt.hist(var, bins=100)
plt.show()
def plot_pandas(var):
fig = plt.figure(figsize=(16, 9))
ax = fig.add_subplot(111)
var.plot.hist(bins=100, ax=ax)
plt.show()
def plot_seaborn(var):
"""Faz o plot de um Histograma utilizado seaborn.
Parameters
----------
var : type
Minha variável.
Returns
-------
type
Description of returned object.
"""
fig = plt.figure(figsize=(16, 9))
ax = fig.add_subplot(111)
sns.histplot(var, ax=ax)
plt.show()
if __name__ == "__main__":
x = np.random.normal(0, 10, size=1000)
X = | pd.DataFrame(x, columns=["x"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal( | notnull(values) | pandas.core.dtypes.missing.notnull |
# cancer without number
import pandas as pd
import scipy.stats as ss
from add_weights_helpers import rev_comp, change_in_motifs, iupac_dict, motifs, \
motif_check, mirna_weight_calc
# from add_weights_helpers import get_whole_sequence
path = '/media/martyna/Pliki2/praca/IChB/ZGM/repos/'
df_all_mut = pd.read_csv(
path + 'files/output/DATA_pancancer/all_mutations.csv'
)
df_all_mut['cancer'] = df_all_mut['cancer'].apply(
lambda x: x.replace('1', '').replace('2', '').replace('3', '')
)
df_all_mut.drop('Unnamed: 0', axis=1, inplace=True)
print(df_all_mut.head())
df_patient_count = pd.read_excel(
path + 'files/output/DATA_pancancer/patient_mutation_count.xlsx'
)
df_patient_count['cancer_id'] = df_patient_count['cancer_id'].apply(
lambda x: x.replace('1', '').replace('2', '').replace('3', '')
)
df_all_mut = df_all_mut.join(df_patient_count.set_index(['patient_id', 'cancer_id']),
['indiv_name', 'cancer'],
how='left')
df_all_mut['>10000'] = df_all_mut['mutation_count'].apply(
lambda x: 1 if x > 10000 else 0
)
df_all_mut['cut_region'] = df_all_mut.apply(
lambda x: 1 if
((x['type'] == 'flanking-5' and x['from end'] == -1)
or (x['type'] == 'flanking-3' and x['from_start'] == 1)
or (x['type'] == 'loop' and x['from_start'] == 1)
or (x['type'] == 'loop' and x['from end'] == -1)
or (x['type'] == 'pre-seed' and x['from_start'] == 1)
or (x['type'] == 'post-seed' and x['from end'] == -1)
) else 0,
axis=1
)
df_all_mut['duplex'] = df_all_mut['type'].apply(
lambda x: 1 if x in ['pre-seed', 'seed', 'post-seed'] else 0
)
df_all_mut['seed'] = df_all_mut.apply(
lambda x: 1 if x['type'] == 'seed' and ((x['arm'] == x['balance'])
or x['balance'] == 'both') else 0,
axis=1
)
df_coordinates = pd.read_csv(
path + 'files/files_for_mirnaome_mirbase22.1/coordinates_withseq.bed',
sep='\t',
header=None
)
print(df_coordinates.head())
print(df_coordinates[df_coordinates.duplicated(subset=[0, 3],
keep='first')])
print(df_all_mut.shape)
df_all_mut = df_all_mut.join(df_coordinates.set_index([0, 3]),
on=['chrom', 'pre_name'],
how='left')
df_all_mut = df_all_mut[(df_all_mut['pos'] >= df_all_mut[1])
& (df_all_mut['pos'] <= df_all_mut[2])]
print(df_all_mut.shape)
df_all_mut['whole_seq'] = df_all_mut[4].str.upper()
df_all_mut['whole_seq'] = df_all_mut.apply(
lambda x: x['whole_seq'].replace('T', 'U') if x['orientation'] == '+'
else ''.join([rev_comp[c] for c in list(x['whole_seq'])])[::-1],
axis=1
)
# df_all_mut['mutation_seq'] = df_all_mut.apply(
# change_sequence,
# axis=1
# )
for key in motifs.keys():
motif = key
for letter, reg in iupac_dict.items():
motif = motif.replace(letter, reg)
df_all_mut['motifs_{}'.format(key)] = df_all_mut.apply(lambda x: motif_check(x, motif, key), axis=1)
motifs_cols = [col for col in df_all_mut.columns if 'motifs_' in str(col)]
print(motifs_cols)
df_all_mut.head()
df_all_mut['motifs'] = df_all_mut.apply(
lambda x: change_in_motifs(x, motifs_cols),
axis=1
)
df_all_mut['weight'] = df_all_mut.apply(
lambda x: 2 if x['seed'] == 1 else (
1.5 if (x['duplex'] == 1)
or (x['motifs'] == 1) or (x['cut_region'] == 1) else 1
),
axis=1
)
print(df_all_mut.head())
df_all_mut.to_csv(
path + 'files/output/DATA_pancancer/all_mutations_weights_new.csv'
)
df_localization = pd.read_csv(
path + 'files/files_for_mirnaome_mirbase22.1/localizations.csv'
)
df_localization = df_localization.groupby(['chrom', 'pre_name', 'start_pre', 'orientation',
'balance', 'type', 'arm']).min()
df_localization = df_localization.unstack(5).unstack(5)[['start', 'stop']]
print(df_localization.head())
print(df_localization.shape)
df_localization = df_localization.join(df_coordinates.set_index([0, 3]), ['chrom', 'pre_name'], 'inner')
print(df_localization.shape)
df_localization = df_localization.reset_index()
df_localization = df_localization[(df_localization[('start', 'seed', '5p')] <= df_localization[2])
& (df_localization[('start', 'seed', '5p')] >= df_localization[1])]
print(df_localization.head())
print(df_localization.shape)
df_localization['whole_seq'] = df_localization[4].str.upper()
df_localization['whole_seq'] = df_localization.apply(
lambda x: x['whole_seq'].replace('T', 'U') if x['orientation'] == '+'
else ''.join([rev_comp[c] for c in list(x['whole_seq'])])[::-1],
axis=1
)
df_localization['weight'] = df_localization.apply(lambda x: mirna_weight_calc(x), axis=1)
df_localization.to_csv(
path + 'files/files_for_mirnaome_mirbase22.1/localizations_weight_new.csv'
)
df_localization['weight_prob'] = df_localization['weight'] / df_localization['weight'].sum()
print(df_localization.head())
coords = pd.read_csv(
path + 'files/files_for_mirnaome_mirbase22.1/coordinates.bed',
sep='\t', header=None)
coords['length'] = coords[2] - coords[1]
all_mirnas_len = coords[coords[3].str.contains('hsa-')]['length'].sum()
coords['prob'] = coords['length'] / all_mirnas_len
df_occur = pd.DataFrame()
df_mutations_summary = | pd.DataFrame() | pandas.DataFrame |
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes und FixedLimits sind standardmaessig Falsch; RefPerc ist standardmaessig Wahr
>>> # die Belegung von MCategory gemaess FixedLimitsHigh/Low erfolgt immer ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits wird automatisch auf Wahr gesetzt wenn 3Classes Wahr ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
import math
import sys
from copy import deepcopy
from itertools import chain
import scipy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
    shiftSize: the difference between 2 indices for dValue and dt
    windowSize: size for rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
    windowSize must be an even number
    for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
    dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
            mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]  # integer division: .iloc requires an int position
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
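# Illustrative usage sketch (not part of the original module): getDerivative on a DataFrame
# with a DatetimeIndex and one numeric column; column name and values are examples only.
def _demoGetDerivative():
    idx=pd.date_range('2021-01-01 00:00:00',periods=300,freq='S')
    df=pd.DataFrame({'p':np.linspace(0.,10.,300)},index=idx)
    # rolling-mean variant (savgol_polyorder=None); windowSize must be an even number
    return getDerivative(df,'p',shiftSize=1,windowSize=60)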
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
    returns the max. pMin for all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
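# Illustrative sketch (not part of the original module): fgetMaxpMinFromName with a minimal
# dfSegsNodesNDataDpkt; the assumption is that this DataFrame carries one row per NODE with
# the columns 'NODEsName', 'pMin' and 'pMinMlc' (node names and values are examples only).
def _demoFgetMaxpMinFromName():
    dfSegsNodesNDataDpkt=pd.DataFrame({'NODEsName':['K0001','K0002'],'pMin':[1.5,2.5],'pMinMlc':[600.,700.]})
    return fgetMaxpMinFromName('K0001~K0002',dfSegsNodesNDataDpkt) # -> 2.5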
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
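# Illustrative usage sketch (not part of the original module): 3 sections of 12 minutes each,
# ending at timeEnd; the timestamp is an example only.
def _demoGenTimespans():
    timeEnd=pd.Timestamp('2021-03-19 12:00:00')
    # -> [(11:24,11:36), (11:36,11:48), (11:48,12:00)] on that day
    return genTimespans(timeStart=3,timeEnd=timeEnd,timeSpan=pd.Timedelta('12 Minutes'))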
def gen2Timespans(
timeStart # Anfang eines "Prozesses"
,timeEnd # Ende eines "Prozesses"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
    ,roundStr=None # e.g. '5min': timeStart.round(roundStr) and timeEnd likewise
):
"""
    generates 2 time ranges of equal length:
    1 around timeStart
    1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
        if idx==0:
            tLast=t2
        else:
            if t1 <= tLast:
                print("Zeitpaar überlappt?!")  # time pair overlaps the previous one?!
            tLast=t2  # remember the end of the previous pair so each pair is checked against its predecessor
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
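# Illustrative usage sketch (not part of the original module): total duration of two
# non-overlapping time pairs expressed in minutes; the timestamps are examples only.
def _demoFTotalTimeFromPairs():
    t1=pd.Timestamp('2021-03-19 01:00:00')
    tPairs=[(t1,t1+pd.Timedelta('5 minutes')),(t1+pd.Timedelta('10 minutes'),t1+pd.Timedelta('17 minutes'))]
    return fTotalTimeFromPairs(tPairs,denominator=pd.Timedelta('1 minute')) # -> 12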
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# paarweise über alle Zeilen
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not row1Value and row2Value:
tEin=i2
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif row1Value and not row2Value:
if tEin != None:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # sonst: Bed. ist jetzt Aus und war nicht Ein
# Bed. kann nur im ersten Fall Ein gehen
# wenn 1 x und 2 x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# letztes Paar
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
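# Illustrative usage sketch (not part of the original module): with the default fct the
# interval where column 'col' equals 46 is returned; index and values are examples only.
def _demoFindAllTimeIntervalls():
    t=pd.Timestamp('2021-03-19 01:02:00')
    df=pd.DataFrame({'col':[0,46,46,0]},index=[t+pd.Timedelta(seconds=i) for i in range(4)])
    return findAllTimeIntervalls(df) # -> [(t+1s, t+2s)]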
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
    # if fct is given:
    #     find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; True solitaires are not lost but are returned as a pair (t,t)
    #     True solitaires are ONLY included if s contains exactly 1 value and that value is True; the single returned pair then contains the solitaire timestamp for both times
    #     tdAllowed can be specified
    #         afterwards the time ranges are merged if they are no more than tdAllowed apart; these merged time ranges are then returned
    # if fct is None:
    #     tdAllowed must be specified
    #     split into time ranges that are no more than the threshold tdAllowed apart; these time ranges are returned
    #     in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for that
    #     because no time range contained in s shall be lost
    #     if s contains only 1 value, 1 time pair with the same timestamp for both times is returned, provided the value is not None
    # returns an array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 Paar mit selben Zeiten wenn das 1 Element Wahr
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 Paar mit selben Zeiten wenn das 1 Element nicht None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# paarweise über alle Zeiten
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# beim ersten Paar "geht Ein"
pass
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singulaeres Ereignis
# Paar mit selben Zeiten
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # geht Aus ohne Ein zu sein
if idx > 0: # Info
pass
else:
# im ersten Paar
pass
# wenn 1 x und 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# Behandlung letztes Paar
# bleibt Ein am Ende der Series: Paar speichern
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# Behandlung tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# paarweise über alle Zeiten
# neues Paar beginnen
anzInPair=1 # Anzahl der Zeiten in aktueller Zeitspanne
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # Zeit zwischen 2 Zeiten > als Schwelle: Zeitspanne ist abgeschlossen
if tEin==None:
# erstes Paar liegt bereits > als Schwelle auseinander
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
# aktuelle Zeitspanne beginnt beim 1. Wert und geht über Schwellwert
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# Zeitspanne abschließen
tPair=(tEin,i1)
tPairs.append(tPair)
# neue Zeitspanne beginnen
tEin=i2
anzInPair=1
else:
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
anzInPair=2
else: # Zeitspanne zugelassen, weiter ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# letztes Zeitpaar behandeln
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# ein letzter Wert wuerde ueber bleiben, letzte Zeitspanne verlängern ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # Folgepaar in vorheriges Paar integrieren
tPairs.remove(tp2) # Folgepaar löschen
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # Rekursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
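# Minimal usage sketch for fCombineSubsequenttPairs (assumption: the timestamps below are illustrative only;
# this helper is never called at import time):
def _exampleCombineSubsequenttPairs():
    tPairs=[(pd.Timestamp('2021-03-19 01:02:01'),pd.Timestamp('2021-03-19 01:02:02'))
           ,(pd.Timestamp('2021-03-19 01:02:03'),pd.Timestamp('2021-03-19 01:02:05'))]
    # the gap between 01:02:02 and 01:02:03 is exactly 1 second <= tdAllowed, so both intervals are merged
    return fCombineSubsequenttPairs(tPairs,tdAllowed=pd.Timedelta('1 second'))
    # -> [(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]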
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- parameters and functions for LDS reports
# ---------------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command resp. which state)
# the commands resp. states (the values of colRegExEventID) must be defined below in order to define the marker (of the command resp. state)
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value for commands (==> eventCCmds)
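# Minimal sketch of what pSIDEvents extracts (assumption: 'XYZ_01' is a made-up valve ID, not taken from real data):
def _examplePSIDEvents():
    m=re.search(pSIDEvents,'Objects.3S_FBG_ESCHIEBER.XYZ_01.Out.AUF')
    # m.group('colRegExMiddle')     -> '3S_FBG_ESCHIEBER'
    # m.group('colRegExSchieberID') -> 'XYZ_01'
    # m.group('colRegExEventID')    -> 'Out.AUF'
    return m.groupdict()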
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
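# Sketch of how the defaults are meant to be used (assumption: illustrative only): LDSParameterDataD
# supplies the value for any parameter an LDSI element in LDS_Para.xml does not set explicitly,
# e.g. LDSParameterDataD['TIMER'] -> 180, and a missing TTIMERTOALARM defaults to TIMER/4 -> 45.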
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
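# Sketch (assumption: '6_AAD_41_OHV1' is used here only because it already serves as a SEGName example below):
#   fDIVNameFromSEGName('6_AAD_41_OHV1') -> '6_41' (pattern groups 1 and 3)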
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
funktioniert im Prinzip fuer SEG- und Druck-Ergs: jede Erg-PV eines Vektors liefert die Basis gueltig fuer alle Erg-PVs des Vektors
d.h. die Erg-PVs eines Vektors unterscheiden sich nur hinten
siehe auch fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
    see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df mit ODI Parametrierungsdaten
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # nur ergIDs, fuer die 2ndPatternPat zutrifft liefern
):
"""
returns string
mit strSep getrennten IDs aus dfODI, welche baseID enthalten (und bei pattern WAHR patternPat matchen)
baseID (und group(0) von patternPat bei pattern WAHR) sind in den IDs entfernt
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
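# Minimal sketch for fGetErgIDsFromBaseID (assumption: the two index entries are made up; real ODI data is larger):
def _exampleGetErgIDsFromBaseID():
    baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
    dfODI=pd.DataFrame(index=[baseID+'AL_S','IMDI.'+baseID+'QM_AV'])
    ergIDs=fGetErgIDsFromBaseID(baseID=baseID,dfODI=dfODI,pattern=False)     # -> 'AL_S '
    ergIDsIMDI=fGetErgIDsFromBaseID(baseID=baseID,dfODI=dfODI,pattern=True)  # -> 'QM_AV '
    return ergIDs,ergIDsIMDI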
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
    all segments with path data (edge sequences), with edge and node data as well as parameterization data
returns df:
DIVPipelineName
SEGName
    SEGNodes (Ki~Kk; key in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- Einlesen Modell
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- Segmente ermitteln
# --- per Modell
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- nur per LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- Segmentkantenzuege ermitteln
dfSegsNodeLst={} # nur zu Kontrollzwecken
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
            dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
            # output the minimum pressures
            df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
def fResValidSeriesSTAT_S(x): # STAT_S
if pd.isnull(x)==False:
if x >=0:
return True
else:
return False
else:
return False
def fResValidSeriesSTAT_S601(x): # STAT_S
if pd.isnull(x)==False:
if x==601:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S(x,value=20): # AL_S
if pd.isnull(x)==False:
if x==value:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S10(x):
return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
return fResValidSeriesAL_S(x,value=3)
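# Sketch (assumption: illustrative values): fResValidSeriesAL_S(20) -> True, fResValidSeriesAL_S10(10) -> True,
# fResValidSeriesAL_S(np.nan) -> False; these predicates are passed as fct to findAllTimeIntervallsSeries
# (via fGetResTimes below) to turn a result channel into a list of time intervals.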
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
# (almost) all available result channels
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # valve colors
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
    # all base colors except y (yellow)
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # valve symbols
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
    # from here on: states
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- LDS reports: functions and helper functions
# ------------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
def fGetResTimes(
    ResIDBases=[] # list of the result vector ID bases (word stems)
    ,df=pd.DataFrame() # TCsLDSRes...
    ,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # list of the result vector postfixes
    ,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # list of the result vector functions
    ,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # list of the result key names
    ,tdAllowed=pd.Timedelta('1 second') # allowed time span between "goes" and "comes (again)": the two time ranges adjacent to this span are counted as one time range
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
    Value: list of time pairs (or an empty list)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
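# Sketch of the structure returned by fGetResTimes (assumption: the ID and the times are illustrative only):
#   {'Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.':
#       {'Zustaendig': [(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
#       ,'Alarm': []
#       ,'Stoerung': []}}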
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
        (period,freq)=timeShiftPair
        timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
    # alarm goes ... alarm comes (again): if the time span in between is <= tdAllowed, this is _counted as the same alarm time span
    # i.e. it is _counted as being the same alarm in terms of content
    # None counts the alarms strictly separately
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.<KEY>.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: <KEY>S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
                # IDDct: the dct to be extended
                # alarms
                tPairs=IDDct['Alarm']
                for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
sortiert und direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen zusammenfasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
voneiander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
        # pressure result statements suitably condensed onto segment result statements
        SEGDruckResDct={}
        # remember whether an ID has already been counted for a SEG; the alarms of one ID shall only be counted for one SEG
        IDBereitsGezaehlt={}
        # over all pressure results
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
# voneiander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
    ,dfSegsNodesNDataDpkt=pd.DataFrame()
import sys
sys.path.append("../")
import argparse
from augur.utils import json_to_tree
import Bio
import Bio.Phylo
import json
import pandas as pd
import sys
from Helpers import get_y_positions
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("tree", help="auspice tree JSON")
parser.add_argument("output", help="tab-delimited file of attributes per node of the given tree")
parser.add_argument("--include-internal-nodes", action="store_true", help="include data from internal nodes in output")
parser.add_argument("--attributes", nargs="+", help="names of attributes to export from the given tree")
args = parser.parse_args()
# Load tree from JSON.
with open(args.tree, "r") as fh:
tree_json = json.load(fh)
tree = json_to_tree(tree_json)
# Collect attributes per node from the tree to export.
records = []
if args.attributes:
attributes = args.attributes
else:
        attributes = sorted(list(tree.root.node_attrs.keys()) + list(tree.root.branch_attrs.keys()))
    # iterate over all nodes so that --include-internal-nodes can take effect; terminal-only
    # filtering happens in the condition below
    for node in tree.find_clades():
if node.is_terminal() or args.include_internal_nodes:
record = {
"name": node.name
}
for attribute in attributes:
if attribute in node.node_attrs:
record[attribute] = node.node_attrs[attribute]["value"]
elif attribute in node.branch_attrs:
record[attribute] = node.branch_attrs[attribute]["value"]
else:
print(f"Attribute '{attribute}' missing from node '{node.name}'", file=sys.stderr)
records.append(record)
tree_records = []
heights = get_y_positions(tree)
for node in tree.find_clades(terminal=True):
tree_records.append(dict(strain=node.name, y=heights[node]))
tree_records_df = pd.DataFrame(tree_records)
# Convert records to a data frame and save as a tab-delimited file.
    df = pd.DataFrame(records)
import os
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Build haplotypes and make scatter plot for vizualization")
db_p = parser.add_argument_group('Sample name parameters')
db_p.add_argument('-gm', '--gmm_file', required=True, help='Tab separated file with IBS and non_IBS categorized by GMM model')
db_p.add_argument('-db', '--db_file', required=True, help='Tab separated file with variations genetared by IBSpy output')
db_p.add_argument('-rf', '--refId', required=True, help='Name of the genome reference used')
db_p.add_argument('-qr', '--qryId', required=True, help='Name of the query sample')
db_p.add_argument('-chr', '--chrNme', required=True, help='Chromosome name to be plotted')
db_p.add_argument('-cl', '--chr_length_file', required=True, help='Reference chromosome lenghts file')
hap_block_p = parser.add_argument_group('Haplotype blocks parameters')
hap_block_p.add_argument('-w', '--windSize', required=True, help='Windows size to count variations within')
hap_block_p.add_argument('-vf', '--varfltr', required=True, help='Filter variations above this threshold to compute GMM model')
hap_block_p.add_argument('-st', '--StitchVarNum', required=True, help='Stitching haplotypes: number of non-IBS "outliers" that must a appear consecutively in a windows to be called non-IBS')
out_files = parser.add_argument_group('Output files')
out_files.add_argument('-o','--out_img_file', help='Output scatter plot and bars with haplotypes in ".jpg" format ')
args = parser.parse_args()
gmm_file = args.gmm_file
db_file = args.db_file
refId = args.refId
qryId = args.qryId
chrNme = args.chrNme
chr_length_file = args.chr_length_file
windSize = int(args.windSize)
varfltr = int(args.varfltr)
StitchVarNum = int(args.StitchVarNum)
out_img_file = args.out_img_file
#### GMM input file ####
'''
It is a temporary file where IBS and non-IBS were computed (to 1 as IBS, and 0 non-IBS)
using the number of variations found in a windows of a size employing the GMM model.
'''
inFile = pd.read_csv(gmm_file, delimiter='\t')
byChrDf = inFile[inFile['seqname'] == chrNme].copy()
byChrDf.reset_index(drop=True, inplace=True)
#### stitching haplotypes: number of non-IBS "outliers" that must a appear consecutively in a windows to be called non-IBS ####
def stitch_haplotypes(byChrDf):
gmmDta = byChrDf['gauss_mx_IBS'].copy()
for i in range(len(gmmDta)):
if gmmDta[i] == 1:
if i < (len(gmmDta) - StitchVarNum):
count = 0
for n in range(1, (StitchVarNum+1)):
if gmmDta[i+n] == 0:
count += 1
if count == StitchVarNum:
continue
else:
gmmDta[i+1] = 1
hapBlock = gmmDta
hapBlock = pd.Series(hapBlock) # final haplotype block after stitching
byChrDf['hap_block'] = hapBlock.values # new column for stitched haplotypes
return byChrDf
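    # Sketch of the stitching rule (assumption: StitchVarNum=2 and the values are illustrative):
    #   gauss_mx_IBS [1,0,1] -> [1,1,1]: the single non-IBS window is overwritten because fewer than
    #   StitchVarNum consecutive non-IBS windows follow the IBS window.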
#### put back the k-mer variations Data Base file with all variations count per windows position ####
hapCntFile = pd.read_csv(db_file, delimiter='\t')
hapCntFile = hapCntFile[['seqname', 'start', 'variations']].copy()
hapCntFile = hapCntFile[hapCntFile['seqname'] == chrNme].copy()
byChrDf = stitch_haplotypes(byChrDf)
    byChrDf = pd.merge(hapCntFile, byChrDf, left_on='start', right_on='start', how='left')
import pandas as pd
import numpy as np
import os
from datetime import datetime
from IPython.display import IFrame,clear_output
# for PDF reading
import textract
import re
import sys
import docx
from difflib import SequenceMatcher
#######################################################################################
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
#######################################################################################
def dms_to_dd(x,as_string=True):
d,m,s = x.split()
result = abs(float(d)) + float(m)/60. + float(s)/3600.
if float(d) < 0:
result = -result
return result
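# Minimal sketch for dms_to_dd (assumption: the coordinate is illustrative only):
#   dms_to_dd('-70 57 54') -> -(70 + 57/60. + 54/3600.) = -70.965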
#######################################################################################
def convert_state(state):
return {'New Hampshire':'NH','Maine':'ME',
'Massachusetts':'MA','New Hampshire/Maine':'NH'}[state]
#######################################################################################
def doy_to_date(x, year=2008, jan1=1):
# jan1 is Day 1, usually
#if np.isnan(x):
# return np.nan
#print(x)
result = ( pd.Period(year = year-1, month=12, day=31, freq='D') +
pd.to_timedelta(x+(1-jan1), unit='days') )
return result.strftime('%Y-%m-%d')
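# Minimal sketch for doy_to_date (assumption: illustrative values):
#   doy_to_date(1, year=2008)  -> '2008-01-01' (day 1 counted from jan1=1)
#   doy_to_date(32, year=2008) -> '2008-02-01'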
#######################################################################################
def date_conversion(x, year=None, dateformat='%d-%m-%y'):
# year is Fall Year for date
# default interpretations:
# aaaa-bb-cc : Year/Month/Day
# PROBLEMATIC:
# aa-bb-cc : Month/Day/Year - or Day/Month/Year if aa>12
# Returns string
# Unknown / missing
if np.any([True for i in ['(earliest/latest)', '-999','no data','no response',
'unknown', 'missing', 'unknown', 'unkown','none',
# the following are added for postcard data
# 2021-02-07
'died', 'no res','skip','omit','card not received',
'card not returned', 'moved','nursing home','delete']
if i in str(x).lower()]):
return '-999'
elif (str(x).strip()=='') | (str(x).strip()=='?') | (str(x).strip()=='-?-'):
return '-999'
elif x in ['0',0]:
return '0'
xx = str(x)
if ('+1' in xx) | ('+2' in xx) | ('+3' in xx):
xx = xx.split('+')[0].strip()
outofbounds = False
if ((year < 1678) | (year > 2262)) & (year is not None):
outofbounds = True
if ((len(xx)==8) | ((len(xx)==10))) & ('-' not in xx) & ('/' not in xx):
#print xx, year
if (xx[-2]=='.') | ((len(xx)==8) & (xx.isdigit())):
xx = '{}-{}-{}'.format(xx[:4],xx[4:6],xx[6:8]) # year, month, day
#print xx, year
try:
if (len(xx)==8 ) & ('-' in xx):
xdt = pd.to_datetime(xx, format=dateformat)
else:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
if (len(xx)==8) & ('-' in xx):
# mostly a problem if 00-02-28 (i.e., thinking 00 is a month)
if (xx[2]=='-') & (xx[5]=='-'):
xx = '19'+xx
else:
xx = xx+', {}'.format(year)
elif (len(xx)==10)& ('-' in xx) & outofbounds:
if len(xx.split('-')[0]) >2:
y,m, d = (int(i) for i in xx.split('-'))
else:
d,m,y = (int(i) for i in xx.split('-'))
# latest thaw in August; earliest freeze in August
if ((m<=8) & (y== year+1)) | ((m>=8) & (y==year)):
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
else:
print ('+++++PROBLEM+++++')
print(xx)
xx = xx+', {}'.format(year)
else:
xx = xx+', {}'.format(year)
try:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
print ('**************')
print (e)
print (' {} can not be converted to YYYY/MM/DD'.format(str(x)))
print ('**************\n')
return '-999'
if year is not None:
# print type(y), type(year)
# latest thaw in September!,
# latest thaw in August; earliest freeze in August
if ((m < 8) & (y != (year+1))) | ((m>9) & (y!=year)) | (
((m==8) | (m==9)) & (y!=year) & (y!=(year+1) ) ):
if m<=8:
yearnew = year+1
else:
yearnew = year+0
print ('==================')
print ('Wrong Year in table')
print ('\tData from table: {} (start_year is {})'.format(xx, year))
print ('\t\tYMD: {}-{:02d}-{:02d}'.format(y,m,d))
print (' Recorded (or added) ice date year {} should be {}\n'.format(y, yearnew))
if (np.abs(int(y) - int(yearnew)) % 100) == 0:
print ('\tFORCING YEAR TO NEW VALUE (wrong century)')
y = yearnew
# OTHERWISE TRY FIXING IT BY INVERTING DATE
elif (len(xx)==8) & ('-' in xx):
#print xx
xx = '-'.join(xx.split('-')[::-1])
#print xx
# assuming default as before but switching backwards
xdt = pd.to_datetime(xx,format=dateformat)
d, m, y = xdt.day, xdt.month, xdt.year
if ((m <= 8) & (y != year+1)) | ((m>8) & (y!=year)):
if m<=8:
yearnew = year+1
else:
yearnew = year
if (np.abs(int(y) - int(yearnew)) % 100) == 0:
print ('\tFORCING YEAR TO NEW VALUE (wrong century)')
y = yearnew
else:
print (x, xx)
print ('\tSTILL A PROBLEM. Recorded year {} should be {}'.format(y, yearnew))
else:
print ('Problem fixed')
else:
print ('\tFORCING ICE YEAR TO NEW VALUE (assuming typo)')
y = yearnew
print (' {}-{}, new corrected ice date {:}-{:02d}-{:02d}'.format(year, year+1,y,m,d))
try:
##return '{:02d}-{:02d}-{:04d}'.format(m,d,y)
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
except ValueError as e:
print ('*****FINAL*****')
print (e)
print ('**************')
print ('{} can not be converted to YYYY/MM/DD'.format(str(x)))
return '-999'
#######################################################################################
######## READ IN FILES ################################################################
#######################################################################################
def read_all_files(filename_dict, readin_dict , verbose=False,logfile=None, record_contributor=True):
"""
    INPUT: filename_dict is a dictionary of file names, keyed by file type
           readin_dict is a dictionary of corrections, column renames, etc., keyed by (partial) filename
OUTPUT: All files merged into a Pandas DataFrame
"""
default_ext = {
'txt':{'delimiter':'\t'},
'tab':{'delimiter':'\t'}
}
dfresult = pd.DataFrame()
# run through the files
for file_ext in filename_dict.keys():
for f in filename_dict[file_ext]:
default_values = {'header':0, 'delimiter':None, 'sheetname':False,
'lakename':None, 'city':None, 'state':None,'contributor':None, 'reorient':False,
'column_rename':None,'ncolumns':None, 'split':False,
'multi':False, 'index_col':None}
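            # per-extension defaults (e.g. tab delimiter for .txt/.tab) and any per-file
            # overrides from readin_dict are layered on top of these defaults below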
if file_ext in default_ext:
for key, value in default_ext[file_ext].items():
default_values[key] = value
if logfile is not None:
logfile.write('===========\nReading in {}\n'.format(f))
if (np.array([i in f for i in readin_dict.keys()])).any():
lakeid = [i for i in readin_dict.keys() if i in f]
if len(lakeid) > 1:
print ('WARNING. There are too many similarly named readin_dict items. Could be a problem.')
if logfile is not None:
logfile.write('\nWARNING. There are too many similarly named readin_dict items.\n')
break
foo = readin_dict[lakeid[0]]
for key,value in foo.items():
default_values[key] = value
#if 'Updated Data 2019.5' in f:
# print(f)
df = read_ts(f,delimiter=default_values['delimiter'],
sheetname=default_values['sheetname'],
header=default_values['header'],
ncolumns=default_values['ncolumns'],
index_col=default_values['index_col'],
logfile = logfile,record_contributor=record_contributor)
if verbose:
if len(df)>0:
sys.stdout.write('\r[ {:150s} ]\r'.format(f))
#sys.stdout.flush()
else:
sys.stdout.write('Skipping {}\n'.format(f))
#sys.stdout.flush()
# specific case for Maine lakes
if default_values['reorient']:
if logfile is not None:
logfile.write('\tReorienting table.\n')
contributor = df.Contributor.values[0]
#df = df.set_index(df.columns[0])
#print('Maine drop')
#display(df.head())
#print(df.columns)
df = df.drop('Contributor',axis=1,level=0).unstack().reset_index()
#print('END Maine drop')
df['Contributor'] = contributor
if default_values['column_rename'] is not None:
if logfile is not None:
logfile.write('\tRenaming columns.\n')
df = df.rename(default_values['column_rename'],axis=1)
if default_values['lakename'] is not None:
if logfile is not None:
logfile.write('\tSetting lakename to {}\n'.format(default_values['lakename']))
df['lake'] = default_values['lakename']
if default_values['city'] is not None:
if logfile is not None:
logfile.write('\tSetting city to {}\n'.format(default_values['city']))
df['city'] = default_values['city']
if default_values['state'] is not None:
if logfile is not None:
logfile.write('\tSetting state to {}\n'.format(default_values['state']))
df['state'] = default_values['state']
if default_values['split']:
# rearrange years/seasons
if logfile is not None:
logfile.write('\tRearranging years/seasons\n')
df = sort_by_season(df)
if default_values['multi']:
if logfile is not None:
logfile.write('\tSorting by events.\n')
df = sort_by_events(df)
#if default_values['lakename'] is not None:
# df['lake'] = default_values['lakename']
if default_values['contributor'] is not None:
if logfile is not None:
logfile.write('\tAssigning contributor: {}\n'.format(default_values['contributor']))
df['Contributor'] = default_values['contributor']
if 'Updated Data' in f:
updated_year = f.split('Updated Data')[1].split('/')[0].strip()
if updated_year == '2018':
updated_year = 2018.5
elif updated_year == '':
updated_year = 2018.0
else:
updated_year = float(updated_year)
df['Updated Year'] = updated_year
"""
if 'Updated Data 2020.5' in f:
df['Updated Year'] = 2020.5
elif 'Updated Data 2020' in f:
df['Updated Year'] = 2020.0
elif 'Updated Data 2019.5' in f:
df['Updated Year'] = 2019.5
elif 'Updated Data 2018' in f:
df['Updated Year'] = 2018.5
elif 'Updated Data 2019' in f:
df['Updated Year'] = 2019.0
elif 'Updated Data' in f:
df['Updated Year'] = 2018.0
"""
df['FileName'] = f
try:
dfresult = dfresult.append(df,ignore_index=True, sort=False)
        except Exception:
            display(df)
            raise
return dfresult
#######################################################################################
def sort_by_events(df):
# Move multi-freeze thaw years into separate rows
iceon1col = [c for c in ['Freeze date 1',] if c in df.columns][0]
iceon2col = [c for c in ['Freeze date 2',] if c in df.columns][0]
iceoff1col = [c for c in ['Thaw date 1',] if c in df.columns][0]
iceoff2col = [c for c in ['Thaw date 2',] if c in df.columns][0]
ind = ((~df[iceon1col].isnull() | ~df[iceoff1col].isnull()) &
(~df[iceon2col].isnull() | ~df[iceoff2col].isnull()))
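    # ind marks winters with a second freeze/thaw event; those rows are duplicated below
    # with event 2 copied into the event-1 columns, so each row holds a single event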
# .copy
dfoo = df[ind].copy()
dfoo[iceon1col] = dfoo[iceon2col]
dfoo[iceoff1col] = dfoo[iceoff2col]
#print('sort by events Drop')
df = df.append(dfoo,ignore_index=True,sort=False).drop([iceoff2col,iceon2col],axis=1)
#print('END sort by events Drop')
# display(df)
return df
#######################################################################################
def sort_by_season(df):
#print (df.columns)
#display(df)
yearcolumn = [c for c in ['Year','year'] if c in df.columns][0]
iceoncolumn = [c for c in ['datefirstice','IceOnDOY','Ice On','Ice-On','Ice on'] if c in df.columns][0]
iceoffcolumn = [c for c in ['datelastice','IceOffDOY','Ice Off','Ice-Off','Ice off'] if c in df.columns][0]
# print df.columns
lakecolumn = [c for c in ['lakeid','lake'] if c in df.columns][0]
dropcolumns = [iceoncolumn, iceoffcolumn]
dfresult = pd.DataFrame()
for name, group in df.groupby(lakecolumn):
iceoff = group[iceoffcolumn].tolist() + [np.nan]
iceon = [np.nan] + group[iceoncolumn].tolist()
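        # offset the lists so that year Y's recorded freeze is paired with year Y+1's thaw,
        # i.e. each output row describes one winter labelled by its 'Fall Year'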
try:
years = [float(group[yearcolumn].astype(str).min()) - 1] + group[yearcolumn].tolist()
except:
print(yearcolumn)
display(group[yearcolumn])
display(df)
#print (kmtpasdf)
dfoo = pd.DataFrame({lakecolumn:name,
'Fall Year': years,
iceoncolumn:iceon,
iceoffcolumn:iceoff})
dfresult = dfresult.append(dfoo, ignore_index=True,sort=False)
#print('sort by season Drop')
dfresult = dfresult.merge(df.drop(dropcolumns,axis=1), left_on=[lakecolumn,'Fall Year'],
right_on=[lakecolumn,yearcolumn], how='left')
#print('END sort by season Drop')
for c in dfresult.columns:
## if c not in [lakecolumn, yearcolumn,'Fall Year']+dropcolumns:
if c in ['Contributor','Clerk']:
## print 'backfilling', c
dfresult[c] = dfresult[c].fillna(method='bfill')
## clean up, remove no result years OK
# print dfresult.shape
ind = dfresult[iceoncolumn].isnull() & dfresult[iceoffcolumn].isnull()
## display(dfresult[ind])
#.copy
dfresult = dfresult[~ind].copy()
#print dfresult.shape
# remove duplicates
#display(dfresult[dfresult.duplicated(subset=[lakecolumn,yearcolumn,
# iceoncolumn,iceoffcolumn],keep=False)])
dfresult = dfresult.drop_duplicates(subset=[lakecolumn,yearcolumn,
iceoncolumn,iceoffcolumn])
#print dfresult.shape
if 'Duration' in dfresult.columns:
#display(dfresult.tail(6))
#display(df.tail(6))
dfresult.loc[dfresult.index[:-1],'Duration'] = df.loc[df.index[:],'Duration'].values
# last duration should be removed
dfresult.loc[dfresult.index[-1],'Duration'] = np.nan
if dfresult.lake.values[0]!='Mirror Lake':
print(dfresult.columns)
display(dfresult.head())
            raise RuntimeError('Duration handling only checked for Mirror Lake; stopping for inspection')
return dfresult
#######################################################################################
#######################################################################################
#######################################################################################
def read_ts(filename, header=0, sheetname=False, index_col=None, logfile=None,delimiter=None,ncolumns=None,
record_contributor=True):
""" ncolumns : number of columns to keep, starting with first
"""
filetype = filename.split('.')[-1].lower()
if filetype == 'pdf':
tsdf = read_pdf(filename,logfile=logfile)
#elif filetype == 'jpg':
# tsdf = read_jpg(filename)
elif filetype in ['csv','txt','tab']:
tsdf = read_csv(filename, delimiter=delimiter, header=header,record_contributor=record_contributor)
#elif filetype in ['txt']:
# tsdf = read_csv(filename, delimiter=delimiter, header=header)
elif filetype in ['xls','xlsx']:
tsdf = read_excel(filename, sheetname=sheetname, logfile=logfile, index_col=index_col,header=header,ncolumns=ncolumns,
record_contributor=record_contributor)
elif filetype in ['doc','docx']:
if 'Updated Data 2019.5' in filename:
doc = docx.Document(filename)
if logfile is not None:
for p in doc.paragraphs:
logfile.write('\t{}\n'.format(p.text))
tsdf = pd.DataFrame()
"""
if 'Updated Data 2019.5' in filename:
doc = docx.Document(filename)
print ('=====================')
print (filename)
print ('=====================')
for p in doc.paragraphs:
print (p.text)
"""
elif filetype in ['jpg']:
if logfile is not None:
logfile.write('\tSKIPPING\n')
tsdf = pd.DataFrame()
else:
if logfile is not None:
logfile.write('\tSKIPPING\n')
tsdf = pd.DataFrame()
return tsdf
#######################################################################################
def read_csv(filename, delimiter=None, encoding='utf-8', header=0, record_contributor=True):
try:
df = pd.read_csv(filename, delimiter=delimiter, encoding='utf-8',engine='python',header=header)
if df.shape[1]==1:
print('{}\n\tToo few columns. Trying a different method.'.format(filename))
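            # delimiter=None with the python engine sniffs the separator; if everything
            # landed in a single column, retry with the C engine as a fallback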
df = | pd.read_csv(filename, delimiter=delimiter, encoding='utf-8',engine='c',header=header) | pandas.read_csv |
import os
import pandas as pd
from tkinter import filedialog
class playerinfo():
''' loads all dataframes with player info, teams, etc. '''
def __init__(self, *args, **kwargs):
self.path = filedialog.askdirectory()
# open files
self.players=None
self.famcontact=None
self.masterSUs=None
self.teams=None
self.unilist=None
self.open_main_files() # loads above
def open_main_files(self):
        ''' Auto-loads player & family contact info, teams/coaches, master signups,
        and unilog info '''
if os.path.exists('players.csv'):
self.players=pd.read_csv('players.csv', encoding='cp437')
else:
print('players.csv not found.')
self.players=pd.DataFrame()
if os.path.exists('family_contact.csv'):
self.famcontact=pd.read_csv('family_contact.csv', encoding='cp437')
else:
self.famcontact=pd.DataFrame()
if os.path.exists('Teams_coaches.xlsx'):
self.teams=pd.read_excel('Teams_coaches.xlsx', sheetname='Teams')
self.coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches') # load coach info
else:
self.teams= | pd.DataFrame() | pandas.DataFrame |
import os.path
import pandas as pd
import numpy as np
import scipy
import scipy.signal as sig
import scipy.interpolate as inter
"""
This module encapsulates functions related to correlation, causation and data formatting
"""
b = 1.5
c = 4
q1 = 1.540793
q2 = 0.8622731
z = lambda x: (x-x.mean())/np.std(x, ddof=1)
g = lambda x: x if abs(x) <= b else q1*np.tanh(q2*(c-abs(x)))*np.sign(x) if abs(x) <= c else 0
def correlate(x, y, margin, method='pearson'):
""" Find delay and correlation between x and each column o y
Parameters
----------
x : `pandas.Series`
Main signal
y : `pandas.DataFrame`
        Secondary signals
    margin : `float`
        Fraction of the series length used to extend the interpolation window and as the maximum candidate lag
    method : `str`, optional
Correlation method. Defaults to `pearson`. Options: `pearson`,`robust`,`kendall`,`spearman`
Returns
-------
`(List[float], List[int])`
List of correlation coefficients and delays in samples in the same order as y's columns
Notes
-----
Uses the pandas method corrwith (which can return pearson, kendall or spearman coefficients) to correlate. If robust
correlation is used, the mapping presented in [1]_ is used and then Pearson correlation is used. To speedup the lag finding,
the delays are calculated in log intervals and then interpolated by splines, as shown in [2]_, and the lag with maximum correlation
found in this interpolated function is then used as the delay.
References
----------
.. [1] <NAME>., <NAME>. "Fast Robust Correlation for High-Dimensional Data", Technometrics, vol. 63, Pages 184-198, 2021
.. [2] <NAME> & Papadimitriou, Spiros & <NAME>. (2005). BRAID: Stream mining through group lag correlations. Proceedings of the ACM SIGMOD International Conference on Management of Data. 599-610.
"""
beg, end = (x.index.min(), x.index.max())
y = interpolate(y,x.index,margin)
if(method == 'robust'):
method='pearson'
x = pd.Series(z(sig.detrend(x)), index=x.index, name=x.name)
x = x.apply(g)
y = y.apply(lambda s: z(sig.detrend(s))).applymap(g)
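        # robust option: z-score the detrended signals and clip them with the smooth
        # bounded transform g (see reference [1]) before computing Pearson correlation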
N = int(x.size*margin)
l = int(np.log2(N))
b = 4
log_lags = np.array([int(2**i+(j*2**i/b)) for i in range(2,l+1) for j in range(4) if 2**i+(j*2**i/b) < N])
log_lags = list(-1*log_lags)[::-1]+[-3,-2,-1,0,1,2,3]+list(log_lags)
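    # candidate lags are log-spaced (4 per octave) plus a dense window near zero, following
    # the BRAID scheme in reference [2]; intermediate lags are filled in below by spline
    # interpolation of the correlation-vs-lag curve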
new_lags = list(range(-1*max(log_lags),max(log_lags)+1))
vals = pd.DataFrame([lagged_corr(x,y,lag,method) for lag in log_lags])
vals = vals.apply(lambda s: inter.make_interp_spline(log_lags, abs(s),k=3)(new_lags))
peaks = vals.apply(lambda s: pd.Series([new_lags[i] for i in sig.find_peaks(s)[0]]+[new_lags[max(range(len(s)), key=s.__getitem__)]]).drop_duplicates())
peak_corr = pd.DataFrame(np.array([[x.corr((y[col].shift(int(peak)))[beg:end], method=method) if not pd.isna(peak) else 0 for peak in peaks[col]] for col in peaks]).transpose(), columns=y.columns)
dela = [peak_corr[col].abs().idxmax() for col in peak_corr]
delays = [int(peaks[col].iloc[dela[pos]]) for pos, col in enumerate(peak_corr)]
corrs = [round(peak_corr[col].iloc[dela[pos]],2) for pos, col in enumerate(peak_corr)]
return corrs, delays
def lagged_corr(x, y, lag, method='pearson'):
""" Find correlation between x and each column o y for a specific time lag
Parameters
----------
x : `pandas.Series`
Main signal
y : `pandas.DataFrame`
Secondary signals
lag : `int`
Number of samples to apply as lag before computing the correlation
method : `str`, optional
Correlation method. Defaults to `pearson`. Options: `pearson`,`kendall`,`spearman`
Returns
-------
`pandas.DataFrame`
Dataframe with the correlation value for each column of y
"""
if(method in ['pearson', 'kendall', 'spearman']):
return (y.shift(lag)[x.index[0]:x.index[-1]]).corrwith(x, method=method)
else:
return None
def find_delays(x, y):
""" Find delay between x and each column o y
Parameters
----------
x : `pandas.Series`
Main signal
y : `pandas.DataFrame`
Secondary signals
Returns
-------
`pandas.DataFrame`
Dataframe with the delay value for each column of y
"""
return y.apply(lambda k: sig.correlate(k,x,mode='valid')).apply(lambda k: k.abs().idxmax()-int(len(k/2))+1)
def interpolate(x, idx, margin):
""" Interpolate data to match idx+-margin
Parameters
----------
    x : `pandas.DataFrame`
Signal
idx : `pandas.DatetimeIndex`
Index to match
margin : `float`
Percentage of values to add to each side of index
Returns
-------
`pandas.DataFrame`
Dataframe with the same columns as x interpolated to match idx+-margin
Notes
-----
It infers the frequency for the given DatetimeIndex and extends it to margin times prior
and after. This new DatetimeIndex is then combined with the given DataFrame and the NaN
values are completed with linear interpolation then. In the end, only the new index values
are kept, so that it matches exactly the given idx dates (except for the margin values).
"""
fs = | pd.infer_freq(idx) | pandas.infer_freq |
# Copyright (c) 2018, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf as gd
from cudf.tests.utils import assert_eq
def make_frames(index=None, nulls="none"):
df = pd.DataFrame(
{
"x": range(10),
"y": list(map(float, range(10))),
"z": list("abcde") * 2,
}
)
df.z = df.z.astype("category")
df2 = pd.DataFrame(
{
"x": range(10, 20),
"y": list(map(float, range(10, 20))),
"z": list("edcba") * 2,
}
)
df2.z = df2.z.astype("category")
if nulls == "all":
df.y = np.full_like(df.y, np.nan)
df2.y = np.full_like(df2.y, np.nan)
if nulls == "some":
mask = np.arange(10)
np.random.shuffle(mask)
mask = mask[:5]
df.y.loc[mask] = np.nan
df2.y.loc[mask] = np.nan
gdf = gd.DataFrame.from_pandas(df)
gdf2 = gd.DataFrame.from_pandas(df2)
if index:
df = df.set_index(index)
df2 = df2.set_index(index)
gdf = gdf.set_index(index)
gdf2 = gdf2.set_index(index)
return df, df2, gdf, gdf2
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
@pytest.mark.parametrize("index", [False, "z", "y"])
def test_concat(index, nulls):
if index == "y" and nulls in ("some", "all"):
pytest.skip("nulls in columns, dont index")
df, df2, gdf, gdf2 = make_frames(index, nulls=nulls)
# Make empty frame
gdf_empty1 = gdf2[:0]
assert len(gdf_empty1) == 0
df_empty1 = gdf_empty1.to_pandas()
# DataFrame
res = gd.concat([gdf, gdf2, gdf, gdf_empty1]).to_pandas()
sol = | pd.concat([df, df2, df, df_empty1]) | pandas.concat |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from collections import OrderedDict
import gc
from current_clamp import *
from current_clamp_features import extract_istep_features
from visualization.feature_annotations import feature_name_dict
from read_metadata import *
from file_io import load_current_step
# from pymysql import IntegrityError
import datajoint as dj
schema = dj.schema('yueqi_ephys', locals())
FIG_DIR = 'analysis_current_clamp/figures_plot_recording'
'''
class DjImportedFromDirectory(dj.Imported):
# Subclass of Imported. Initialize with data directory.
def __init__(self, directory=''):
self.directory = directory
super().__init__()
'''
@schema
class EphysExperimentsForAnalysis(dj.Manual):
definition = """
# Ephys experiments (excel files) for analysis
experiment: varchar(128) # excel files to use for analysis
---
project: varchar(128) # which project the data belongs to
use: enum('Yes', 'No') # whether to use this experiment
directory: varchar(256) # the parent project directory
"""
def insert_experiment(self, excel_file):
'''
Insert new sample ephys metadata from excel to datajoint tables
'''
entry_list = pd.read_excel(excel_file)[['experiment', 'project', 'use', 'directory']].dropna(how='any')
entry_list = entry_list.to_dict('records')
no_insert = True
for entry in entry_list:
if entry['use'] == 'No':
continue
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
@schema
class Animals(dj.Imported):
definition = """
# Sample metadata
-> EphysExperimentsForAnalysis
---
    id: varchar(128)         # organoid ID (use date, but need better naming)
strain : varchar(128) # genetic strain
dob = null: date # date of birth
date = null: date # recording date
    age = null: smallint         # number of days (date - dob)
slicetype: varchar(128) # what kind of slice prep
external: varchar(128) # external solution
internal: varchar(128) # internal solution
animal_comment = '': varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
animal_info, _ = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
key['id'] = animal_info['id']
key['strain'] = animal_info['strain']
if not pd.isnull(animal_info['DOB']): key['dob'] = animal_info['DOB']
if not pd.isnull(animal_info['age']): key['age'] = animal_info['age']
key['date'] = animal_info['date']
key['slicetype'] = animal_info['type']
key['external'] = animal_info['external']
key['internal'] = animal_info['internal']
if not pd.isnull(animal_info['comment']): key['animal_comment'] = animal_info['comment']
self.insert1(row=key)
return
@schema
class PatchCells(dj.Imported):
definition = """
# Patch clamp metadata for each cell
-> EphysExperimentsForAnalysis
cell: varchar(128) # cell id
---
rp = null: float # pipette resistance
cm_est = null: float # estimated Cm
ra_est = null: float # estimated Ra right after whole-cell mode
rm_est = null: float # estimated Rm
v_rest = null: float # resting membrane potential
fluor = '': varchar(128) # fluorescent label
    fill = 'no': enum('yes', 'no', 'unknown', 'out')  # whether the cell is biocytin filled. Out -- cell came out with pipette.
cell_external = '': varchar(128) # external if different from sample metadata
cell_internal = '': varchar(128) # internal if different from sample metadata
depth = '': varchar(128) # microns beneath slice surface
location = '': varchar(128) # spatial location
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
_, metadata = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
if 'params' in metadata.columns:
old_file = True
cell_info = parse_cell_info_2017_vertical(metadata)
else:
old_file = False
cell_info = parse_cell_info_2017(metadata)
for i, row in cell_info.iterrows():
newkey = {}
newkey['experiment'] = key['experiment']
newkey['cell'] = row['cell']
if not pd.isnull(row['Rp']): newkey['rp'] = row['Rp']
if not pd.isnull(row['Cm']): newkey['cm_est'] = row['Cm']
if not pd.isnull(row['Ra']): newkey['ra_est'] = row['Ra']
if not pd.isnull(row['Vrest']): newkey['v_rest'] = row['Vrest']
if not pd.isnull(row['depth']): newkey['depth'] = row['depth']
if not old_file:
if not pd.isnull(row['fluor']): newkey['fluor'] = row['fluor']
if not pd.isnull(row['Rm']): newkey['rm_est'] = row['Rm']
if not pd.isnull(row['external']): newkey['cell_external'] = row['external']
if not pd.isnull(row['internal']): newkey['cell_internal'] = row['internal']
if not pd.isnull(row['location']): newkey['location'] = row['location']
if not | pd.isnull(row['fill']) | pandas.isnull |
# Load modules
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
#Read dataset into a pandas.DataFrame
beer_df = pd.read_csv('datasets/quarterly-beer-production-in-aus-March 1956-June 1994.csv')
#Display shape of the dataset
print('Shape of the dataframe:', beer_df.shape)
beer_df.head()
#Rename the 2nd column
beer_df.rename(columns={'Quarterly beer production in Australia: megalitres. March 1956 ? June 1994':
'Beer_Prod'
},
inplace=True
)
#Remove missing values
missing = (pd.isnull(beer_df['Quarter'])) | ( | pd.isnull(beer_df['Beer_Prod']) | pandas.isnull |
import copy
from collections import OrderedDict, UserString, UserDict
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import lognorm
from .control import Control, Controls
from .likelihood import Likelihood
from .impact import Impact
from .vulnerability import Vulnerability, Vulnerabilities
| pd.set_option("display.float_format", lambda x: "%.2f" % x) | pandas.set_option |
import sys
import ast
import pandas as pd
from flask import Flask, jsonify, request
from flasgger import Swagger
from flasgger.utils import swag_from
from resources import constants
from utils import api_utils
SWAGGER_CONFIG = {
"headers": [
],
"specs": [
{
"version": "0.1",
"title": "Humanitarian Data Service",
"description": "Consolidating fragmented raw sources of humanitarian data and serving up parsed and "
"cleaned data from a single API",
"endpoint": 'spec',
"route": '/spec',
"rule_filter": lambda rule: True # all in
}
],
"static_url_path": "/apidocs",
"static_folder": "swaggerui",
"specs_route": "/specs"
}
app = Flask(__name__)
api = Swagger(app, config=SWAGGER_CONFIG)
@app.route('/')
def hello():
# Background colors: https://uigradients.com/#GrapefruitSunset
landing_page = """
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<style type="text/css">
body {
padding: 50px 200px;
font-family: "Georgia";
color: #EEEEEE;
text-align: center;
background: linear-gradient(to left, #E96443 , #904E95);
}
</style>
</head>
<body>
<h1> Welcome to the Humanitarian Data Service</h1>
<i class="fa fa-globe" style="font-size:48px;"></i>
<p>See the interactive API docs <a href="/apidocs/index.html" style="color: #EEEEEE">here</a> </p>
<p>See the open source repository <a href="https://github.com/dragonaire/humdata" style="color: #EEEEEE">here</a></p>
</body>
</html>
"""
return landing_page
@app.route('/funding/totals/<string:country>/', methods=['GET'])
@swag_from('api_configs/funding_totals_by_country.yml')
def get_funding_totals(country):
country = country.strip().capitalize()
success, result = api_utils.safely_load_data('hno_funding_2016_2017.csv', 'funding', country)
if not success:
return result, 501
result = result.iloc[0].to_dict()
return jsonify(country=country, source=constants.DATA_SOURCES['HNO'], data=result, update=constants.UPDATE_FREQUENCY[3])
@app.route('/funding/categories/<string:country>/', methods=['GET'])
@swag_from('api_configs/funding_categories_by_country.yml')
def get_funding_categories(country):
country = country.strip().capitalize()
hno_funding_file = 'hno_funding_%s_2017.csv' % country.lower()
success, result = api_utils.safely_load_data(hno_funding_file, 'category funding')
if not success:
return result, 501
result = result.to_dict(orient='list')
return jsonify(country=country, source=constants.DATA_SOURCES['HNO'], data=result, update=constants.UPDATE_FREQUENCY[3])
def get_funding_by_fts_dimension(country, fts_dimension):
"""
Helper function for FTS funding endpoints.
    Returns whether data retrieval was successful (or an HTTP error code if not), and the resulting JSON data (or an error message if not).
"""
country = country.strip().capitalize()
fts_donors_file = 'fts-{}.csv'.format(fts_dimension)
success, result = api_utils.safely_load_data(fts_donors_file, '{} funding'.format(fts_dimension), country)
if not success:
return 501, result
result.drop(constants.COUNTRY_COL, axis=1, inplace=True)
return success, result.to_dict(orient='list')
@app.route('/funding/donors/<string:country>/', methods=['GET'])
@swag_from('api_configs/funding_donors_by_country.yml')
def get_funding_donors(country):
country = country.strip().capitalize()
success, result = get_funding_by_fts_dimension(country, 'donors')
if not success or success == 501:
return result, 501
return jsonify(country=country, source=constants.DATA_SOURCES['FTS'], data=result, update=constants.UPDATE_FREQUENCY[2])
@app.route('/funding/clusters/<string:country>/', methods=['GET'])
@swag_from('api_configs/funding_clusters_by_country.yml')
def get_funding_clusters(country):
country = country.strip().capitalize()
success, result = get_funding_by_fts_dimension(country, 'clusters')
if not success or success == 501:
return result, 501
return jsonify(country=country, source=constants.DATA_SOURCES['FTS'], data=result, update=constants.UPDATE_FREQUENCY[2])
@app.route('/funding/recipients/<string:country>/', methods=['GET'])
@swag_from('api_configs/funding_recipients_by_country.yml')
def get_funding_recipients(country):
country = country.strip().capitalize()
success, result = get_funding_by_fts_dimension(country, 'recipients')
if not success or success == 501:
return result, 501
return jsonify(country=country, source=constants.DATA_SOURCES['FTS'], data=result, update=constants.UPDATE_FREQUENCY[2])
@app.route('/needs/totals/<string:country>/', methods=['GET'])
@swag_from('api_configs/needs_totals_by_country.yml')
def get_needs_totals(country):
data_keys = ['HNO']
country = country.strip().capitalize()
success, result = api_utils.safely_load_data('hno_needs_total_2017.csv', 'needs', country)
if not success:
return result, 501
result = result.iloc[0]
result = result.to_dict()
result['Additional Data'] = ast.literal_eval(result['Additional Data'])
success, iom = api_utils.safely_load_data('iom_dtm14_needs_feb2017.csv', 'IOM needs', country)
if success:
iom = iom.iloc[0].to_dict()
iom['Percent Main Unmet Need'] = ast.literal_eval(iom['Percent Main Unmet Need'])
iom['Percent Main Cause Of Displacement'] = ast.literal_eval(iom['Percent Main Cause Of Displacement'])
iom['Regional Summary'] = ast.literal_eval(iom['Regional Summary'])
result['Additional Data'].update(iom)
data_keys.append('DTM')
sources = [constants.DATA_SOURCES[data_key] for data_key in data_keys]
return jsonify(country=country, source=sources, data=result, update=constants.UPDATE_FREQUENCY[3])
@app.route('/needs/regions/<string:country>/', methods=['GET'])
@swag_from('api_configs/needs_regions_by_country.yml')
def get_needs_regions(country):
country = country.strip().capitalize()
success, result = api_utils.safely_load_data('lcb_displaced_2017.csv', 'regional needs', country)
if not success:
return result, 501
result['PeriodDate'] = pd.to_datetime(result['Period'])
result.sort_values(by=['ReportedLocation', 'PeriodDate'], inplace=True)
dates = result['Period'].unique().tolist()
regions = result['ReportedLocation'].unique().tolist()
displacement_types = result['DisplType'].unique().tolist()
# Construct a dict for region name -> displacement type -> list of totals where index corresponds to dates list
values = {}
region_groups = result.groupby('ReportedLocation')
for region, group in region_groups:
group_df = | pd.DataFrame(group) | pandas.DataFrame |
import sys
import pandas as pd
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
from sqlalchemy import create_engine
def tokenize(text):
"""
    Function to tokenize text using an NLP pipeline with lemmatization
Args:
text (str): original text
Returns:
list of str: tokens of text
"""
text = re.sub("[^a-zA-Z0-9]"," ",text)
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
stopwords_list = stopwords.words("english")
for token in tokens:
clean_token = lemmatizer.lemmatize(token).lower().strip()
if (clean_token not in stopwords_list): clean_tokens.append(clean_token)
return clean_tokens
def load_data(messages_filepath, categories_filepath):
"""
Function to load datasets from filepaths
Returns:
dataframe: merged dataframe from two datasets
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories,on="id")
return df
def clean_data(df):
"""
    Transform category labels into columns and clean data errors
    Args:
        df (dataframe): merged dataframe containing messages and categories
Returns:
df (dataframe): clean dataframe
"""
categories = df["categories"].str.split(";",expand=True)
row = categories.iloc[0]
category_colnames = row.apply(lambda x: x[:-2])
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].apply(lambda x: x[-1])
categories[column] = pd.to_numeric(categories[column])
categories.replace(2,1,inplace=True)
df.drop("categories",axis=1,inplace=True)
df = | pd.concat([df,categories],axis=1) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
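        # comparison ops on a pd.Index return a plain ndarray, so the expected
        # result is wrapped in np.ndarray rather than Index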
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
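        # int(pd.NaT) is iNaT (int64 min); adding 1 gives a huge negative value that is
        # *not* the NaT sentinel, so timedelta arithmetic on it should overflow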
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
        tm.assert_equal(result, expected)
##
drive_path = 'c:/'
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
from scipy.stats import anderson_ksamp
from scipy.stats import kruskal
from scipy.stats import variation
from scipy import signal as sps
import seaborn as sns
import glob
import re
##
#This piece spits out all the peaks in one dataframe
def getpeaks(date):
    '''Extract all peak responses from one imaging session.
    date : str, name of the session folder (a date string).
    '''
# This piece spits out all the peaks from one session in one dataframe
peakdf = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=175)
df = f[[col for col in f.columns if 'G PMT' in col]]
peak = []
for col in df.columns:
a = df[col]
            firsta = 1
            firstb = 24
# Figures out if there is a min or max and sees if it passes threshold (3SD)
if np.absolute(min(a[26:80])) > np.absolute(max(a[26:80])) and np.absolute(min(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = min(a[26:80])
peak.append(b)
elif np.absolute(max(a[26:80])) > np.absolute(min(a[26:80])) and np.absolute(max(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = max(a[26:80])
peak.append(b)
else:
b = 0
peak.append(b)
peaks = pd.DataFrame(peak).T
peaks.columns = df.columns
peaks = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), peaks], axis=1)
peakdf = peakdf.append(peaks, ignore_index=True)
peakdf.to_csv('%s_peaks.csv' % date, index=False)
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get average peak across all trials using peakdf dataframe
meandf = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
mean = []
for col in peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
mean.append(peakdf.loc[peakdf['Trial'].isin(odor)][col].mean())
mean = pd.DataFrame(mean).T
mean.columns = peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
meandf = meandf.append(mean)
meandf = meandf.reset_index(drop=True)
meandf.columns = [str(col) + '_' + date for col in meandf.columns]
meandf = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), meandf], axis=1)
meandf.to_csv('%s_mean.csv' % date, index=False)
# Get proportion of successful trials
successdf = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
newdf = peakdf.loc[peakdf['Trial'].isin(odor)]
s = []
for col in peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
s.append(np.divide((newdf.loc[:, col] != 0).sum(), float(len(newdf.loc[:, col]))))
s = pd.DataFrame(s).T
s.columns = peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
successdf = successdf.append(s)
successdf = successdf.reset_index(drop=True)
successdf.columns = [str(col) + '_' + date for col in successdf.columns]
successdf = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), successdf], axis=1)
successdf.to_csv('%s_success.csv' % date, index=False)
return 'Done'
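# A minimal sketch of the 3-SD peak criterion used in getpeaks (synthetic
# trace only; the real code takes frames 1:24 as the pre-stimulus baseline
# and frames 26:80 as the response window):
#
#     baseline = trace[1:24]
#     response = trace[26:80]
#     if abs(response.min()) > abs(response.max()) and abs(response.min()) >= 3 * baseline.std():
#         peak = response.min()      # significant negative deflection
#     elif abs(response.max()) > abs(response.min()) and abs(response.max()) >= 3 * baseline.std():
#         peak = response.max()      # significant positive deflection
#     else:
#         peak = 0                   # no response above threshold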
##
def getintegral(date):
'''Compute integrals and integral means
date: string, session
'''
temp = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
# Pull the trials that correspond to specific date/odors
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get the frame rate for a specified date
num = trials.File.unique().tolist().index('%s' % date)
fr = trials.loc[trials['File'] == trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
# Get the integral
intdf = pd.DataFrame([])
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=125)
df = f[[col for col in f.columns if 'G PMT' in col]]
winstart = np.int(4 * fr)
winend = np.int(12 * fr)
integral = []
for col in df.columns:
a = df[col]
            firsta = 1
            firstb = 24
# Figures out if there is a min or max and sees if it passes threshold (3SD)
if np.absolute(min(a[26:80])) > np.absolute(max(a[26:80])) and np.absolute(min(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = sum(df[col][winstart:winend] * (1 / fr))
integral.append(b)
elif np.absolute(max(a[26:80])) > np.absolute(min(a[26:80])) and np.absolute(max(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = sum(df[col][winstart:winend] * (1 / fr))
integral.append(b)
else:
b = 0
integral.append(b)
integral = pd.DataFrame(integral).T
integral.columns = df.columns
integral = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), integral], axis=1)
intdf = intdf.append(integral)
intdf.to_csv('%s_integral.csv' % date, index=False)
# Get average integral across all trials using integral dataframe
meanint = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
mean = []
for col in intdf.loc[intdf['Trial'].isin(odor)][
[col for col in intdf.loc[intdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
mean.append(intdf.loc[intdf['Trial'].isin(odor)][col].mean())
mean = pd.DataFrame(mean).T
mean.columns = intdf.loc[intdf['Trial'].isin(odor)][
[col for col in intdf.loc[intdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
meanint = meanint.append(mean)
meanint = meanint.reset_index(drop=True)
meanint.columns = [str(col) + '_' + date for col in meanint.columns]
meanint = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), meanint], axis=1)
meanint.to_csv('%s_meanint.csv' % date, index=False)
return 'Done'
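# Worked example of the integration window used in getintegral (a sketch;
# the frame rate below is hypothetical -- real values come from the
# FrameRate column of Odor_Trials.csv):
#
#     fr = 8.0                                       # frames per second
#     winstart, winend = int(4 * fr), int(12 * fr)   # frames 32..96
#     integral = sum(trace[winstart:winend]) * (1 / fr)
#
# i.e. a rectangular (Riemann) sum of the dF/F trace over the 4-12 s window.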
##
def getbaseline(date):
temp = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
# Pull the trials that correspond to specific date/odors
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get the frame rate for a specified date
num = trials.File.unique().tolist().index('%s' % date)
fr = trials.loc[trials['File'] == trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
# Get baseline
baseline = pd.DataFrame([])
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=125)
df = f[[col for col in f.columns if 'G PMT' in col]]
winstart = np.int(4 * fr)
winend = np.int(12 * fr)
base = []
for col in df.columns:
a = df[col]
            firsta = 1
            firstb = 24
b = (df[col][firsta:firstb]).mean()
base.append(b)
base = pd.DataFrame(base).T
base.columns = df.columns
base = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), base], axis=1)
baseline = baseline.append(base)
baseline.to_csv('%s_baseline.csv' % date, index=False)
# mean baseline
meanbase = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
mean = []
for col in baseline.loc[baseline['Trial'].isin(odor)][
[col for col in baseline.loc[baseline['Trial'].isin(odor)].columns if 'G PMT' in col]]:
mean.append(baseline.loc[baseline['Trial'].isin(odor)][col].mean())
mean = pd.DataFrame(mean).T
mean.columns = baseline.loc[baseline['Trial'].isin(odor)][
[col for col in baseline.loc[baseline['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
meanbase = meanbase.append(mean)
meanbase = meanbase.reset_index(drop=True)
meanbase.columns = [str(col) + '_' + date for col in meanbase.columns]
meanbase = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), meanbase], axis=1)
meanbase.to_csv('%s_meanbase.csv'%date,index=False)
return 'Done'
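# Typical per-session workflow (a sketch; session names are whatever folder
# names exist under the hard-coded HabituationFiles directory):
#
#     for session in ['session_A', 'session_B']:    # hypothetical names
#         getpeaks(session)
#         getintegral(session)
#         getbaseline(session)
#
# concat() below then gathers the per-session mean/success/integral CSVs
# into group-level summary tables.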
##
def concat(odorfile):
    '''odorfile : path to the CSV listing odor trial orders, e.g.
    'C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv'
    'C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Habituation_Trials.csv'
    '''
trials = pd.read_csv(odorfile)
    # Odor -> trial-number lookup for every session listed in the odor file
    odortrials = {}
    for sess in trials.File.unique():
        filerow = trials.loc[trials['File'] == sess]
        for t in filerow.Odor.unique():
            y = {(sess, t): [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
            odortrials.update(y)
fullpeak = pd.DataFrame([])
# Concat peak responses
for date in trials.File.unique():
# reorganize dataframes
mean = pd.read_csv(
'C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\{0}\\{1}_mean.csv'.format(unicode(date, 'utf-8'),
unicode(date,
'utf-8')))
mdf = pd.concat([mean['Odor'], mean[[col for col in mean.columns if ')_' in col]]], axis=1)
temp = mdf.T
temp.reset_index(level=0, inplace=True)
temp.columns = temp.iloc[0]
temp = temp.reindex(temp.index.drop(0))
temp.rename(columns={'Odor': 'Mouse'}, inplace=True)
temp = temp.reset_index(drop=True)
group = []
for x in list(temp.index.values):
temp.iloc[x]['Mouse'] = temp.iloc[x]['Mouse'].split(')_')[1]
indexnum = trials.File.unique().tolist().index(temp['Mouse'][x])
groupname = trials.loc[trials.File == trials.File.unique()[indexnum]].Group.iloc[0]
group.append(groupname)
group = pd.DataFrame({'Group': group})
temp = pd.concat([group, temp], axis=1)
fullpeak = fullpeak.append(temp)
fullpeak = fullpeak.reset_index(drop=True)
fullpeak.to_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\fullpeak.csv', index=False)
# Concat successes
fullsuccess = pd.DataFrame([])
for date in trials.File.unique():
# reorganize dataframes
dframe = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\{0}\\{1}_success.csv'.format(
unicode(date, 'utf-8'), unicode(date, 'utf-8')))
sdf = pd.concat([dframe['Odor'], dframe[[col for col in dframe.columns if ')_' in col]]], axis=1)
temp = sdf.T
temp.reset_index(level=0, inplace=True)
temp.columns = temp.iloc[0]
temp = temp.reindex(temp.index.drop(0))
temp.rename(columns={'Odor': 'Mouse'}, inplace=True)
temp = temp.reset_index(drop=True)
group = []
for x in list(temp.index.values):
temp.iloc[x]['Mouse'] = temp.iloc[x]['Mouse'].split(')_')[1]
indexnum = trials.File.unique().tolist().index(temp['Mouse'][x])
groupname = trials.loc[trials.File == trials.File.unique()[indexnum]].Group.iloc[0]
group.append(groupname)
group = pd.DataFrame({'Group': group})
temp = pd.concat([group, temp], axis=1)
fullsuccess = fullsuccess.append(temp)
fullsuccess = fullsuccess.reset_index(drop=True)
fullsuccess.to_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\fullsuccess.csv', index=False)
# Concat the integrals
fullint = pd.DataFrame([])
for date in trials.File.unique():
# reorganize dataframes
dframe = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\GoodFiles\\{0}\\{1}_meanint.csv'.format(
unicode(date, 'utf-8'), unicode(date, 'utf-8')))
idf = pd.concat([dframe['Odor'], dframe[[col for col in dframe.columns if ')_' in col]]], axis=1)
temp = idf.T
temp.reset_index(level=0, inplace=True)
temp.columns = temp.iloc[0]
temp = temp.reindex(temp.index.drop(0))
temp.rename(columns={'Odor': 'Mouse'}, inplace=True)
temp = temp.reset_index(drop=True)
group = []
for x in list(temp.index.values):
temp.iloc[x]['Mouse'] = temp.iloc[x]['Mouse'].split(')_')[1]
indexnum = trials.File.unique().tolist().index(temp['Mouse'][x])
groupname = trials.loc[trials.File == trials.File.unique()[indexnum]].Group.iloc[0]
group.append(groupname)
group = pd.DataFrame({'Group': group})
        temp = pd.concat([group, temp], axis=1)
from itertools import product
from pathlib import Path
from warnings import warn
import numpy as np
import pandas as pd
import sep
from astropy.io import fits
from astropy.modeling.functional_models import Gaussian2D
from astropy.nddata import CCDData, Cutout2D, VarianceUncertainty
from astropy.stats import sigma_clipped_stats
from astropy.time import Time
from astropy.visualization import ImageNormalize, SqrtStretch, ZScaleInterval
from photutils.aperture import CircularAnnulus, CircularAperture
from ysfitsutilpy import (CCDData_astype, add_to_header, bdf_process,
bezel_ccd, errormap, fitsxy2py, imcombine, load_ccd,
medfilt_bpm, propagate_ccdmask, select_fits,
set_ccd_gain_rdnoise, trim_ccd)
from ysphotutilpy import (LinPolOE4, apphot_annulus, ellip_ap_an,
fit_Gaussian2D, sep_back, sep_extract, sky_fit)
from .preproc import (cr_reject_nic, find_fourier_peaks, fit_fourier,
vertical_correct)
from .util import (DARK_PATHS, FLAT_PATHS, FOURIERSECTS, GAIN, MASK_PATHS,
OBJSECTS, OBJSLICES, RDNOISE, USEFUL_KEYS, VERTICALSECTS,
infer_filter, parse_fpath, split_oe, summary_nic)
try:
import fitsio
HAS_FITSIO = True
except ImportError:
warn("python version of fitsio is strongly recommended (https://github.com/esheldon/fitsio/tree/master/)")
HAS_FITSIO = False
__all__ = [
"NICPolDir", "NICPolPhot", "read_pols",
"NICPolImage"]
_PHOT_COLNAMES = ['id', 'xcenter', 'ycenter', 'aparea',
'aperture_sum', 'aperture_sum_err',
'msky', 'nrej', 'nsky', 'ssky',
'source_sum', 'source_sum_err',
'mag', 'merr', 'x_fwhm', 'y_fwhm', 'theta']
MEDCOMB_SC3_F4 = dict(combine='med', reject='sc', sigma=3, maxiters=50, use_cfitsio=True, dtype='float32')
def DONE2HDR(header, verbose):
add_to_header(header, 'h', verbose=verbose, fmt=None, s="{:-^72s}".format(' DONE'))
def FINDFITS(tab, filt, oe, exptime=None, objname=None, loadccd=False, verbose=False):
type_key = ["FILTER", "OERAY"]
type_val = [filt.upper(), oe.lower()]
for k, v in zip(["EXPTIME", "OBJECT"], [exptime, objname]):
if v is not None:
type_key.append(k)
type_val.append(v)
return select_fits(summary_table=tab, type_key=type_key, type_val=type_val,
loadccd=loadccd, verbose=verbose)
def SAVENIC(ccd, original_path, object_name, savedir, combined=False, verbose=False):
def _set_fname(original_path, object_name, combined=False):
''' If combined, COUNTER, POL-AGL1, INSROT are meaningless, so remove these.
'''
es = parse_fpath(original_path)
es['OBJECT'] = object_name
if combined:
fstem = '_'.join([es['filt'], es['yyyymmdd'], es['OBJECT'], es['EXPTIME'], es['oe']])
else:
fstem = '_'.join(es.values())
return fstem + '.fits'
ccd.header["OBJECT"] = object_name
newpath = savedir/_set_fname(original_path, object_name, combined=combined)
ccd.write(newpath, overwrite=True)
if verbose:
print(f"Writing FITS to {newpath}")
return ccd, newpath
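# Hedged usage sketch for the helpers above (paths, filter and object names
# are illustrative only):
#
#     fpaths = FINDFITS(summary_table, filt='J', oe='o', exptime=10.0)
#     ccd = load_ccd(fpaths[0])
#     ccd, newpath = SAVENIC(ccd, fpaths[0], "TARGET_DFFr", Path("calib"))
#
# FINDFITS filters a summary table on FILTER/OERAY (plus EXPTIME/OBJECT when
# given); SAVENIC rewrites the OBJECT keyword and saves the CCD under a file
# name rebuilt from the parsed NIC file-name elements.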
class NICPolDirMixin:
@staticmethod
def mkpuredark(tab, caldir, filt, oe, exptime, dark_min=10., verbose_combine=False):
_t = Time.now()
dark_fpaths = FINDFITS(tab, filt, oe, exptime=exptime, verbose=False)
comb = imcombine(dark_fpaths, **MEDCOMB_SC3_F4, verbose=verbose_combine)
comb.data[comb.data < dark_min] = 0
add_to_header(comb.header,
'h',
f"Images combined and dark_min {dark_min} applied.",
t_ref=_t,
verbose=verbose_combine)
_, comb_dark_path = SAVENIC(comb, dark_fpaths[0], "DARK", caldir, combined=True)
return comb_dark_path
@staticmethod
def mkskydark_single(fpath, tmpcal, skyname, mflat, dark_min=10., skydark_medfilt_bpm_kw={},
verbose_bdf=False, verbose_combine=False):
_t = Time.now()
sky = load_ccd(fpath)
add_to_header(sky.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Estimating DARK from this sky frame '))
# Sky / Flat
# flat division to prevent artificial CR rejection:
sky_f = sky.copy()
sky_f.data = sky.data/mflat
sky_f, _ = SAVENIC(sky_f, fpath, f"{skyname}_FLAT", tmpcal)
# (Sky/Flat)_cr
# sky_f_cr = cr_reject_nic(sky_f, crrej_kw=crrej_kw, verbose=verbose_crrej)
sky_f_cr = medfilt_bpm(sky_f, **skydark_medfilt_bpm_kw)
sky_f_cr, _ = SAVENIC(sky_f_cr, fpath, f"{skyname}_FLAT_CRREJ", tmpcal)
# (Sky/Flat)_cr * Flat
sky_cr = sky_f_cr.copy()
sky_cr.data *= mflat
sky_cr, _ = SAVENIC(sky_cr, fpath, f"{skyname}_FLAT_CRREJ_DEFLATTED", tmpcal)
# Dark ~ Sky - (Sky/Flat)_cr * Flat
sky_dark = sky_f_cr.copy() # retain CRREJ header info
sky_dark.data = sky.data - sky_f_cr.data*mflat
sky_dark.data[sky_dark.data < dark_min] = 0
add_to_header(
sky_dark.header, 'h', t_ref=_t, verbose=verbose_combine,
s=("Dark from this frame estimated by sky - (sky/flat)_cr*flat "
+ f"and replaced pixel value < {dark_min} = 0.")
)
add_to_header(
sky_dark.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Similar SKYDARK frames will be combined ')
)
        _, sky_dark_path = SAVENIC(sky_dark, fpath, f"{skyname}_SKYDARK", tmpcal)
return sky_dark_path
@staticmethod
def mkskydark_comb(fpaths, caldir, skyname, verbose_combine=False, verbose_bdf=False):
comb_sky_dark = imcombine(fpaths, **MEDCOMB_SC3_F4, verbose=verbose_combine)
DONE2HDR(comb_sky_dark.header, verbose_bdf)
_, comb_sky_dark_path = SAVENIC(comb_sky_dark, fpaths[0],
f"{skyname}_SKYDARK", caldir, combined=True)
return comb_sky_dark_path
@staticmethod
def mkfringe_single(fpath, tmpcal, skyname, mdark, mflat, mdarkpath, mflatpath,
verbose_bdf=False, verbose_crrej=False):
sky = load_ccd(fpath)
# give mdark/mflat so that the code does not read the FITS files repeatedly:
add_to_header(sky.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Estimating FRINGE from this sky frame '))
sky_fringe = bdf_process(sky,
mdark=mdark,
mflat=CCDData(mflat, unit='adu'),
mdarkpath=mdarkpath,
mflatpath=mflatpath,
verbose_bdf=verbose_bdf,
verbose_crrej=verbose_crrej)
add_to_header(sky_fringe.header, 'h', verbose=verbose_bdf, fmt=None,
s="{:=^72s}".format(' Similar SKYFRINGE frames will be combined '))
_, sky_tocomb_path = SAVENIC(sky_fringe, fpath, f"{skyname}_FRINGE", tmpcal)
return sky_tocomb_path
@staticmethod
def mkfringe_comb(fpaths, logpath, skyname, caldir, scale_section, scale='avg',
scale_to_0th=False, fringe_min_value=0.0, verbose_combine=False, verbose_bdf=False):
# FRINGE must not be smoothed as remaining DARK signal may reside here.
comb_sky_fringe = imcombine(fpaths,
**MEDCOMB_SC3_F4,
scale=scale,
scale_to_0th=scale_to_0th,
scale_section=scale_section,
verbose=verbose_combine,
logfile=logpath)
# Normalize using the section
_t = Time.now()
norm_value = np.mean(comb_sky_fringe.data[fitsxy2py(scale_section)])
comb_sky_fringe.data /= norm_value
comb_sky_fringe.data[comb_sky_fringe.data < fringe_min_value] = 0
add_to_header(comb_sky_fringe.header, 'h', t_ref=_t, verbose=verbose_combine,
s="Normalized by mean of NORMSECT (NORMVALU), replaced value < FRINMINV to 0")
comb_sky_fringe.header["NORMSECT"] = scale_section
comb_sky_fringe.header["NORMVALU"] = norm_value
comb_sky_fringe.header["FRINMINV"] = fringe_min_value
DONE2HDR(comb_sky_fringe.header, verbose_bdf)
_, comb_sky_fringe_path = SAVENIC(comb_sky_fringe, fpaths[0],
f"{skyname}_SKYFRINGE", caldir, combined=True)
return comb_sky_fringe_path
@staticmethod
def _set_mflat(summary_flat, filt, oe, flatdir, flat_min_value=0.):
''' Note that it returns ndarray, not CCDData.
'''
if summary_flat is None:
return 1, None
if flatdir is not None:
mflatpath = FINDFITS(summary_flat, filt, oe, verbose=False)
if len(mflatpath) > 1:
raise ValueError(f"More than 1 flat for (FILTER, OERAY) = ({filt}, {oe}) found.")
elif len(mflatpath) == 0:
raise ValueError(f"No FITS file for (FILTER, OERAY) = ({filt}, {oe}) found.")
mflatpath = mflatpath[0]
mflat = load_ccd(mflatpath).data
mflat[mflat < flat_min_value] = 1.
else:
mflatpath = None
mflat = 1
return mflat, mflatpath
@staticmethod
def _set_dark(prefer_skydark, paths_skydark, paths_puredark, objname, filt, oe, exptime, verbose):
if prefer_skydark:
try:
mdarkpath = paths_skydark[(f"{objname}_sky", filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
except (KeyError, IndexError, FileNotFoundError):
if verbose:
print(f"prefer_skydark but skydark for ({objname}_sky, {filt}, {oe}, "
+ f"{exptime}) not found. Trying to use pure dark.")
try:
mdarkpath = paths_puredark[(filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
except (KeyError, IndexError, FileNotFoundError):
mdarkpath = None
mdark = None
if verbose:
print("\nNo dark file found. Turning off dark subtraction.")
else:
try:
mdarkpath = paths_puredark[(filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
except (KeyError, IndexError, FileNotFoundError):
if verbose:
print(f"Pure dark for ({filt}, {oe}, {exptime}) not found. "
+ f"Trying to use SKYDARK of ({objname}_sky, {filt}, {oe}, {exptime})",
end='... ')
try:
mdarkpath = paths_skydark[(f"{objname}_sky", filt, oe, exptime)]
mdark = load_ccd(mdarkpath)
if verbose:
print("Loaded successfully.")
except (KeyError, IndexError, FileNotFoundError):
mdarkpath = None
mdark = None
if verbose:
print("No dark file found. Turning off dark subtraction.")
return mdark, mdarkpath
@staticmethod
def _set_fringe(paths_skyfringe, objname, filt, oe, exptime, verbose):
try:
mfringepath = paths_skyfringe[(f"{objname}_sky", filt, oe, exptime)]
mfringe = load_ccd(mfringepath)
except (KeyError, IndexError, FileNotFoundError):
mfringepath = None
mfringe = None
if verbose:
                print("No fringe file found. Turning off fringe subtraction.")
return mfringe, mfringepath
@staticmethod
def _find_obj(arr, var,
thresh_tests=[30, 20, 10, 6, 5, 4, 3], bezel_x=(30, 30), bezel_y=(180, 120),
box_size=(64, 64), filter_size=(12, 12), deblend_cont=1,
minarea=314,
**extract_kw):
"""
Note
----
This includes ``sep``'s ``extract`` and ``background``.
Equivalent processes in photutils may include ``detect_sources``
and ``source_properties``, and ``Background2D``, respectively.
Parameters
----------
thresh : float, optional.
The SNR threshold. It is not an absolute pixel value because
internally the ``self.err_o`` and ``self.err_e`` will be
used.
bezel_x, bezel_y : int, float, list of such, optional.
The x and y bezels, in ``[lower, upper]`` convention.
box_size : int or array-like (int) optional.
The background smooting box size. Default is ``(64, 64)``
for NIC. **Note**: If array-like, order must be ``[height,
width]``, i.e., y and x size.
filter_size : int or array-like (int) optional.
The 2D median filter size. Default is ``(12, 12)`` for NIC.
**Note**: If array-like, order must be ``[height, width]``,
i.e., y and x size.
minarea : int, optional
            Minimum number of pixels required for an object. Default is
            314 for NIC.
deblend_cont : float, optional
Minimum contrast ratio used for object deblending. To
entirely disable deblending, set to 1.0.
# gauss_fbox : int, float, array-like of such, optional.
# The fitting box size to fit a Gaussian2D function to the
# objects found by ``sep``. This is done to automatically set
# aperture sizes of the object.
Returns
-------
bkg, obj, segm
"""
bkg_kw = dict(maskthresh=0.0, filter_threshold=0.0, box_size=box_size, filter_size=filter_size)
bkg = sep_back(arr, **bkg_kw)
sepv = sep.__version__
s_bkg = f"Background estimated from sep (v {sepv}) with {bkg_kw}."
thresh_tests = np.sort(np.atleast_1d(thresh_tests))[::-1]
for thresh in thresh_tests:
ext_kw = dict(thresh=thresh, minarea=minarea, deblend_cont=deblend_cont,
bezel_x=bezel_x, bezel_y=bezel_y, **extract_kw)
obj, seg = sep_extract(arr, bkg=bkg, var=var, **ext_kw)
nobj = len(obj)
if nobj < 1:
continue
else:
s_obj = f"Objects found from sep (v {sepv}) with {ext_kw}."
break
found = nobj >= 1
if not found:
s_obj = f"NO object found from sep (v {sepv}) with {ext_kw}."
return bkg, obj, seg, s_bkg, s_obj, found
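# Hedged usage sketch for _find_obj (array names are placeholders; in the
# pipeline `arr` would be a reduced frame and `var` its variance map):
#
#     bkg, obj, seg, s_bkg, s_obj, found = NICPolDirMixin._find_obj(arr, var)
#     if found:
#         x, y = obj['x'][0], obj['y'][0]    # sep centroids of the detection
#
# The threshold list is walked from the highest SNR downward, so the highest
# threshold that yields at least one source (of >= minarea pixels) is kept;
# `found` is False if even the lowest threshold detects nothing.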
class NICPolDir(NICPolDirMixin):
def __init__(self, location, rawdir="raw", caldir="calib", tmpcal="tmp_calib", tmpred="tmp_reduc",
flatdir=None, verbose=False):
self.location = Path(location)
if rawdir is None:
self.rawdir = self.location
else:
self.rawdir = self.location/rawdir
if not self.rawdir.exists():
raise FileNotFoundError("Raw data directory not found.")
self.caldir = self.location/caldir
self.tmpcal = self.location/tmpcal
self.tmpred = self.location/tmpred
# == make directories if not exist ================================================================ #
self.caldir.mkdir(parents=True, exist_ok=True)
self.tmpcal.mkdir(parents=True, exist_ok=True)
self.tmpred.mkdir(parents=True, exist_ok=True)
# Check flatdir
self.flatdir = Path(flatdir) if flatdir is not None else None
if (self.flatdir is not None) and (not self.flatdir.exists()):
raise FileNotFoundError("Flat directory not found.")
self.keys = USEFUL_KEYS + ["OERAY"]
# == collect header info of FLATs ================================================================== #
if self.flatdir is not None:
self.summary_flat = summary_nic(self.flatdir/"*.fits", keywords=self.keys, verbose=verbose)
else:
self.summary_flat = None
if verbose:
print("Extracting header info...")
# == collect header info of RAWs =================================================================== #
self.summary_raw = summary_nic(self.rawdir/"*.fits", keywords=self.keys, verbose=verbose)
# == Find object frames, object_sky frames, and find the unique EXPTIMEs. ========================== #
self.skydata = {}
self.sky_dark_exists = False
self.pure_dark_exists = False
self.sciframes_exptimes = {}
for objname in np.unique(self.summary_raw["OBJECT"]):
if objname.endswith("_sky"):
self.skydata[objname] = {}
self.sky_dark_exists = True
elif not self.pure_dark_exists and objname.lower() == "dark":
self.pure_dark_exists = True
elif objname.lower() not in ['test', 'flat', 'bias']: # just in case...
mask_sci = self.summary_raw["OBJECT"] == objname
self.sciframes_exptimes[objname] = np.unique(self.summary_raw[mask_sci]["EXPTIME"])
for skyname in self.skydata.keys():
skymask = self.summary_raw["OBJECT"] == skyname
self.skydata[skyname]["summary"] = self.summary_raw[skymask]
self.skydata[skyname]["exptimes"] = np.unique(self.summary_raw[skymask]["EXPTIME"])
# == Check if DARK exists ========================================================================== #
if self.pure_dark_exists:
self.summary_dark = self.summary_raw[self.summary_raw["OBJECT"] == "DARK"]
else:
self.summary_dark = None
def prepare_calib(self, dark_min=10., sky_scale_section="[20:130, 40:140]", fringe_min_value=0.0,
flat_min_value=0.0,
skydark_medfilt_bpm_kw=dict(size=5, std_section="[40:100, 40:140]", std_model='std',
med_sub_clip=[None, 10], med_rat_clip=None,
std_rat_clip=[None, 3]),
verbose=True, verbose_combine=False, verbose_bdf=False, verbose_crrej=False):
'''
Parameters
----------
        skydark_medfilt_bpm_kw : dict, optional.
            Parameters for the median-filter bad-pixel-masking (MBPM)
            algorithm used when a SKYDARK must be estimated. The lower
            clips are all `None` so that the SKYDARK frame contains **no**
            pixel smaller than the local median minus ``med_sub_clip[1]``.
        fringe_min_value : float, optional.
            Pixels in the fringe map smaller than this value (super-bad
            pixels) are set to 0. With ``fringe_min_value=0`` the fringe
            contains no negative pixels, so negative (super-bad) pixels in
            the science frame pass through fringe subtraction unchanged and
            are easily replaced later in preprocessing.
'''
self.dark_min = dark_min
self.sky_scale_section = sky_scale_section
self.sky_scale_slice = fitsxy2py(self.sky_scale_section)
self.paths_puredark = {}
self.paths_skydark = {}
self.paths_skyfringe = {}
self.paths_flat = {}
self.fringe_min_value = fringe_min_value
self.flat_min_value = flat_min_value
print(f"Output and intermediate files are saved to {self.caldir} and {self.tmpcal}.")
if self.flatdir is None:
            print("Consider specifying flat frames via the flatdir argument.")
if verbose:
print("Estimating DARK from sky frames if exists... ")
# == Combine dark if DARK exists =================================================================== #
if verbose:
print("Combining DARK frames if exists", end='... ')
if self.pure_dark_exists:
exptimes = np.unique(self.summary_dark["EXPTIME"])
for filt, oe, exptime in product('JHK', 'oe', exptimes):
comb_dark_path = self.mkpuredark(
tab=self.summary_dark,
caldir=self.caldir,
filt=filt,
oe=oe,
exptime=exptime,
dark_min=self.dark_min,
verbose_combine=verbose_combine
)
self.paths_puredark[(filt, oe, exptime)] = comb_dark_path
if verbose:
print("Done.")
elif verbose:
print("No pure DARK found. Trying SKYDARK", end='... ')
# == Reduce SKY frames if exists =================================================================== #
for filt, oe in product('JHK', 'oe'):
mflat, mflatpath = self._set_mflat(self.summary_flat, filt, oe, self.flatdir, self.flat_min_value)
self.paths_flat[(filt, oe)] = mflatpath
for skyname, skydict in self.skydata.items():
for exptime in skydict['exptimes']:
sky_fpaths = FINDFITS(skydict['summary'], filt, oe, exptime, verbose=verbose)
skycomb_paths = []
try:
# == Check to see if there is PUREDARK ============================================= #
                        mdarkpath = self.paths_puredark[(filt, oe, exptime)]
                        if verbose:
                            print(f"Using pure DARK: {mdarkpath}")
except (KeyError, IndexError):
# == Estimate DARK from sky (SKYDARK) if no PUREDARK found ========================= #
sky_dark_paths = []
for fpath in sky_fpaths:
# -- estimate SKYDARK for ALL _sky frames
sky_dark_path = self.mkskydark_single(
fpath=fpath,
tmpcal=self.tmpcal,
skyname=skyname,
mflat=mflat,
dark_min=self.dark_min,
skydark_medfilt_bpm_kw=skydark_medfilt_bpm_kw,
verbose_bdf=verbose_bdf,
verbose_combine=verbose_combine
)
sky_dark_paths.append(sky_dark_path)
# -- Combine estimated SKYDARK (_sky) frames
if verbose:
print(" Combine the estimated DARK from _sky frames", end='... ')
comb_sky_dark_path = self.mkskydark_comb(
fpaths=sky_dark_paths,
caldir=self.caldir,
skyname=skyname,
verbose_combine=verbose_combine,
verbose_bdf=verbose_bdf
)
self.paths_skydark[(skyname, filt, oe, exptime)] = comb_sky_dark_path
mdarkpath = comb_sky_dark_path
if verbose:
print("Done.")
# == Make fringe frames for each sky =================================================== #
if verbose:
print(" Make and combine the SKYFRINGES (flat corrected) from _sky frames",
end='...')
mdark = load_ccd(mdarkpath)
for fpath in sky_fpaths:
sky_tocomb_path = self.mkfringe_single(
fpath=fpath,
tmpcal=self.tmpcal,
skyname=skyname,
mdark=mdark,
mflat=mflat,
mdarkpath=mdarkpath,
mflatpath=mflatpath,
verbose_bdf=verbose_bdf,
verbose_crrej=verbose_crrej
)
skycomb_paths.append(sky_tocomb_path)
# == Combine sky fringes =============================================================== #
# combine dark-subtracted and flat-corrected sky frames to get the fringe pattern by sky
# emission lines:
logpath = self.caldir/f"{filt.lower()}_{skyname}_{exptime:.1f}_{oe}_combinelog.csv"
comb_sky_fringe_path = self.mkfringe_comb(
fpaths=skycomb_paths,
logpath=logpath,
skyname=skyname,
caldir=self.caldir,
scale_section=self.sky_scale_section,
scale='avg',
scale_to_0th=False,
fringe_min_value=self.fringe_min_value,
verbose_combine=verbose_combine,
verbose_bdf=verbose_bdf
)
self.paths_skyfringe[(skyname, filt, oe, exptime)] = comb_sky_fringe_path
if verbose:
print("Done.")
def preproc(self, reddir="reduced", prefer_skydark=False,
med_rat_clip=[0.5, 2], std_rat_clip=[-3, 3], bezel_x=[20, 20], bezel_y=[20, 20],
medfilt_kw=dict(med_sub_clip=None, size=5),
do_crrej_pos=True, do_crrej_neg=True,
verbose=True, verbose_bdf=False, verbose_bpm=False, verbose_crrej=False, verbose_phot=False
):
'''
'''
self.reddir = self.location/reddir
self.reddir.mkdir(parents=True, exist_ok=True)
self.summary_cal = summary_nic(self.caldir/"*.fits", keywords=self.keys, verbose=verbose)
self.darks = {}
self.fringes = {}
for filt, oe in product('JHK', 'oe'):
mflatpath = self.paths_flat[(filt, oe)]
gain = GAIN[filt]
rdnoise = RDNOISE[filt]
if mflatpath is None:
mflat = None
else:
mflat = load_ccd(mflatpath)
mflat.data[mflat.data < self.flat_min_value] = 1.
for objname, exptimes in self.sciframes_exptimes.items():
for exptime in exptimes:
if objname.lower().endswith("_sky") or objname.upper() in ["DARK", "TEST"]:
continue # if sky/dark frames
# == Setup dark and fringe ============================================================= #
mdark, mdarkpath = self._set_dark(
prefer_skydark=prefer_skydark,
paths_skydark=self.paths_skydark,
paths_puredark=self.paths_puredark,
objname=objname,
filt=filt,
oe=oe,
exptime=exptime,
verbose=verbose
)
mfringe, mfringepath = self._set_fringe(
paths_skyfringe=self.paths_skyfringe,
objname=objname,
filt=filt,
oe=oe,
exptime=exptime,
verbose=verbose
)
# == Reduce data and do photometry ===================================================== #
raw_fpaths = FINDFITS(self.summary_raw, filt, oe, exptime, objname)
for fpath in raw_fpaths:
if verbose:
print('\n{}'.format(fpath))
rawccd = load_ccd(fpath)
set_ccd_gain_rdnoise(rawccd, gain=gain, rdnoise=rdnoise)
add_to_header(rawccd.header, 'h', verbose=verbose_bdf,
s="{:=^72s}".format(' Preprocessing start '), fmt=None)
# set_ccd_gain_rdnoise(rawccd)
# 1. Do Dark and Flat.
# 2. Subtract Fringe
if (mdark is not None) or (mflat is not None) or (mfringe is not None):
redccd = bdf_process(rawccd,
mdarkpath=mdarkpath,
mflatpath=mflatpath,
mdark=mdark,
mflat=mflat,
mfringe=mfringe,
mfringepath=mfringepath,
fringe_scale_fun=np.mean,
fringe_scale_section=self.sky_scale_section,
verbose_bdf=verbose_bdf)
objname = redccd.header['OBJECT']
proc = ''
if mdark is not None:
proc += "D"
if mflat is not None:
proc += "F"
if mfringe is not None:
proc += "Fr"
if proc != '':
objname = '_'.join([objname, proc])
redccd, _ = SAVENIC(redccd, fpath, objname, self.tmpred, verbose=verbose)
objname_proc = objname
                        else:
                            redccd = rawccd.copy()
                            objname_proc = objname  # keep a defined name for the final save below
# 3. Calculate median-filtered frame.
# 4. Calculate
# RATIO = original/medfilt,
# SIGRATIO = (original - medfilt) / mean(original)
# 5. Replace pixels by Median BPM (MBPM) algorithm
if verbose:
print("Median filter bad pixel masking (MBPM) will be used.")
redccd, res = medfilt_bpm(redccd,
std_section=self.sky_scale_section,
med_rat_clip=med_rat_clip,
std_rat_clip=std_rat_clip,
**medfilt_kw,
verbose=verbose_bpm,
full=True)
tmpccd = redccd.copy()
tmpccd.data = res['med_filt']
medfilt_objname = objname + "_MedFilt"
_ = SAVENIC(tmpccd, fpath, medfilt_objname, self.tmpred, verbose=verbose_bpm)
tmpccd.data = res['med_sub']
med_sub_objname = objname + "_MedSub"
_ = SAVENIC(tmpccd, fpath, med_sub_objname, self.tmpred, verbose=verbose_bpm)
tmpccd.data = res['med_rat']
med_rat_objname = objname + "_MedRatio"
_ = SAVENIC(tmpccd, fpath, med_rat_objname, self.tmpred, verbose=verbose_bpm)
tmpccd.data = res['std_rat']
std_rat_objname = objname + "_StdRatio"
_ = SAVENIC(tmpccd, fpath, std_rat_objname, self.tmpred, verbose=verbose_bpm)
tmpccd.data = (1*res['posmask'] + 2*res['negmask']).astype(np.uint8)
fullmask_objname = objname + "_MASK_pos1neg2"
_ = SAVENIC(tmpccd, fpath, fullmask_objname, self.tmpred, verbose=verbose_bpm)
DONE2HDR(redccd.header, verbose_bdf)
# -- Uncertainty calculation
# Use the raw one, i.e., BEFORE dark, flat, sky, crrej, etc.
# var = error^2
_t = Time.now()
var = (rawccd.data/gain # Photon noise = signal + dark + sky (fringe) BEFORE flat
+ (rdnoise/gain)**2 # readout noise
+ 1/12 # digitization (see Eq 12 and below of Merline+Howell 95)
).astype('float32')
                        # sometimes negative pixels exist and give NaN if sqrt is taken...
# redccd.uncertainty = StdDevUncertainty(np.sqrt(var))
redccd.uncertainty = VarianceUncertainty(var)
add_to_header(redccd.header, 'h', verbose=verbose_bdf, t_ref=_t,
s=("1-sigma VARIANCE calculated "
+ f"by GAIN ({gain}) and RDNOISE ({rdnoise});"
+ " see ext=1 (EXTNAME = 'UNCERT')"))
# -- Save final result
redccd = bezel_ccd(redccd, bezel_x=bezel_x, bezel_y=bezel_y, replace=None,
verbose=verbose)
_ = SAVENIC(redccd, fpath, objname_proc, self.reddir, verbose=verbose)
if verbose:
print()
class NICPolPhot(NICPolDirMixin):
def __init__(self, location, objnames=None, reddir="reduced",
p_eff=dict(J=98., H=95., K=92.), dp_eff=dict(J=6., H=7., K=12.),
theta_inst=dict(J=0.5, H=1.3, K=-0.7), dtheta_inst=dict(J=1.3, H=3.1, K=6.3),
q_inst=dict(J=0.0, H=0.03, K=-0.02), u_inst=dict(J=-0.01, H=-0.03, K=-0.07),
dq_inst=dict(J=0.29, H=0.52, K=0.30), du_inst=dict(J=0.29, H=0.55, K=0.31),
correct_dqdu_stddev_to_stderr=True
):
self.location = Path(location)
if reddir is None:
self.reddir = self.location
else:
self.reddir = self.location/reddir
if not self.reddir.exists():
raise FileNotFoundError("Reduced data directory not found.")
self.p_eff = p_eff
self.dp_eff = dp_eff
self.theta_inst = theta_inst
self.dtheta_inst = dtheta_inst
# TODO: Currently instrumental polarizaiton correction is turned off.
# self.q_inst = q_inst
# self.u_inst = u_inst
# if correct_dqdu_stddev_to_stderr:
# for filt in "JHK":
# # Convert stddev to the standard error of the mean estimator (see TakahashiJ+2018)
# dq_inst[filt] = dq_inst[filt]/np.sqrt(150 + 15 + 15)
# du_inst[filt] = du_inst[filt]/np.sqrt(150 + 15 + 15)
# self.dq_inst = dq_inst
# self.du_inst = du_inst
# We need not make a summary, because filenames contain all such information.
self.redfpaths = list(self.reddir.glob("*.fits"))
self.redfpaths.sort()
self.objnames = objnames
self.parsed = pd.DataFrame.from_dict([parse_fpath(fpath) for fpath in self.redfpaths])
# ^^ 0.7-0.8 us per iteration, and few ms for DataFrame'ing
if self.objnames is not None:
objmask = self.parsed['OBJECT'].str.split('_', expand=True)[0].isin(np.atleast_1d(self.objnames))
self.parsed = self.parsed[objmask]
self.objfits = np.array(self.redfpaths)[objmask]
else:
self.objfits = self.redfpaths
self.parsed.insert(loc=0, column='file', value=self.objfits)
self.parsed['counter'] = self.parsed['counter'].astype(int)
self.parsed['set'] = np.tile(1 + np.arange(len(self.parsed)/3)//8, 3).astype(int)
# (nimg/filter) // files/set=8
self.parsed['PA'] = self.parsed['PA'].astype(float)
self.parsed['INSROT'] = self.parsed['INSROT'].astype(float)
self.parsed['IMGROT'] = self.parsed['IMGROT'].astype(float)
self.grouped = self.parsed.groupby(['filt', 'set'])
def photpol(self, radii, figdir=None, thresh_tests=[30, 20, 10, 6, 5, 4, 3, 2, 1, 0],
sky_r_in=60, sky_r_out=90, skysub=True,
satlevel=dict(J=7000, H=7000, K=7000), sat_npix=dict(J=5, H=5, K=5),
verbose_bkg=False, verbose_obj=False, verbose=True,
obj_find_kw=dict(bezel_x=(30, 30), bezel_y=(180, 120), box_size=(64, 64),
filter_size=(12, 12), deblend_cont=1, minarea=100),
output_pol=None):
self.phots = {}
self.positions = {}
self.pols = {}
if figdir is not None:
self.figdir = Path(figdir)
self.figdir.mkdir(parents=True, exist_ok=True)
savefig = True
else:
self.figdir = None
savefig = False
self.radii = np.atleast_1d(radii)
self.skyan_kw = dict(r_in=sky_r_in, r_out=sky_r_out)
self.Pol = {}
for (filt, set_id), df in self.grouped:
            if len(df) < 8:  # skip the set if fewer than 8 frames (4 HWP angles x o/e rays) were obtained
continue
for i, (_, row) in enumerate(df.iterrows()):
fpath = row['file']
if HAS_FITSIO:
# Using FITSIO reduces time 0.3 s/set --> 0.1 s/set (1 set = 8 FITS files).
arr = fitsio.FITS(fpath)[0].read()
var = fitsio.FITS(fpath)[1].read() # variance only from photon noise.
else:
_ccd = load_ccd(fpath)
arr = _ccd.data
                    var = _ccd.uncertainty.array  # use the underlying ndarray, as in the FITSIO branch
find_res = self._find_obj(arr, var=var, thresh_tests=thresh_tests, **obj_find_kw)
bkg, obj, seg, s_bkg, s_obj, found = find_res
if found:
pos_x, pos_y = [obj['x'][0]], [obj['y'][0]]
saturated = False
cut = Cutout2D(data=arr, position=(pos_x[0], pos_y[0]), size=51)
n_saturated = np.count_nonzero(cut.data > satlevel[filt.upper()])
saturated = n_saturated > sat_npix[filt.upper()]
if saturated:
if verbose:
print(f"{n_saturated} pixels above satlevel {satlevel[filt.upper()]} (at {filt})"
+ "; Do not do any photometry. ")
objsum = np.nan*np.ones_like(radii)
varsum = np.nan*np.ones_like(radii)
else:
if verbose_bkg:
print(s_bkg)
if verbose_obj:
print(s_obj)
if verbose:
dx = pos_x[0] - arr.shape[1]/2
dy = pos_y[0] - arr.shape[0]/2
print(f"{fpath.name}: " # 0-th object at
+ f"(x, y) = ({pos_x[0]:6.3f}, {pos_y[0]:7.3f}), " # [0-indexing]
+ f"(dx, dy) = ({dx:+6.3f}, {dy:+6.3f})" # from image center
)
objsum, varsum, _ = sep.sum_circle(arr, var=var, x=pos_x, y=pos_y, r=self.radii)
if skysub:
ones = np.ones_like(arr)
aparea, _, _ = sep.sum_circle(ones, x=pos_x, y=pos_y, r=self.radii)
sky_an = CircularAnnulus((pos_x, pos_y), **self.skyan_kw)
sky = sky_fit(arr, annulus=sky_an)
objsum -= sky['msky']*aparea
varsum += (sky['ssky']*aparea)**2/sky['nsky'] + aparea*sky['ssky']**2
else:
objsum = np.nan*np.ones_like(radii)
varsum = np.nan*np.ones_like(radii)
# pos_x, pos_y = [arr.shape[1]/2], [arr.shape[0]/2]
# s_obj += "\n Using IMAGE CENTER as a fixed object center"
# if verbose:
# print("Object not found. Using IMAGE CENTER as a fixed object center.")
self.phots[filt, set_id, row['POL-AGL1'], row['oe']] = dict(objsum=objsum, varsum=varsum)
self.positions[filt, set_id, row['POL-AGL1'], row['oe']] = (pos_x[0], pos_y[0])
# self.ratio_valu = {}
# self.ratio_vari = {}
# for ang in ['00.0', '45.0', '22.5', '67.5']:
# phot_o = self.phots[filt, set_id, ang, 'o']
# phot_e = self.phots[filt, set_id, ang, 'e']
# self.ratio_valu[ang] = phot_e['objsum']/phot_o['objsum']
# # sum of (err/apsum)^2 = variance/apsum^2
# self.ratio_vari[ang] = (phot_e['varsum']/phot_e['objsum']**2
# + phot_o['varsum']/phot_o['objsum']**2)
# self.rq_valu = np.sqrt(self.ratio_valu['00.0']/self.ratio_valu['45.0'])
# self.ru_valu = np.sqrt(self.ratio_valu['22.5']/self.ratio_valu['67.5'])
# self.q_valu = (self.rq_valu - 1)/(self.rq_valu + 1)
# self.u_valu = (self.ru_valu - 1)/(self.ru_valu + 1)
# self.q_vari = (self.rq_valu/(self.rq_valu + 1)**2)**2*(self.ratio_vari['00.0']
# + self.ratio_vari['45.0'])
# self.u_vari = (self.ru_valu/(self.ru_valu + 1)**2)**2*(self.ratio_vari['22.5']
# + self.ratio_vari['67.5'])
# pol_valu = np.sqrt(self.q_valu**2 + self.u_valu**2)
# pol_err = np.sqrt(self.q_valu**2*self.q_vari + self.u_valu**2*self.u_vari)/pol_valu
# th_valu = 0.5*np.rad2deg(np.arctan2(self.u_valu, self.q_valu))
# th_err = 0.5*np.rad2deg(pol_err/pol_valu)
self.Pol[filt, set_id] = LinPolOE4(
i000_o=self.phots[filt, set_id, '00.0', 'o']['objsum'],
i000_e=self.phots[filt, set_id, '00.0', 'e']['objsum'],
i450_o=self.phots[filt, set_id, '45.0', 'o']['objsum'],
i450_e=self.phots[filt, set_id, '45.0', 'e']['objsum'],
i225_o=self.phots[filt, set_id, '22.5', 'o']['objsum'],
i225_e=self.phots[filt, set_id, '22.5', 'e']['objsum'],
i675_o=self.phots[filt, set_id, '67.5', 'o']['objsum'],
i675_e=self.phots[filt, set_id, '67.5', 'e']['objsum'],
di000_o=np.sqrt(self.phots[filt, set_id, '00.0', 'o']['varsum']),
di000_e=np.sqrt(self.phots[filt, set_id, '00.0', 'e']['varsum']),
di450_o=np.sqrt(self.phots[filt, set_id, '45.0', 'o']['varsum']),
di450_e=np.sqrt(self.phots[filt, set_id, '45.0', 'e']['varsum']),
di225_o=np.sqrt(self.phots[filt, set_id, '22.5', 'o']['varsum']),
di225_e=np.sqrt(self.phots[filt, set_id, '22.5', 'e']['varsum']),
di675_o=np.sqrt(self.phots[filt, set_id, '67.5', 'o']['varsum']),
di675_e=np.sqrt(self.phots[filt, set_id, '67.5', 'e']['varsum'])
)
self.Pol[filt, set_id].calc_pol(
p_eff=self.p_eff[filt.upper()], dp_eff=self.dp_eff[filt.upper()],
theta_inst=self.theta_inst[filt.upper()], dtheta_inst=self.dtheta_inst[filt.upper()],
pa_inst=np.mean(df['PA']),
rot_instq=np.mean(df['INSROT'][:4]), rot_instu=np.mean(df['INSROT'][-4:]),
q_inst=0, u_inst=0, dq_inst=0, du_inst=0,
degree=True, percent=True
)
self.pols[filt, set_id] = dict(pol=self.Pol[filt, set_id].pol,
dpol=self.Pol[filt, set_id].dpol,
theta=self.Pol[filt, set_id].theta,
dtheta=self.Pol[filt, set_id].dtheta)
        self.phots = pd.DataFrame.from_dict(self.phots)
import datetime
import numpy as np
import pandas as pd
import plotly.graph_objs as go
def last_commits_prep(payload):
    commits = pd.DataFrame.from_dict(payload['commits'])
import pandas as pd
import os
os.chdir("/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data")
import helpers
# Comparing two versions of survivor roster.
v1_file = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/patient_rosters/survivor_IDdict_v1_2019-02-27_PRIVATE.xlsx"
v2_file = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/patient_rosters/unnamed originals/Survivor Study Enrollment Log Stacked_6Mar19_For Dylan.xlsx"
mergeOn = ['G-Number', 'Study Specific #', 'Type', 'ID']
v1 = pd.read_excel(v1_file, converters={'Study Specific #': str, 'ID': str})
#!/usr/bin/env python
# coding: utf-8
# ## Damage and Loss Assessment (12-story RC frame)
#
# This example continues example 2, conducting damage and loss assessment using the PLoM model and comparing the results against those based on MSA
# ### Run example2
import numpy as np
import random
import time
from math import pi
import pandas as pd
from ctypes import *
import matplotlib.pyplot as plt
import sys
plt.ion()
# ### Import PLoM modules
# In[36]:
sys.path.insert(1, '../../')
from PLoM import *
# ### Load Incremental Dynamic Analysis (IDA) Data
# IDA data are loaded via a comma-separated value (csv) file. The first row contains column names for both predictors (X) and responses (y). The following rows are input sample data. Users are expected to specify the csv filename.
# In[37]:
# Filename
filename = './data/response_frame12_ida_comb.csv'
model = PLoM(model_name='IDA', data=filename, col_header=True, plot_tag=True)
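# (Added check, not part of the original workflow: peek at the csv header to
# confirm the predictor/response column layout described above.)
df_peek = pd.read_csv(filename, nrows=3)
print(df_peek.columns.tolist())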
# ### Configuring tasks
# Please specify tasks to run - the list of tasks can be run in sequence, or individual tasks can be run separately.
# In[38]:
tasks = ['DataNormalization','RunPCA','RunKDE','ISDEGeneration']
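# Alternative driver (an assumption about the API, shown only as a sketch):
# the four steps in `tasks` could be configured and run in a single pass
# instead of step by step as done below.
run_all_at_once = False  # set True to try the single-pass driver
if run_all_at_once:
    model.ConfigTasks(tasks)
    model.RunAlgorithm(epsilon_pca=1e-6, epsilon_kde=25, n_mc=20, tol_PCA2=1e-5)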
# ### Step 0: Scaling the data
# In[39]:
# Configure the task
model.ConfigTasks(['DataNormalization'])
# Launch the run
model.RunAlgorithm()
# ### Step 1: Principal Component Analysis (PCA)
# In[40]:
# Tolerance for truncating principal components
tol_pca = 1e-6
# Configure the task
model.ConfigTasks(['RunPCA'])
# Launch the run
model.RunAlgorithm(epsilon_pca=tol_pca)
# ### Step 2: Kernel Density Estimation (KDE)
# In[41]:
# Smoothing parameter in the KDE
sp = 25
# Configure the task
model.ConfigTasks(['RunKDE'])
# Launch the run
model.RunAlgorithm(epsilon_kde=sp)
# ### Step 3: Create the generator
# In[42]:
# Extra parameters for ISDE generation
new_sample_num_ratio = 20
tol_PCA2 = 1e-5
# Configure the task
model.ConfigTasks(['ISDEGeneration'])
# Launch the run
model.RunAlgorithm(n_mc = new_sample_num_ratio, tol_PCA2 = tol_PCA2)
# ### Step 4: Exporting data
# In[43]:
# Available data list
model.export_results()
# In[44]:
# Pick up the original and new realizations, X0 and X_new
model.export_results(data_list=['/X0','/X_new'], file_format_list=['csv','csv'])
# ### Post-processing
# We would like to check the basic statistics of the input sample (i.e., IDA) and the new realizations generated by PLoM. The key metrics include the median, standard deviation, and correlation coefficient matrix of the different structural responses.
# In[45]:
# Load results
df_ida = pd.read_csv('../../RunDir/IDA/DataOut/X0.csv')
df_plom = pd.read_csv('../../RunDir/IDA/DataOut/X_new.csv')
print(df_ida.head())
print(df_plom.head())
# In[46]:
x0 = df_ida.iloc[:,1:].T
x_c = df_plom.iloc[:,1:].T
x_name = x0.index.tolist()
x0 = np.array(x0)
x_c = np.array(x_c)
n = 27
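# Added check (not in the original script): compare per-variable medians and
# standard deviations of the IDA sample against the PLoM realizations.
med_gap = np.abs(np.median(x_c, axis=1) - np.median(x0, axis=1))
std_gap = np.abs(np.std(x_c, axis=1) - np.std(x0, axis=1))
print("max |median difference|:", med_gap.max())
print("max |std difference|:", std_gap.max())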
# Correlation coefficient matrix
c_ida = np.corrcoef(x0)
c_plom = np.corrcoef(x_c)
c_combine = c_ida
tmp = np.triu(c_plom).flatten()
tmp = tmp[tmp != 0]
c_combine[np.triu_indices(27)] = tmp
# Plot covariance matrix
fig, ax = plt.subplots(figsize=(8,6))
ctp = ax.contourf(c_combine[3:,3:], cmap=plt.cm.hot, levels=1000)
ctp.set_clim(0,1)
ax.plot([0, 23], [0, 23], 'k--')
ax.set_xticks(list(range(n-3)))
ax.set_yticks(list(range(n-3)))
ax.set_xticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_yticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_title('Covariance matrix comparison')
ax.grid()
cbar = fig.colorbar(ctp,ticks=[x/10 for x in range(11)])
plt.show()
# Plot the cross-section of correlation matrix
fig, ax = plt.subplots(figsize=(6,4))
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r:',label='PLoM')
for i in range(n-3):
ax.plot(np.array(range(n-3)),c_ida[i+3][3:],'k-')
ax.plot(np.array(range(n-3)),c_plom[i+3][3:],'r:')
ax.set_xticks(list(range(n-3)))
ax.set_xticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_ylabel('Correlation coefficient')
ax.set_ylim([0,1])
ax.set_xlim([0,n-4])
ax.legend()
ax.grid()
plt.show()
# ### Hazard Adjustment
# This section can be used to process the PLoM predictions from raw IDA training. Site-specific hazard information is needed as an input. An example site hazard csv file is provided: the first column is the Sa intensity, the second column is the median SaRatio, the third column is the median duration, and the last four columns are covariance matrix entries.
# In[47]:
# Load site hazard information
shz = pd.read_csv('./data/site_hazard.csv')
sa_levels = shz['Sa']
print(shz)
print(np.array(shz.iloc[0]['cov11':]).reshape((2,2)))
# In[48]:
# Draw samples from the site distribution
num_rlz = 1000 # sample size
np.random.seed(1) # random seed for replicating results
rlz_imv = []
for i in range(len(shz.index)):
rlz_imv.append(np.random.multivariate_normal(mean=[shz['mSaRatio'][i],shz['mDs'][i]],cov=np.array(shz.iloc[i]['cov11':]).reshape((2,2)),size=num_rlz))
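# Added sanity check: the sample covariance at the first Sa level should be
# close to the covariance matrix prescribed in the site hazard file.
print(np.cov(rlz_imv[0].T))
print(np.array(shz.iloc[0]['cov11':]).reshape((2, 2)))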
# In[49]:
# Search nearest PLoM data points for each sample in rlz_imv
lnsa_plom = x_c[0]
lnsaratio_plom = x_c[1]
lnds_plom = x_c[2]
# Create the nearest-neighbor interpolator and interpolate data
from scipy.interpolate import NearestNDInterpolator
res_edp = []
for i in range(n-3):
# Loop all EDPs
interp_nn = NearestNDInterpolator(list(zip(lnsa_plom,lnsaratio_plom,lnds_plom)),x_c[3+i])
pred_nn = []
for j in range(len(shz.index)):
# Loop all intensity levels
pred_nn.append(interp_nn(np.ones(rlz_imv[j][:,0].shape)*np.log(shz['Sa'][j]),
rlz_imv[j][:,0],rlz_imv[j][:,1]))
res_edp.append(pred_nn)
fig, ax = plt.subplots(figsize=(6,4))
ax.plot(rlz_imv[0][:,0],rlz_imv[0][:,1],'r.',label='Resample')
plt.show()
# In[50]:
ref_msa = pd.read_csv('./data/response_rcf12_msa_la_nc.csv')
# In[51]:
from sklearn.neighbors import KNeighborsRegressor
neigh = KNeighborsRegressor(n_neighbors=2,weights='distance',algorithm='auto',p=2)
res = []
for i in range(n-3):
# Loop all EDPs
neigh.fit(np.transpose(x_c[0:3]),x_c[i+3])
pred = []
for j in range(len(shz.index)):
# Loop all intensity levels
pred.append(neigh.predict(np.array((np.ones(rlz_imv[j][:,0].shape)*np.log(shz['Sa'][j]),rlz_imv[j][:,0],rlz_imv[j][:,1])).T))
res.append(pred)
# In[52]:
num_story = 12
num_sa = 6
sdr_cur_med_msa = np.zeros((num_story,num_sa))
sdr_cur_std_msa = np.zeros((num_story,num_sa))
sdr_cur_med_plom = np.zeros((num_story,num_sa))
sdr_cur_std_plom = np.zeros((num_story,num_sa))
for i in range(12):
for j in range(6):
sdr_cur_msa = ref_msa.loc[ref_msa['Sa']==shz['Sa'][j]][x_name[i+3][2:]]
sdr_cur_med_msa[i,j] = np.exp(np.mean(np.log(sdr_cur_msa)))
sdr_cur_std_msa[i,j] = np.std(np.log(sdr_cur_msa))
sdr_cur_plom = np.exp(res[i][j])
sdr_cur_med_plom[i,j] = np.exp(np.mean(res[i][j]))
sdr_cur_std_plom[i,j] = np.std(res[i][j])
fig = plt.figure(figsize=(12,8))
story_list = list(range(1,num_story+1))
for i in range(6):
plt.subplot(2,3,i+1)
ax = plt.gca()
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r-',label='PLoM-IDA \nHazard Adjusted')
ax.plot(sdr_cur_med_msa[:,i],story_list,'k-')
ax.plot(sdr_cur_med_msa[:,i]*np.exp(sdr_cur_std_msa[:,i]),story_list,'k--')
ax.plot(sdr_cur_med_msa[:,i]/np.exp(sdr_cur_std_msa[:,i]),story_list,'k--')
ax.plot(sdr_cur_med_plom[:,i],story_list,'r-')
ax.plot(sdr_cur_med_plom[:,i]*np.exp(sdr_cur_std_plom[:,i]),story_list,'r--')
ax.plot(sdr_cur_med_plom[:,i]/np.exp(sdr_cur_std_plom[:,i]),story_list,'r--')
ax.set_xlim(0.0,0.05)
ax.set_ylim(1,12)
ax.grid()
ax.legend()
ax.set_xlabel('$SDR_{max}$ (in/in)')
ax.set_ylabel('Story')
# In[53]:
num_story = 12
num_sa = 6
pfa_cur_med_msa = np.zeros((num_story,num_sa))
pfa_cur_std_msa = np.zeros((num_story,num_sa))
pfa_cur_med_plom = np.zeros((num_story,num_sa))
pfa_cur_std_plom = np.zeros((num_story,num_sa))
for i in range(12):
for j in range(6):
pfa_cur_msa = ref_msa.loc[ref_msa['Sa']==shz['Sa'][j]][x_name[i+15][2:]]
pfa_cur_med_msa[i,j] = np.exp(np.mean(np.log(pfa_cur_msa)))
pfa_cur_std_msa[i,j] = np.std(np.log(pfa_cur_msa))
pfa_cur_plom = np.exp(res[i+12][j])
pfa_cur_med_plom[i,j] = np.exp(np.mean(res[i+12][j]))
pfa_cur_std_plom[i,j] = np.std(res[i+12][j])
fig = plt.figure(figsize=(12,8))
story_list = list(range(1,num_story+1))
for i in range(6):
plt.subplot(2,3,i+1)
ax = plt.gca()
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r-',label='PLoM-IDA \nHazard Adjusted')
ax.plot(pfa_cur_med_msa[:,i],story_list,'k-')
ax.plot(pfa_cur_med_msa[:,i]*np.exp(pfa_cur_std_msa[:,i]),story_list,'k--')
ax.plot(pfa_cur_med_msa[:,i]/np.exp(pfa_cur_std_msa[:,i]),story_list,'k--')
ax.plot(pfa_cur_med_plom[:,i],story_list,'r-')
ax.plot(pfa_cur_med_plom[:,i]*np.exp(pfa_cur_std_plom[:,i]),story_list,'r--')
ax.plot(pfa_cur_med_plom[:,i]/np.exp(pfa_cur_std_plom[:,i]),story_list,'r--')
ax.set_xlim(0.0,1)
ax.set_ylim(1,12)
ax.grid()
ax.legend()
ax.set_xlabel('$PFA$ (g)')
ax.set_ylabel('Story')
# In[54]:
x0_ref = []
for i in range(n):
x0_ref.append([np.log(x) for x in ref_msa.iloc[:, i].values.tolist()])
c_msa = np.corrcoef(x0_ref)
res_conct = []
for i in range(n-3):
tmp = []
for j in range(len(shz.index)):
tmp = tmp+res[i][j].tolist()
res_conct.append(tmp)
c_plom = np.corrcoef(res_conct)
# Plot correlation of resampled data
fig, ax = plt.subplots(figsize=(6,4))
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r:',label='PLoM-IDA (Hazard Adjusted)')
for i in range(n-15):
ax.plot(np.array(range(n-3)),c_msa[i+3][3:],'k-')
ax.plot(np.array(range(n-3)),c_plom[i],'r:')
ax.set_xticks(list(range(n-3)))
ax.set_xticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_ylabel('Correlation coefficient')
ax.set_ylim([0,1])
ax.set_xlim([0,n-16])
ax.legend()
ax.grid()
plt.show()
fig.savefig('plom_vs_ida_cov.png',dpi=600)
# In[55]:
# Estimation errors
err_med = np.linalg.norm(np.log(sdr_cur_med_plom) - np.log(sdr_cur_med_msa),axis=0)/np.linalg.norm(np.log(sdr_cur_med_msa),axis=0)
err_std = np.linalg.norm(sdr_cur_std_plom - sdr_cur_std_msa,axis=0)/np.linalg.norm(sdr_cur_std_msa,axis=0)
# Plot
fig, ax = plt.subplots(figsize=(6,6))
ax.plot(list(range(6)),err_med,'ko-',label='Mean EDP')
ax.plot(list(range(6)),err_std,'rs-',label='Standard deviation EDP')
ax.set_xticks(list(range(6)))
ax.set_xticklabels(['Sa = '+str(x)+'g' for x in sa_levels],rotation=30)
ax.set_xlim([0,5])
ax.set_ylim([0,1])
ax.set_ylabel('MSE')
ax.grid()
ax.legend()
plt.show()
# Save
np.savetxt('plom_ida.csv',np.exp(np.array(res_conct)).T,delimiter=',')
# Generate uncorrelated samples for comparison
num_uc = 1000 # sample size (per Sa level)
uc_sample = pd.DataFrame()
for j in range(num_sa):
for i in range(num_story):
uc_sample['1-PID-'+str(i+1)+'-1'] = np.exp(np.random.normal(loc=np.log(sdr_cur_med_plom[i,j]),scale=sdr_cur_std_plom[i,j],size=num_uc))
uc_sample['1-PFA-'+str(i+1)+'-1'] = 0.0*np.exp(np.random.normal(loc=np.log(pfa_cur_med_plom[i,j]),scale=pfa_cur_std_plom[i,j],size=num_uc))
uc_sample['1-PRD-1-1'] = uc_sample['1-PID-2-1']
uc_sample.to_csv('plom_ida_uc_s'+str(j+1)+'.csv',index_label='#Num')
# ### Damage and Loss
# This section processes the structural damage and loss estimation results. The SDR data are used as the input EDP to pelicun. A lognormal distribution is assumed for the input SDR sample in pelicun. The HAZUS-MH module is used, and the damage model is selected for a high-rise concrete moment frame (C1H) with moderate-code design level and the occupancy type COM1. Comparisons between MSA and PLoM results are made.
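# As an illustration of that lognormal assumption (an addition, not part of the
# original pelicun input preparation): the log-median and dispersion of each EDP
# column in the PLoM sample saved above can be estimated directly.
edp_sample = pd.read_csv('plom_ida.csv', header=None)
print(np.exp(np.log(edp_sample).mean()).head())  # median of each EDP column
print(np.log(edp_sample).std().head())           # lognormal dispersion (beta)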
# In[11]:
# Damage states
import pandas as pd
df_damage = pd.DataFrame()
for i in range(4):
df_tmp = pd.read_csv('./data/'+'msa_s'+str(i+1)+'/DL_summary.csv')
df_damage['msa-s'+str(i+1)] = df_tmp['highest_damage_state/S'] # extract the structural damage states
df_tmp = pd.read_csv('./data/'+'plom_s'+str(i+1)+'/DL_summary.csv')
df_damage['plom-s'+str(i+1)] = df_tmp['highest_damage_state/S'] # extract the structural damage states
df_tmp = pd.read_csv('./data/'+'plom_uc_s'+str(i+1)+'/DL_summary.csv')
df_damage['plom-uc-s'+str(i+1)] = df_tmp['highest_damage_state/S'] # extract the structural damage states
for i in range(4):
fig, ax = plt.subplots(figsize=(6,4))
ax.hist(df_damage['msa-s'+str(i+1)],bins=5,range=(0.0,4.0),alpha=0.5,label='MSA, mean = '+str(np.round(np.mean(df_damage['msa-s'+str(i+1)]),3)))
ax.hist(df_damage['plom-s'+str(i+1)],bins=5,range=(0.0,4.0),alpha=0.5,label='PLoM, mean = '+str(np.round(np.mean(df_damage['plom-s'+str(i+1)]),3)))
ax.hist(df_damage['plom-uc-s'+str(i+1)],bins=5,range=(0.0,4.0),alpha=0.5,label='PLoM uncorr., mean = '+str(np.round(np.mean(df_damage['plom-uc-s'+str(i+1)]),3)))
ax.set_xlim([0.0,4])
ax.set_xlabel('Structural damage state')
ax.set_ylabel('Num. of realizations')
ax.legend()
ax.grid()
ax.set_title('Non-collapse damage states, Sa = '+str(sa_levels[i])+'g')
plt.show()
# In[12]:
# Expected loss ratios
import pandas as pd
df_loss = pd.DataFrame()
from io import StringIO
from typing import Iterable, _GenericAlias
from urllib.parse import urljoin
import json
import logging
import pytest
from pandas.api.types import is_object_dtype, is_categorical_dtype
import numpy as np
import pandas as pd
from omnipath import options
from omnipath.requests import Enzsub, Complexes, Intercell, Annotations
from omnipath._core.requests import SignedPTMs
from omnipath._core.query._query import EnzsubQuery
from omnipath._core.requests._utils import _split_unique_join, _strip_resource_label
from omnipath.constants._pkg_constants import Key, Endpoint
from omnipath._core.requests._annotations import _MAX_N_PROTS
class TestEnzsub:
def test_str_repr(self):
assert str(Enzsub()) == f"<{Enzsub().__class__.__name__}>"
assert repr(Enzsub()) == f"<{Enzsub().__class__.__name__}>"
def test_params_no_org_genesymbol(self):
params = Enzsub.params()
assert Key.ORGANISM.value not in params
assert Key.GENESYMBOLS.value in params
for k, valid in params.items():
if isinstance(valid, Iterable):
np.testing.assert_array_equal(
list(set(valid)), list(set(EnzsubQuery(k).valid))
)
else:
assert valid == EnzsubQuery(k).valid
def test_resources(self, cache_backup, requests_mock, resources: bytes):
url = urljoin(options.url, Endpoint.RESOURCES.s)
requests_mock.register_uri("GET", f"{url}?format=json", content=resources)
res = Enzsub.resources()
assert res == ("quux",)
assert requests_mock.called_once
def test_invalid_params(self):
with pytest.raises(ValueError, match=r"Invalid value `foo` for `EnzsubQuery`."):
Enzsub.get(foo="bar")
def test_invalid_license(self):
with pytest.raises(ValueError, match=r"Invalid value `bar` for `License`."):
Enzsub.get(license="bar")
def test_invalid_format(self):
with pytest.raises(ValueError, match=r"Invalid value `bar` for `Format`."):
Enzsub.get(format="bar")
def test_valid_params(self, cache_backup, requests_mock, tsv_data: bytes, caplog):
url = urljoin(options.url, Enzsub._query_type.endpoint)
df = pd.read_csv(StringIO(tsv_data.decode("utf-8")), sep="\t")
requests_mock.register_uri(
"GET",
f"{url}?fields=curation_effort%2Creferences%2Csources&format=tsv&license=academic",
content=tsv_data,
)
with caplog.at_level(logging.WARNING):
res = Enzsub.get(license="academic", format="text")
assert f"Invalid `{Key.FORMAT.s}='text'`" in caplog.text
np.testing.assert_array_equal(res.index, df.index)
np.testing.assert_array_equal(res.columns, df.columns)
np.testing.assert_array_equal(res.values, df.values)
assert requests_mock.called_once
def test_annotations(self):
assert set(Enzsub._annotations().keys()) == {e.param for e in EnzsubQuery}
assert all(
isinstance(a, (_GenericAlias, type)) for a in Enzsub._annotations().values()
)
def test_docs(self):
assert set(Enzsub._docs().keys()) == {e.param for e in EnzsubQuery}
assert all(d is None for d in Enzsub._docs().values())
def test_invalid_organism(self, cache_backup, requests_mock, tsv_data: bytes):
url = urljoin(options.url, Enzsub._query_type.endpoint)
requests_mock.register_uri(
"GET",
f"{url}?fields=curation_effort%2Creferences%2Csources&format=tsv",
content=tsv_data,
)
with pytest.raises(
ValueError,
match=r"Invalid value `foobarbaz` for `Organism`. Valid options are:",
):
Enzsub.get(organism="foobarbaz")
assert not requests_mock.called_once
def test_genesymbols_dont_matter(
self, cache_backup, requests_mock, tsv_data: bytes
):
url = urljoin(options.url, Enzsub._query_type.endpoint)
requests_mock.register_uri(
"GET",
f"{url}?fields=curation_effort%2Creferences%2Csources&format=tsv",
content=tsv_data,
)
_ = Enzsub.get(genesymbol=True)
assert requests_mock.called_once
def test_field_injection(self, cache_backup, requests_mock, tsv_data: bytes):
url = urljoin(options.url, Enzsub._query_type.endpoint)
requests_mock.register_uri(
"GET",
f"{url}?fields=Alpha%2Cbeta%2Ccuration_effort%2Creferences%2Csources&format=tsv",
content=tsv_data,
)
_ = Enzsub.get(fields=("beta", "Alpha", "Alpha"))
assert requests_mock.called_once
def test_no_dtype_conversion(self, cache_backup, requests_mock, tsv_data: bytes):
url = urljoin(options.url, Enzsub._query_type.endpoint)
options.convert_dtypes = False
requests_mock.register_uri(
"GET",
f"{url}?fields=curation_effort%2Creferences%2Csources&format=tsv",
content=tsv_data,
)
res = Enzsub.get()
assert is_object_dtype(res["modification"])
options.convert_dtypes = True
res = Enzsub.get()
assert is_categorical_dtype(res["modification"])
assert requests_mock.called_once
class TestIntercell:
def test_resources_wrong_type(self):
with pytest.raises(TypeError):
Intercell.resources(42)
def test_resources_no_generic_resources(self):
with pytest.raises(
ValueError, match=r"No generic categories have been selected."
):
Intercell.resources([])
def test_resources_no_generic(self, cache_backup, requests_mock, resources: bytes):
url = urljoin(options.url, Endpoint.RESOURCES.s)
requests_mock.register_uri("GET", f"{url}?format=json", content=resources)
res = Intercell.resources()
assert res == ("bar", "baz", "foo")
assert requests_mock.called_once
def test_resources_generic(self, cache_backup, requests_mock, resources: bytes):
url = urljoin(options.url, Endpoint.RESOURCES.s)
requests_mock.register_uri("GET", f"{url}?format=json", content=resources)
res = Intercell.resources(generic_categories=["42"])
assert res == ("bar", "foo")
res = Intercell.resources(generic_categories="24")
assert res == ("baz",)
res = Intercell.resources(generic_categories="foobarbaz")
assert res == ()
assert requests_mock.called_once # caching
def test_categories(self, cache_backup, requests_mock, intercell_data: bytes):
url = urljoin(options.url, Key.INTERCELL_SUMMARY.s)
data = json.loads(intercell_data)
requests_mock.register_uri("GET", f"{url}?format=json", content=intercell_data)
res = Intercell.categories()
assert res == tuple(sorted(set(map(str, data[Key.CATEGORY.s]))))
assert requests_mock.called_once
def test_generic_categories(
self, cache_backup, requests_mock, intercell_data: bytes
):
url = urljoin(options.url, Key.INTERCELL_SUMMARY.s)
data = json.loads(intercell_data)
requests_mock.register_uri("GET", f"{url}?format=json", content=intercell_data)
res = Intercell.generic_categories()
assert res == tuple(sorted(set(map(str, data[Key.PARENT.s]))))
assert requests_mock.called_once
def test_password_from_options(
self, cache_backup, requests_mock, intercell_data: bytes
):
old_pwd = options.password
options.password = "<PASSWORD>"
url = urljoin(options.url, Intercell._query_type.endpoint)
requests_mock.register_uri(
"GET",
f"{url}?format=tsv&password=<PASSWORD>",
content=intercell_data,
)
_ = Intercell.get()
options.password = old_pwd
assert requests_mock.called_once
def test_password_from_function_call(
self, cache_backup, requests_mock, intercell_data: bytes
):
old_pwd = options.password
options.password = "<PASSWORD>"
url = urljoin(options.url, Intercell._query_type.endpoint)
requests_mock.register_uri(
"GET",
f"{url}?format=tsv&password=<PASSWORD>",
content=intercell_data,
)
_ = Intercell.get(password="<PASSWORD>")
options.password = old_pwd
assert requests_mock.called_once
class TestComplex:
def test_complex_genes_wrong_dtype(self):
with pytest.raises(TypeError):
Complexes.complex_genes("foo", complexes=42)
def test_comples_genes_empty_complexes(self, caplog):
df = pd.DataFrame()
with caplog.at_level(logging.WARNING):
res = Complexes.complex_genes("foo", complexes=df)
assert res is df
assert "Complexes are empty" in caplog.text
def test_complex_genes_no_column(self):
with pytest.raises(KeyError):
Complexes.complex_genes("foo", complexes=pd.DataFrame({"foo": range(10)}))
def test_complex_genes_no_genes(self):
with pytest.raises(ValueError, match=r"No genes have been selected."):
Complexes.complex_genes([], complexes=None)
def test_complex_genes_complexes_not_specified(
self, cache_backup, requests_mock, tsv_data: bytes
):
url = urljoin(options.url, Complexes._query_type.endpoint)
df = pd.read_csv(StringIO(tsv_data.decode("utf-8")), sep="\t")
requests_mock.register_uri("GET", f"{url}?format=tsv", content=tsv_data)
res = Complexes.complex_genes("fooo")
np.testing.assert_array_equal(res.columns, df.columns)
assert res.empty
def test_complexes_complexes_specified(self, complexes: pd.DataFrame):
res = Complexes.complex_genes("foo", complexes=complexes, total_match=False)
assert isinstance(res, pd.DataFrame)
assert res.shape == (2, 2)
assert set(res.columns) == {"components_genesymbols", "dummy"}
assert all(
any(v in "foo" for v in vs.split("_"))
for vs in res["components_genesymbols"]
)
def test_complexes_total_match(self, complexes: pd.DataFrame):
res = Complexes.complex_genes(
["bar", "baz"], complexes=complexes, total_match=True
)
assert res.shape == (1, 2)
assert all(
all(v in ("bar", "baz") for v in vs.split("_"))
for vs in res["components_genesymbols"]
)
def test_complexes_no_total_match(self, complexes: pd.DataFrame):
res = Complexes.complex_genes(
["bar", "baz", "bar"], complexes=complexes, total_match=False
)
assert res.shape == (3, 2)
assert all(
any(v in ("bar", "baz") for v in vs.split("_"))
for vs in res["components_genesymbols"]
)
class TestAnnotations:
def test_too_many_proteins_requested(self):
with pytest.raises(
ValueError,
match=r"Please specify `force_full_download=True` in order to download the full dataset.",
):
Annotations.get()
def test_params(self):
params = Annotations.params()
assert Key.ORGANISM.value not in params
def test_genesymbols_matter(self, cache_backup, requests_mock, tsv_data: bytes):
url = urljoin(options.url, Annotations._query_type.endpoint)
requests_mock.register_uri(
"GET", f"{url}?proteins=bar&genesymbols=1&format=tsv", content=tsv_data
)
df = pd.read_csv(StringIO(tsv_data.decode("utf-8")), sep="\t")
res = Annotations.get(proteins=["bar"], genesymbols=True)
np.testing.assert_array_equal(res.index, df.index)
np.testing.assert_array_equal(res.columns, df.columns)
np.testing.assert_array_equal(res.values, df.values)
def test_invalid_organism_does_not_matter(
self, cache_backup, requests_mock, tsv_data: bytes
):
url = urljoin(options.url, Annotations._query_type.endpoint)
requests_mock.register_uri(
"GET", f"{url}?proteins=foo&format=tsv", content=tsv_data
)
df = pd.read_csv(StringIO(tsv_data.decode("utf-8")), sep="\t")
res = Annotations.get(proteins=["foo", "foo"], organism="foobarbaz")
np.testing.assert_array_equal(res.index, df.index)
np.testing.assert_array_equal(res.columns, df.columns)
np.testing.assert_array_equal(res.values, df.values)
@pytest.mark.parametrize(
"n_prots",
[
_MAX_N_PROTS - 1,
_MAX_N_PROTS,
_MAX_N_PROTS + 1,
2 * _MAX_N_PROTS,
3 * _MAX_N_PROTS + 42,
],
)
def test_downloading_more_than_n_proteins(
self, n_prots: int, cache_backup, requests_mock, tsv_data: bytes
):
url = urljoin(options.url, Annotations._query_type.endpoint)
prots = sorted(f"foo{i}" for i in range(n_prots))
for i in range((n_prots // _MAX_N_PROTS) + 1):
tmp = prots[i * _MAX_N_PROTS : (i + 1) * _MAX_N_PROTS]
if not len(tmp):
break
requests_mock.register_uri(
"GET",
f"{url}?format=tsv&proteins={'%2C'.join(tmp)}",
content=tsv_data,
)
df = pd.read_csv(StringIO(tsv_data.decode("utf-8")), sep="\t")
res = Annotations.get(proteins=prots)
np.testing.assert_array_equal(res.columns, df.columns)
assert len(res) == (len(df) * int(np.ceil(n_prots / _MAX_N_PROTS)))
if n_prots <= _MAX_N_PROTS:
np.testing.assert_array_equal(res.index, df.index)
np.testing.assert_array_equal(res.values, df.values)
def test_resources(self, cache_backup, requests_mock, tsv_data: bytes):
url = urljoin(options.url, Annotations._query_type.endpoint)
requests_mock.register_uri(
"GET", f"{url}?format=tsv&proteins=foo&resources=bar", content=tsv_data
)
df = pd.read_csv(StringIO(tsv_data.decode("utf-8")), sep="\t")
res = Annotations.get("foo", resources="bar")
np.testing.assert_array_equal(res.index, df.index)
np.testing.assert_array_equal(res.columns, df.columns)
np.testing.assert_array_equal(res.values, df.values)
class TestSignedPTMs:
def test_get_signed_ptms_wrong_ptms_type(self):
with pytest.raises(TypeError, match=r"Expected `ptms`"):
SignedPTMs.get(42, pd.DataFrame())
def test_get_signed_ptms_wrong_interactions_type(self):
with pytest.raises(TypeError, match=r"Expected `interactions`"):
SignedPTMs.get(pd.DataFrame(), 42)
def test_get_signed_ptms(self):
ptms = pd.DataFrame(
{"enzyme": ["alpha", "beta", "gamma"], "substrate": [0, 1, 0], "foo": 42}
)
interactions = pd.DataFrame(
{
"source": ["gamma", "beta", "delta"],
"target": [0, 0, 1],
"is_stimulation": True,
"is_inhibition": False,
"bar": 1337,
}
)
expected = pd.merge(
ptms,
interactions[["source", "target", "is_stimulation", "is_inhibition"]],
left_on=["enzyme", "substrate"],
right_on=["source", "target"],
how="left",
)
res = SignedPTMs.get(ptms, interactions)
np.testing.assert_array_equal(res.index, expected.index)
np.testing.assert_array_equal(res.columns, expected.columns)
np.testing.assert_array_equal(pd.isnull(res), pd.isnull(expected))
np.testing.assert_array_equal(
res.values[~pd.isnull(res)], expected.values[~pd.isnull(expected)]
)
def test_graph_not_a_dataframe(self):
with pytest.raises(
TypeError, match=r"Expected `data` to be of type `pandas.DataFrame`,"
):
SignedPTMs.graph(42)
def test_graph_empty_ptms(self):
ptms = pd.DataFrame()
interactions = pd.DataFrame(
{
"source": ["gamma", "beta", "delta"],
"target": [0, 0, 1],
"is_stimulation": True,
"is_inhibition": False,
"bar": 1337,
}
)
with pytest.raises(ValueError, match=r"No PTMs were retrieved. Please"):
SignedPTMs.get(ptms, interactions)
def test_graph_empty_interactions(self):
ptms = pd.DataFrame(
{
"enzyme": ["alpha", "beta", "gamma"],
"substrate": [0, 1, 0],
"foo": 42,
"enzyme_genesymbol": "bar",
"substrate_genesymbol": "baz",
}
)
interactions = pd.DataFrame()
with pytest.raises(ValueError, match=r"No interactions were retrieved. Please"):
SignedPTMs.get(ptms, interactions)
def test_graph_source_target(self):
ptms = pd.DataFrame(
{
"enzyme": ["alpha", "beta", "gamma"],
"substrate": [0, 1, 0],
"foo": 42,
"enzyme_genesymbol": "bar",
"substrate_genesymbol": "baz",
}
)
src, tgt = SignedPTMs._get_source_target_cols(ptms)
assert src == "enzyme_genesymbol"
assert tgt == "substrate_genesymbol"
src, tgt = SignedPTMs._get_source_target_cols(
ptms[ptms.columns.difference(["enzyme_genesymbol", "substrate_genesymbol"])]
)
assert src == "enzyme"
assert tgt == "substrate"
def test_graph(self):
import networkx as nx
ptms = pd.DataFrame(
{
"enzyme": ["alpha", "beta", "gamma"],
"substrate": [0, 1, 0],
"foo": 42,
"references": "bar;baz",
}
)
interactions = pd.DataFrame(
{
"source": ["gamma", "beta", "delta"],
"target": [0, 0, 1],
"is_stimulation": True,
"is_inhibition": False,
"bar": 1337,
}
)
expected = pd.merge(
ptms,
interactions[["source", "target", "is_stimulation", "is_inhibition"]],
left_on=["enzyme", "substrate"],
right_on=["source", "target"],
how="left",
)
G = SignedPTMs.graph(expected)
assert isinstance(G, nx.DiGraph)
assert G.is_directed()
assert len(G) == 5
for _, _, attr in G.edges(data=True):
assert attr["foo"] == 42
assert attr["references"] == ["bar", "baz"]
class TestUtils:
def test_split_unique_join_no_func(self, string_series: pd.Series):
res = _split_unique_join(string_series)
np.testing.assert_array_equal(
            res, pd.Series(["foo:123", "bar:45;baz", None, "bar:67;baz:67", "foo"])
        )
#' ---
#' title: Greater Seattle Area Housing--Sales Price Prediction
#' author: <NAME>
#' date: 2017-02-27
#' abstract: |
#' The goal of this project is to predict the sale price of a property
#' by employing various predictive machine learning models in an ensemble
#' given housing data such as the number of bedrooms/bathrooms, square
#' footage, year built as well as other less intuitive variables as
#' provided by the Zillow API.
#' header-includes:
#' - \usepackage{booktabs}
#' - \usepackage{longtable}
#' ---
#' # Introduction
#' The Greater Seattle Area housing market has gone through a dramatic price
#' increase in recent years with the median valuation at $609,100, an 11.3%
#' increase from last year and a 4.7% increase forecasted for the next
#' year[^zillowstat]. Because of the dramatic change that has occurred in a
#' short period of time, it has become increasingly difficult to predict
#' property values. We believe that a machine learning model can predict a
#' property value with reasonable accuracy given the property features and
#' the time aspect (_lastSoldDate_).
#' [^zillowstat]: Source: Zillow Data as of February 2017.
#+ echo=False
import math
import numpy as np
import pandas as pd
import pprint
import pweave
from sklearn import linear_model, svm, tree
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, train_test_split
from sklearn.model_selection import GridSearchCV
import sqlite3
pd.set_option('display.max_columns', None)
#' # Data Collection and Preprocessing
#' ## Data Collection Process
#' The most important element of any data science project is the data itself.
#' This project heavily utilizes data from Zillow, a digital real estate
#' destination for buyers, sellers, and agents. Fortunately, Zillow provides
#' a public API, which brings convenience to an otherwise tedious task.
#' Although the availability of a public API has made the data collection
#' process simple, there are some limitations that we had to be cognizant of.
#' Our vision was to start with a "seed" property and, from it, collect
#' "comps," or comparables. Comps are simply other properties with features
#' similar to those of the seed property, which gives a buyer an idea of what
#' the value of the property should be.
#'
#' The first limitation is that the full set of information that we were
#' looking for cannot be extracted from one API endpoint. Zillow does not
#' provide an endpoint which returns property information of comps given a
#' seed property. What it provides instead is one endpoint that returns a
#' list of comp property IDs (Zillow Property ID or ZPID) given a seed
#' property address and a separate endpoint that returns property information
#' given a ZPID. Furthermore, the comp endpoint returns a maximum of 25 comps
#' per seed property. Thus the collection process is divided into three steps:
#' 1. Collect comp IDs given a property address using _GetDeepSearchResults_.
#' 2. Loop through each ZPID, collect up to 25 more comps for each, and append
#' the results to the running list of ZPIDs.
#' 3. Collect property information for each ZPID collected using
#' _GetDeepComps_.
#' The second limitation is that Zillow has limited the number of calls
#' allowed per day to 1000. This poses a problem if one's intent was to
#' collect a significant amount of data. This limits our collection process
#' further since we had to resort to making two calls. A simple solution was
#' to include a sleep timer of 24 hours when a call encounters a rate limit
#' warning. Although somewhat inconvenient, the solution achieved what we
#' needed to accomplish.
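#'
#' To make this concrete, the sketch below mirrors that collection loop. It is
#' illustrative only and is not evaluated here: `get_deep_comps` stands in for a
#' thin wrapper around the _GetDeepComps_ endpoint and `RateLimitError` for the
#' warning Zillow returns once the daily quota is hit; both names are
#' assumptions, not code shown elsewhere in this report.
#+ evaluate=False
import time
def collect_comp_zpids(seed_zpid, rounds=2):
    """Breadth-first comp collection with a 24-hour back-off on rate limits."""
    zpids, frontier = {seed_zpid}, [seed_zpid]
    for _ in range(rounds):
        nxt = []
        for zpid in frontier:
            try:
                comps = get_deep_comps(zpid, count=25)  # hypothetical wrapper
            except RateLimitError:                      # hypothetical exception
                time.sleep(24 * 60 * 60)                # wait out the daily quota
                comps = get_deep_comps(zpid, count=25)
            new = [z for z in comps if z not in zpids]
            zpids.update(new)
            nxt.extend(new)
        frontier = nxt
    return zpids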
#'
#' ## Training Data
#' The table below is just a sample of what the training data looks like.
#' We've removed many of the columns to make sure the table fits in the page.
#' This is only to provide an idea of the formatting.
#+ echo=False
conn = sqlite3.connect('zillowdata.db')
q = '''SELECT *
FROM properties
WHERE lastSoldPrice IS NOT NULL'''
training_raw = pd.read_sql_query(q, conn)
conn.close()
#+ echo=False, results='tex', caption='Sample of dataset'
print(training_raw.head(10).to_latex(
columns=[
'street',
'city',
'zip',
'finishedSqFt',
'lastSoldDate',
'lastSoldPrice'
],
index=False
))
#' Printing the _shape_ attribute shows that we have 2826 observations and 23
#' columns.
#+ term=True
training_raw.shape
#' Finally, we have the following columns
#+ term=True
training_raw.dtypes
#' Since the goal of this project is to predict the sale price, it is obvious
#' that the _lastSoldPrice_ should be the response variable while the other
#' columns can act as feature variables. Of course, some processing such as
#' dummy variable conversion is required before training begins.
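#' As a small sketch of that conversion (illustration only, not evaluated here),
#' categorical fields such as _city_ and _zip_ would be expanded into indicator
#' columns with pandas:
#+ evaluate=False
city_zip_dummies = pd.get_dummies(training_raw[['city', 'zip']].astype(str))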
#'
#' ## Data Processing
#'
#' The next step is to process and clean the data. First let's take a look at
#' each variable and decide which ones need to be excluded. ZPID and street
#' address logically do not affect sales price and thus can be excluded. Street
#' address may explain some sale price variability; however, it requires further
#' processing for proper formatting, that is, we must eliminate unit numbers,
#' standardize suffixes (Dr. vs Drive), etc. This proves to be a difficult
#' task that is beyond the scope of this project. Further, the effects of this
#' variable are closely related to region. We have chosen to exclude it here,
#' but it may be worth exploring further in the future. Finally, the state
#' variable can also be excluded here as we are keeping the scope of this
#' project to WA only.
#+ term=True
training = training_raw.copy()  # copy so the original DataFrame stays intact
training.drop(['zpid', 'street', 'state'], axis=1, inplace=True)
training.dtypes
#' We can see that many of these variables are of type _object_. We'll need to
#' convert these to the appropriate types. Most of these columns, excluding
#' date columns, can be converted to numeric.
cols = training.columns[training.columns.isin([
'taxAssessmentYear',
'taxAssessment',
'yearBuilt',
'lotSizeSqFt',
'finishedSqFt',
'bathrooms',
'bedrooms',
'lastSoldPrice',
'zestimate',
'zestimateValueChange',
'zestimateValueLow',
'zestimateValueHigh',
'zestimatePercentile'
])]
for col in cols:
training[col] = pd.to_numeric(training[col])
#' Now let's convert _lastSoldDate_ and _zestimateLastUpdated_ to dates.
cols = training.columns[training.columns.isin([
'lastSoldDate',
'zestimateLastUpdated'
])]
for col in cols:
    training[col] = pd.to_datetime(training[col], infer_datetime_format=True)
import warnings
import pandas as pd
import numpy as np
import copy
from .syntax import Preprocessor, Regressor, Evaluator
from ..base import BASE
from ...utils import value
##################################################################### 2 Prepare Data
class automatic_run(BASE):
def fit(self):
self.Fit_sklearn()
# tokens = ['df','dfy_predict']
# for token in tokens:
# if token in self.outputs:
# if self.outputs[token].value is not None:
# if self.header:
# df = self.outputs[token].value
# df = pd.DataFrame(df, columns=self.inputs[token[0:3]].value.columns)
# self.outputs[token].value = df
# else:
# self.outputs[token].value = pd.DataFrame(self.outputs[token].value)
self.Send()
# delete all inputs
del self.inputs
# feature representation
class PolynomialFeatures(BASE):
def fit(self):
self.Fit_sklearn()
# headers from class method: get_feature_names
if self.method in ['fit_transform','transform']:
if self.header:
df = self.outputs['df'].value
df = pd.DataFrame(df, columns = self.outputs['api'].value.get_feature_names())
self.outputs['df'].value = df
else:
df = self.outputs['df'].value
df = pd.DataFrame(df)
self.outputs['df'].value = df
self.Send()
# delete all inputs
del self.inputs
class OneHotEncoder(BASE):
def fit(self):
self.Fit_sklearn()
# .toarray() is requied
if self.method in ['fit_transform','transform']:
df = self.outputs['df'].value.toarray()
df = pd.DataFrame(df, columns = self.inputs['df'].value.columns)
self.outputs['df'].value = df
self.Send()
# delete all inputs
del self.inputs
# basic oprator
class Imputer(BASE):
def fit(self):
self.Fit_sklearn()
# headers from input df and based on the class attribute: statistics_
if self.method in ['fit_transform', 'transform']:
if self.header:
df = self.outputs['df'].value
api = self.outputs['api'].value
stats = api.statistics_
NOTnan_ind = [i for i, val in enumerate(stats) if not np.isnan(val)]
df = pd.DataFrame(df, columns=self.inputs['df'].value.columns[NOTnan_ind])
self.outputs['df'].value = df
else:
df = self.outputs['df'].value
df = pd.DataFrame(df)
self.outputs['df'].value = df
self.Send()
# delete all inputs
del self.inputs
# scaler
# feature selector
# feature transformer
# splitter
class train_test_split(BASE):
def fit(self):
# step1: check inputs
self.required('dfx', req=True)
dfx = self.inputs['dfx'].value
dfy = self.inputs['dfy'].value
# step2: assign inputs to parameters if necessary (param = @token)
self.paramFROMinput()
# step3: check the dimension of input data frame
dfx, _ = self.data_check('dfx', dfx, ndim=2, n0=None, n1=None, format_out='df')
# step4: import module and make APIs
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
if dfy is None:
tts_out = F(dfx, **self.parameters)
dfx_train, dfx_test = tts_out
self.set_value('dfx_train', pd.DataFrame(dfx_train,columns=dfx.columns))
self.set_value('dfx_test',pd.DataFrame(dfx_test,columns=dfx.columns))
else:
dfy, _ = self.data_check('dfy', dfy, ndim=1, n0=dfx.shape[0], n1=None, format_out='df')
tts_out = F(dfx, dfy, **self.parameters)
dfx_train, dfx_test, dfy_train, dfy_test = tts_out
self.set_value('dfx_train', pd.DataFrame(dfx_train,columns=dfx.columns))
self.set_value('dfx_test', pd.DataFrame(dfx_test,columns=dfx.columns))
self.set_value('dfy_train', pd.DataFrame(dfy_train,columns=dfy.columns))
self.set_value('dfy_test', pd.DataFrame(dfy_test,columns=dfy.columns))
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(
err).__name__ + ': ' + err.message
raise TypeError(msg)
# send and delete inputs
self.Send()
del self.inputs
del dfx
del dfy
class KFold(BASE):
def fit(self):
self.paramFROMinput()
if 'func_method' in self.parameters:
method = self.parameters.pop('func_method')
else:
method = None
if method not in self.metadata.WParameters.func_method.options:
msg = "@Task #%i(%s): The method '%s' is not available for the function '%s'." % (
self.iblock, self.Task,method,self.Function)
raise NameError(msg)
else:
if method == None:
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.set_value('api', api)
elif method == 'split':
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.required('dfx', req=True)
dfx = self.inputs['dfx'].value
fold_gen = api.split(dfx)
self.set_value('api', api)
self.set_value('fold_gen', fold_gen)
self.Send()
del self.inputs
class LeaveOneOut(BASE):
def fit(self):
self.paramFROMinput()
if 'func_method' in self.parameters:
method = self.parameters.pop('func_method')
else:
method = None
if method not in self.metadata.WParameters.func_method.options:
msg = "@Task #%i(%s): The method '%s' is not available for the function '%s'." % (
self.iblock, self.Task,method,self.Function)
raise NameError(msg)
else:
if method == None:
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.set_value('api', api)
elif method == 'split':
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.required('dfx', req=True)
dfx = self.inputs['dfx'].value
fold_gen = api.split(dfx)
self.set_value('api', api)
self.set_value('fold_gen', fold_gen)
self.Send()
del self.inputs
class ShuffleSplit(BASE):
def fit(self):
self.paramFROMinput()
if 'func_method' in self.parameters:
method = self.parameters.pop('func_method')
else:
method = None
if method not in self.metadata.WParameters.func_method.options:
msg = "@Task #%i(%s): The method '%s' is not available for the function '%s'." % (
self.iblock, self.Task,method,self.Function)
raise NameError(msg)
else:
if method is None:
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.set_value('api', api)
            elif method == 'split':
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.required('dfx', req=True)
dfx = self.inputs['dfx'].value
fold_gen = api.split(dfx)
self.set_value('api', api)
self.set_value('fold_gen', fold_gen)
self.Send()
del self.inputs
class StratifiedShuffleSplit(BASE):
def fit(self):
self.paramFROMinput()
if 'func_method' in self.parameters:
method = self.parameters.pop('func_method')
else:
method = None
if method not in self.metadata.WParameters.func_method.options:
msg = "@Task #%i(%s): The method '%s' is not available for the function '%s'." % (
self.iblock, self.Task,method,self.Function)
raise NameError(msg)
else:
if method is None:
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.set_value('api', api)
            elif method == 'split':
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + err.message
raise TypeError(msg)
self.required('dfx', req=True)
dfx = self.inputs['dfx'].value
fold_gen = api.split(dfx)
self.set_value('api', api)
self.set_value('fold_gen', fold_gen)
self.Send()
del self.inputs
##################################################################### 3 Define Model
# Regression
##################################################################### 4 Search
class GridSearchCV(BASE):
def fit(self):
# step1: check inputs
self.required('dfx', req=True)
self.required('estimator', req=True)
dfx = self.inputs['dfx'].value
dfy = self.inputs['dfy'].value
estimator = self.inputs['estimator'].value
self.parameters['estimator'] = estimator
# step2: check parameters
# Note: estimator is a required parameter and can be received from the input stream
# Note: scorer is not a required parameter but can be received from the input stream
self.paramFROMinput()
# step3: check the dimension of input data frame
dfx, _ = self.data_check('dfx', dfx, ndim=2, n0=None, n1=None, format_out='ar')
if dfy is not None:
dfy, _ = self.data_check('dfy', dfy, ndim=1, n0=dfx.shape[0], n1=None, format_out='ar')
print( dfx.shape)
print( dfy.shape)
# step4: import module and make API
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
api = F(**self.parameters)
except Exception as err:
            msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + str(err)
raise TypeError(msg)
# step5: process
api.fit(dfx, dfy)
# step6: send
order = [edge[1] for edge in self.Base.graph if edge[0]==self.iblock]
for token in set(order):
if token not in self.outputs:
msg = "@Task #%i(%s): not a valid output token '%s'" % (self.iblock + 1, self.Task, token)
raise NameError(msg)
elif token == 'best_estimator_':
if api.get_params()['refit']:
best_estimator_ = copy.deepcopy(api.best_estimator_)
else:
best_estimator_ = copy.deepcopy(self.parameters['estimator'])
best_estimator_.set_params(**api.best_params_)
# best_estimator_.fit(dfx,dfy)
self.set_value('best_estimator_', best_estimator_)
self.outputs[token].count = order.count(token)
self.Base.send[(self.iblock, token)] = self.outputs[token]
elif token == 'cv_results_':
self.set_value('cv_results_', pd.DataFrame(api.cv_results_))
self.outputs[token].count = order.count(token)
self.Base.send[(self.iblock, token)] = self.outputs[token]
elif token == 'api':
self.set_value('api',api)
self.outputs[token].count = order.count(token)
self.Base.send[(self.iblock, token)] = self.outputs[token]
# step7: delete inputs
del self.inputs
del dfx
del dfy
class cross_val_score(BASE):
def fit(self):
# step1: check inputs
self.required('dfx', req=True)
self.required('estimator', req=True)
dfx = self.inputs['dfx'].value
dfy = self.inputs['dfy'].value
estimator = self.inputs['estimator'].value
# step2: check parameters
# Note: estimator is a required parameter and can be received from the input stream
# Note: scorer is not a required parameter but can be received from the input stream
self.paramFROMinput()
if 'estimator' not in self.parameters:
self.parameters['estimator'] = estimator
if 'X' not in self.parameters:
self.parameters['X'] = dfx
# step3: check the dimension of input data frame
dfx, _ = self.data_check('dfx', dfx, ndim=2, n0=None, n1=None, format_out='ar')
if dfy is not None:
dfy, _ = self.data_check('dfy', dfy, ndim=1, n0=dfx.shape[0], n1=None, format_out='ar')
# step4: import module and make APIs
try:
exec ("from %s.%s import %s" % (self.metadata.modules[0], self.metadata.modules[1], self.Function))
submodule = getattr(__import__(self.metadata.modules[0]), self.metadata.modules[1])
F = getattr(submodule, self.Function)
scores = F(**self.parameters)
except Exception as err:
            msg = '@Task #%i(%s): ' % (self.iblock + 1, self.Task) + type(err).__name__ + ': ' + str(err)
raise TypeError(msg)
# step6: set outputs
order = [edge[1] for edge in self.Base.graph if edge[0]==self.iblock]
for token in set(order):
if token == 'scores':
self.set_value(token, | pd.DataFrame(scores) | pandas.DataFrame |
# %%
import pandas as pd
sp_dir = '/Users/rwang/RMI/Climate Action Engine - Documents/OCI Phase 2'
up_mid_down = pd.read_csv(sp_dir + '/Upstream/upstream_data_pipeline_sp/Postprocessed_outputs_2/downstream_postprocessed_scenarios_fix.csv')
up_mid_down = up_mid_down[up_mid_down['gwp']==20]
def prep_for_webtool(up_mid_down):
scenario = | pd.DataFrame() | pandas.DataFrame |
"""Tools for pre/post processing inputs/outputs from neural networks
Unlike pytorch's builtin tools, this code allows building pytorch modules from
scikit-learn estimators.
"""
import pandas as pd
import numpy as np
import xarray as xr
import torch
from torch import nn
from uwnet.thermo import compute_apparent_source
from uwnet.modules import MapByKey, LinearFixed
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import logging
logger = logging.getLogger(__name__)
def prepare_data(data, exog=['QT', 'SLI', 'SST', 'SOLIN'], sample=None):
"""Flatten XArray dataset into dataframe.
    The columns of the dataframe are the variable names and height indices.
The rows are the samples, which are taken along 'x', 'y', and 'time'.
"""
logger.info("Flattening xarray Dataset to pandas DataFrame")
size = data[exog].nbytes/1e9
logger.info(f"Size: {size} GB")
vals = []
names = []
sample_dims = ['x', 'y', 'time']
for key in exog:
val = data[key].stack(s=sample_dims)
if 'z' not in val.dims:
val = val.expand_dims('z')
arr = val.transpose('s', 'z').values
vals.append(arr)
for z in range(arr.shape[1]):
names.append((key, z))
inputs = np.concatenate(vals, axis=1)
idx = pd.MultiIndex.from_tuples(names)
return | pd.DataFrame(inputs, columns=idx) | pandas.DataFrame |
import pandas as pd
import io
import requests
from datetime import datetime
#Import data file if it already exists
try:
past_data = pd.read_excel("Utah_Data.xlsx")
past_dates = past_data["Date"].tolist()
except:
past_data = | pd.DataFrame({}) | pandas.DataFrame |
# Spectral_Analysis_Amp_and_Phase.py
import os
import numpy as np
import pandas as pd
import scipy.linalg as la
import matplotlib.pyplot as plt
# Import time from the data or define it
t = np.arange(0.015, 0.021, 10**-7)
dt = 10**-7
# Define trainsize and number of modes
trainsize = 20000 # Number of snapshots used as training data.
num_modes = 44 # Number of POD modes.
reg = 0 # Just an input in case we regularize DMDc.
# Locate the full data of snapshots FOM and ROMs (INPUT)
Folder_name_data = 'C:\\Users\\Admin\\Desktop\\combustion\\'
file_name_FOM = 'traces_gems_60k_final.npy'
file_name_ROM_DMDc = 'traces_rom_DMDc_rsvd.npy'
file_name_ROM_cubic_r25 = 'traces_rom_cubic_tripple_reg_r25.npy'
file_name_ROM_cubic_r44 = 'traces_rom_cubic_r44.npy'
file_name_ROM_Quad_r44 = 'traces_rom_60k_100_30000.npy'
# Define output file location and file names to identify phase and amplitudes (OUTPUT)
folder_name = "C:\\Users\\Admin\\Desktop\\combustion\\spectral\\Final_plots\\"
Amp_name = folder_name + "\\" + "Amp" # Amplitude plots
Phase_name = folder_name + "\\" + "Phase" # Phase plots
# Load the data
FOM_ = np.load(Folder_name_data + file_name_FOM)
ROM_DMDc = np.load(Folder_name_data + file_name_ROM_DMDc)
ROM_cubic_r25 = np.load(Folder_name_data + file_name_ROM_cubic_r25)
ROM_cubic_r44 = np.load(Folder_name_data + file_name_ROM_cubic_r44)
ROM_Quad_r44 = np.load(Folder_name_data + file_name_ROM_Quad_r44)
# Plotting adjustments
End_plot_at = 60000 # 59990 # 40000
freq_limit_to_plot = 15000
# =============================================================================
def lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit, savefile):
"""Plots for comparision of data in time. Check the saved data in
folder_name.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
savefile
Suffix to save the file name
"""
print("Time series plots")
plt.xlim([0.015, 0.021]) # set axis limits
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid', c='k')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed', c='#ff7f0e')
# plt.plot(t[0:End_plot_at],
# pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
# label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF', linestyle='dashed', c='b')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot', c='r')
plt.xlabel('time')
plt.ylabel(unit)
plt.axvline(x=t[0] + trainsize*dt, color='black')
plt.legend()
fname = f"{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{savefile}.pdf"
plt.savefig(os.path.join(folder_name, fname),
bbox_inches="tight", dpi=200)
plt.show()
def L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit):
"""Plot L2 norm error comparision between all the ROMs.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
"""
print("L2 norm error plot")
e_ROM_Quad_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r25 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_DMDc = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at]))/la.norm( | pd.DataFrame(FOM_) | pandas.DataFrame |
import pandas
import os
from locale import *
import locale
locale.setlocale(LC_NUMERIC, '')
fs = pandas.read_csv('./Ares.csv', sep=';', encoding='cp1252', parse_dates=[1,3,5,7,9,11], dayfirst=True)
# Separar por pares de columnas
materia_organica = pandas.DataFrame(fs[[fs.columns[0], fs.columns[1]]])
conductividad = pandas.DataFrame(fs[[fs.columns[2], fs.columns[3]]])
amonio = pandas.DataFrame(fs[[fs.columns[4], fs.columns[5]]])
solidos = pandas.DataFrame(fs[[fs.columns[6], fs.columns[7]]])
temperatura = pandas.DataFrame(fs[[fs.columns[8], fs.columns[9]]])
lluvias = pandas.DataFrame(fs[[fs.columns[10], fs.columns[11]]])
mareas = pandas.DataFrame(fs[[fs.columns[12], fs.columns[13]]])
oxigeno = pandas.DataFrame(fs[[fs.columns[14], fs.columns[15]]])
#Clean trailing NaNs
materia_organica = materia_organica.dropna()
conductividad = conductividad.dropna()
amonio = amonio.dropna()
solidos = solidos.dropna()
temperatura = temperatura.dropna()
lluvias = lluvias.dropna()
mareas = mareas.dropna()
oxigeno = oxigeno.dropna()
#Convertir a fechas la primera columna
materia_organica[materia_organica.columns[0]] = pandas.to_datetime(materia_organica[materia_organica.columns[0]], dayfirst=True)
conductividad[conductividad.columns[0]] = pandas.to_datetime(conductividad[conductividad.columns[0]], dayfirst=True)
amonio[amonio.columns[0]] = pandas.to_datetime(amonio[amonio.columns[0]], dayfirst=True)
solidos[solidos.columns[0]] = pandas.to_datetime(solidos[solidos.columns[0]], dayfirst=True)
temperatura[temperatura.columns[0]] = pandas.to_datetime(temperatura[temperatura.columns[0]], dayfirst=True)
lluvias[lluvias.columns[0]] = pandas.to_datetime(lluvias[lluvias.columns[0]], dayfirst=True)
mareas[mareas.columns[0]] = pandas.to_datetime(mareas[mareas.columns[0]], dayfirst=True)
oxigeno[oxigeno.columns[0]] = pandas.to_datetime(oxigeno[oxigeno.columns[0]], dayfirst=True)
#Convertir a float la segunda
materia_organica[materia_organica.columns[1]] = materia_organica[materia_organica.columns[1]].map(locale.atof)
conductividad[conductividad.columns[1]] = conductividad[conductividad.columns[1]].map(locale.atof)
amonio[amonio.columns[1]] = amonio[amonio.columns[1]].map(locale.atof)
solidos[solidos.columns[1]] = solidos[solidos.columns[1]].map(locale.atof)
temperatura[temperatura.columns[1]] = temperatura[temperatura.columns[1]].map(locale.atof)
lluvias[lluvias.columns[1]] = lluvias[lluvias.columns[1]].map(locale.atof)
mareas[mareas.columns[1]] = mareas[mareas.columns[1]].map(locale.atof)
oxigeno[oxigeno.columns[1]] = oxigeno[oxigeno.columns[1]].map(locale.atof)
lluvias = lluvias[lluvias[lluvias.columns[1]] >= 0]
materia_organica = pandas.Series(materia_organica[materia_organica.columns[1]].values, materia_organica[materia_organica.columns[0]])
conductividad = pandas.Series(conductividad[conductividad.columns[1]].values, conductividad[conductividad.columns[0]])
amonio = pandas.Series(amonio[amonio.columns[1]].values, amonio[amonio.columns[0]])
solidos = pandas.Series(solidos[solidos.columns[1]].values, solidos[solidos.columns[0]])
temperatura = | pandas.Series(temperatura[temperatura.columns[1]].values, temperatura[temperatura.columns[0]]) | pandas.Series |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = | Series([]) | pandas.Series |
# coding: utf-8
import json
import pandas as pd
import numpy as np
import glob
import ast
from modlamp.descriptors import *
import re
import cfg
import os
def not_in_range(seq):
if seq is None or len(seq) < 1 or len(seq) > 80:
return True
return False
def bad_terminus(peptide):
if peptide.nTerminus[0] is not None or peptide.cTerminus[0] is not None:
return True
return False
def is_valid(peptide):
try:
seq = peptide.seq[0]
if not seq.isupper():
return False
if bad_terminus(peptide):
return False
if not_in_range(seq):
return False
if seq.find("X") != -1:
return False
return True
except:
return False
def get_valid_sequences():
peptides = pd.DataFrame()
all_file_names = []
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "dbaasp/*.json")):
filename = j_file[j_file.rfind("/") + 1:]
with open(j_file, encoding='utf-8') as train_file:
try:
dict_tmp = json.load(train_file)
dict_tmp["seq"] = dict_tmp.pop("sequence")
dict_train = {}
dict_train["peptideCard"] = dict_tmp
except:
print(f'jsonLoad error!:{filename}')
continue
if dict_train["peptideCard"].get("unusualAminoAcids") != []:
continue
peptide = pd.DataFrame.from_dict(dict_train, orient='index')
if is_valid(peptide):
peptides = pd.concat([peptides, peptide])
all_file_names.append(filename)
peptides["filename"] = all_file_names
peptides.to_csv("./data/valid_sequences.csv")
return peptides
def add_activity_list(peptides):
activity_list_all = []
for targets in peptides.targetActivities: # one seq has a list of targets
try:
activity_list = []
for target in targets:
if target['unit']['name'] == 'µM': # µg/ml
try:
con = target['concentration']
activity_list.append(target['concentration'])
except:
continue
activity_list_all.append(activity_list)
except:
activity_list_all.append([])
continue
peptides["activity_list"] = activity_list_all
return peptides
def add_toxic_list(peptides):
toxic_list_all = []
for targets in peptides.hemoliticCytotoxicActivities: # one seq has a list of targets
try:
toxic_list = []
for target in targets:
if target['unit']['name'] == 'µM': # µg/ml
try:
toxic_list.append(target['concentration'])
except:
continue
toxic_list_all.append(toxic_list)
except:
toxic_list_all.append([])
continue
peptides["toxic_list"] = toxic_list_all
return peptides
def add_molecular_weights(peptides):
seqs = [doc for doc in peptides["seq"]]
mws = []
for seq in seqs:
try:
desc = GlobalDescriptor(seq.strip())
desc.calculate_MW(amide=True)
mw = desc.descriptor[0][0]
mws.append(mw)
except:
mws.append(None)
peptides["molecular_weight"] = mws
return peptides
def convert_units(peptides):
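    # Conversion sketch: values reported in µM are converted to µg/ml via
    #   µg/ml = µM * molecular_weight (g/mol) / 1000
    # e.g. a hypothetical 10 µM entry for a 3000 g/mol peptide -> 10*3000/1000 = 30 µg/ml.
    # The same rule is applied to both the activity and toxicity lists below.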
converted_activity_all = []
converted_toxic_all = []
for activity_list, toxic_list, molecular_weight in zip(peptides.activity_list,
peptides.toxic_list,
peptides.molecular_weight):
converted_activity_list = []
converted_toxic_list = []
for item in activity_list:
item = item.replace(">", "") # '>10' => 10
item = item.replace("<", "") # '<1.25' => 1.25
item = item.replace("=", "") # '=2' => 2
if item == "NA":
continue
if item.find("±") != -1:
item = item[:item.find("±")] # 10.7±4.6 => 10.7
if item.find("-") != -1:
item = item[:item.find("-")] # 12.5-25.0 => 12.5
item = item.strip()
try:
converted_activity_list.append(float(item) * molecular_weight / 1000)
except:
pass
for item in toxic_list:
item = item.replace(">", "") # '>10' => 10
item = item.replace("<", "") # '<1.25' => 1.25
item = item.replace("=", "") # '=2' => 2
if item == "NA":
continue
if item.find("±") != -1:
item = item[:item.find("±")] # 10.7±4.6 => 10.7
if item.find("-") != -1:
item = item[:item.find("-")] # 12.5-25.0 => 12.5
item = item.strip()
try:
converted_toxic_list.append(float(item) * molecular_weight / 1000)
except:
pass
converted_activity_all.append(converted_activity_list)
converted_toxic_all.append(converted_toxic_list)
peptides["converted_activity"] = converted_activity_all
peptides["converted_toxic"] = converted_toxic_all
print('--> Writing valid sequences with molecular weights converted to valid_sequences_with_mw_converted.csv')
peptides.to_csv("./data/valid_sequences_with_mw_converted.csv")
return peptides
# Starting process
print('Dataset Creation process begins ... ')
# AMP data
print('**** Creating AMP datasets ****')
# Get Valid Sequences
peptide_all = get_valid_sequences()
print ('1. Getting all valid peptide sequences from DBAASP, number of seqs extracted = ', len(peptide_all))
print('--> Sequences stored in valid_sequences.csv')
# Add molecular weights
print('2. Converting Molecular weights')
peptide_all_with_mw = add_molecular_weights(peptide_all)
# Extract list of anti-microbial activities and list of toxicities
peptide_all_with_activity = add_activity_list(peptide_all)
peptide_all_with_activity_toxicity = add_toxic_list(peptide_all_with_activity)
# Add the converted units to activity list and toxicity list
peptide_all_converted = convert_units(peptide_all_with_activity_toxicity)
# Statistics
def get_stats():
peptides = pd.DataFrame()
all_file_names = []
total = 0
unusual_amino_acids = 0
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "dbaasp/*.json")):
total += 1
filename = j_file[j_file.rfind("/") + 1:]
with open(j_file, encoding='utf-8') as train_file:
try:
dict_tmp = json.load(train_file)
dict_tmp["seq"] = dict_tmp.pop("sequence")
dict_train = {}
dict_train["peptideCard"] = dict_tmp
except:
print(f'jsonLoad error!:{filename}')
continue
if dict_train["peptideCard"].get("unusualAminoAcids") != []:
unusual_amino_acids += 1
continue
peptide = pd.DataFrame.from_dict(dict_train, orient='index')
peptides = pd.concat([peptides, peptide])
all_file_names.append(filename)
peptides["filename"] = all_file_names
print ("--> For DBAASP:")
print ("Total number of sequences:", total)
print ("Total number of unusual AminoAcids:", unusual_amino_acids)
return peptides
print('3. Some Statistics of collected valid sequences')
peptide_all = get_stats()
not_valid_count = len([seq for seq in peptide_all.seq if not_in_range(seq)])
print ("--> Number of not in range sequences:", not_valid_count)
print ("--> Number of valid sequences:", len(peptide_all_converted))
has_activity = [item for item in peptide_all_converted.activity_list if item != []]
print ("--> Number of valid sequences with antimicrobial activity:", len(has_activity))
has_toxicity = [item for item in peptide_all_converted.toxic_list if item != []]
print ("--> Number of valid sequences with toxicity:", len(has_toxicity))
################################################################
df = pd.read_csv("./data/valid_sequences_with_mw_converted.csv")
print (len(df))
# df.head() # default df: is dbaasp
def add_min_max_mean(df_in):
min_col = [min(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
max_col = [max(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
mean_col = [np.mean(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
df_in["min_activity"] = min_col
df_in["max_activity"] = max_col
df_in["avg_activity"] = mean_col
return df_in
def all_activity_more_than_30(x_str):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < 30:
return False # all of them
# just for negative (pos: any item < 10, neg: all of them > 30)
return True
def all_activity_more_than_str(x_str, num):
x = ast.literal_eval(x_str)
if len(x) == 0:
return False
for i in range(len(x)):
if x[i] < num:
return False
return True
def all_activity_more_than(df, num):
return df[df['converted_activity'].apply(lambda x: all_activity_more_than_str(x, num))]
def all_toxic_more_than(df, num):
return df[df['converted_toxic'].apply(lambda x: all_activity_more_than_str(x, num))]
def all_activity_less_than_str(x_str, num):
x = ast.literal_eval(x_str)
if len(x) == 0:
return False
for i in range(len(x)):
if x[i] > num:
return False
return True
def all_toxic_less_than(df, num):
return df[df['converted_toxic'].apply(lambda x: all_activity_less_than_str(x, num))]
def has_activity_less_than_10(x_str):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < 10:
return True
return False
def has_activity_less_than_str(x_str, num):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < num:
return True
return False
def has_activity_less_than(df, num):
return df[df['converted_activity'].apply(lambda x: has_activity_less_than_str(x, num))]
def get_seq_len_less_than(df, seq_length):
df_short = df[df['seq'].apply(lambda x: len(x) <= seq_length)]
return df_short
def remove_df(df1, df2):
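    # Set-difference trick: appending df2 twice makes every df2 row (and any matching
    # df1 row) a duplicate, so drop_duplicates(keep=False) returns df1 - df2.
    # Note this also drops rows duplicated within df1 itself.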
return pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
# add min, max, mean to all dbaasp
df = add_min_max_mean(df)
df_dbaasp = df[["seq", "activity_list", "converted_activity",
"min_activity", "max_activity", "avg_activity"]]
df_dbaasp.to_csv("./data/all_valid_dbaasp.csv")
# 3) Overlapping sequences between DBAASP and Satpdb with AMP activity <10 ug/ml
print('4. Finding overlapping sequences between DBAASP and Satpdb with AMP activity <10 ug/ml ...')
def get_satpdb(train_file):
for line in train_file.readlines():
if "Peptide ID" in line:
record = {}
line = re.sub(u"\\<.*?\\>", "", line)
peptideId = line.split('Peptide ID')[1].split('Sequence')[0]
record['Peptide.ID'] = peptideId
record['Sequence'] = line.split('Sequence')[1].split('C-terminal modification')[0]
record['C.terminal.modification'] = line.split('C-terminal modification')[1].split('N-terminal modification')[0]
record['N.terminal.modification'] = line.split('N-terminal modification')[1].split('Peptide Type')[0]
record['Peptide.Type'] = line.split('Peptide Type')[1].split('Type of Modification')[0]
record['Type.of.Modification'] = line.split('Type of Modification')[1].split('Source (Databases)')[0]
record['Source..Databases.'] = line.split('Source (Databases)')[1].split('Link to Source')[0]
record['Link.to.Source'] = line.split('Link to Source')[1].split('Major Functions')[0]
record['Major.Functions'] = line.split('Major Functions')[1].split('Sub-functions')[0]
record['Sub.functions'] = line.split('Sub-functions')[1].split('Additional Info')[0]
record['Additional.Info'] = line.split('Additional Info')[1].split('Helix (%)')[0]
record['Helix'] = line.split('Helix (%)')[1].split('Strand (%)')[0]
record['Strand'] = line.split('Strand (%)')[1].split('Coil (%)')[0]
record['Coil'] = line.split('Coil (%)')[1].split('Turn (%)')[0]
record['Turn'] = line.split('Turn (%)')[1].split('DSSP states')[0]
record['DSSP.states'] = line.split('DSSP states')[1].split('Tertiary Structure')[0]
return peptideId, record
def get_satpdbs():
dict_train = {}
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "satpdb/source/*.html")):
with open(j_file, encoding='utf-8') as train_file:
try:
name, record = get_satpdb(train_file)
dict_train[name] = record
except:
print(f'error loading html:{j_file}')
peptides = pd.DataFrame.from_dict(dict_train, orient='index')
peptides.to_csv(os.path.join(cfg.DATA_ROOT,"satpdb/satpdb.csv"))
return peptides
df_satpdb = get_satpdbs()
#df_satpdb = pd.read_csv("./data/satpdb/satpdb.csv")
df_satpdb = df_satpdb.rename(index=str, columns={"Sequence": "seq",
"C.terminal.modification": "cterminal",
"N.terminal.modification": "nterminal",
"Peptide.Type": "Peptide_Type",
"Type.of.Modification": "modi"})
valid_df_satpdb = df_satpdb[(df_satpdb.cterminal == "Free") &
(df_satpdb.nterminal == "Free") &
(df_satpdb.Peptide_Type == "Linear") &
(df_satpdb.modi == "None")]
print ("--> Number of valid satpdb = ", len(valid_df_satpdb))
df_overlap = pd.merge(df, valid_df_satpdb, on='seq', how='inner')
print ("--> Number of overlap sequences = ", len(df_overlap))
min_col = [min(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
max_col = [max(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
mean_col = [np.mean(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
df_overlap["min_activity"] = min_col
df_overlap["max_activity"] = max_col
df_overlap["avg_activity"] = mean_col
df_overlap_all = df_overlap[["seq", "activity_list", "converted_activity",
"min_activity", "max_activity", "avg_activity"]]
print('5. Writing the overlap sequences to all_overlap.csv')
df_overlap_all.to_csv("./data/all_overlap.csv")
# length for all <=50
#
# overlap_neg: satpdb all activity greater than 100 : negative
# ** satpdb_pos: satpdb (the same as uniprot1) - overlap_neg
# dbaasp < 25 -> pos anything
# ** amp_pos = dbaasp < 25 + satpdb_pos
# select sequences dbaasp, satpdb, and overlap(dbaasp, satpdb) of len <=50
print('6. Selecting sequences dbaasp, satpdb, and overlap(dbaasp, satpdb) of len <=50')
df = get_seq_len_less_than(df, 50)
df_overlap = get_seq_len_less_than(df_overlap, 50)
valid_df_satpdb = get_seq_len_less_than(valid_df_satpdb, 50)
print('7. Selecting negative and positive sequences for AMP activity')
overlap_neg = all_activity_more_than(df_overlap, 100)
print ("--> Number of negative seq in satpdb", len(overlap_neg))
print ("--> Number of unique seq in satpdb", len(valid_df_satpdb["seq"].drop_duplicates()))
satpdb_pos = remove_df(valid_df_satpdb["seq"].drop_duplicates(), overlap_neg["seq"])
satpdb_pos1 = pd.DataFrame({'seq': satpdb_pos.values}) # amp_pos[["seq"]]
satpdb_pos1["source"] = ["satpdb_pos"] * len(satpdb_pos1)
satpdb_pos1 = satpdb_pos1[["seq", "source"]]
print ("--> Number of positive seq in satpdb", len(satpdb_pos))
satpdb_pos1.seq = satpdb_pos1.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
satpdb_pos1 = satpdb_pos1.drop_duplicates('seq')
print('--> Writing to satpdb_pos.csv')
satpdb_pos1.to_csv("./data/satpdb_pos.csv", index=False, header=False)
def get_ampep(path):
ampeps = {}
ampeps['seq'] = []
for line in open(path).readlines():
if not line.startswith('>'):
ampeps['seq'].append(line.strip())
return | pd.DataFrame.from_dict(ampeps) | pandas.DataFrame.from_dict |
import argparse
import sys
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import metrics
from scipy.stats import pearsonr, spearmanr
from scipy.optimize import curve_fit
from collections import Counter
import pickle
import pdb
parser = argparse.ArgumentParser(description = '''Visualize and analyze the DockQ scores.''')
#Bench4
parser.add_argument('--bench4_dockq_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 AF in csv.')
parser.add_argument('--bench4_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for benchmark 4 from RF in csv.')
parser.add_argument('--plDDT_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#parser.add_argument('--pconsdock_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
#parser.add_argument('--pconsdock_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to pconsdock metrics in csv.')
parser.add_argument('--bench4_kingdom', nargs=1, type= str, default=sys.stdin, help = 'Path to kingdoms for bench4 in csv.')
parser.add_argument('--dssp_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp annotations for bench4 in csv.')
parser.add_argument('--afdefault_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_bench4', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
#Marks positive
parser.add_argument('--marks_dockq_RF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set RF in csv.')
parser.add_argument('--marks_dockq_AF_bb', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF back bone atoms in csv.')
parser.add_argument('--marks_dockq_AF_aa', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set AF all atoms in csv.')
parser.add_argument('--marks_dockq_GRAMM', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set GRAMM in csv.')
parser.add_argument('--marks_dockq_TMfull', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set TMdock in csv.')
parser.add_argument('--marks_dockq_TMint', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set interface TMdock in csv.')
parser.add_argument('--marks_dockq_mdockpp', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for marks set MdockPP in csv.')
parser.add_argument('--plDDT_marks_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--plDDT_marks_fused', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
parser.add_argument('--dssp_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to dssp metrics in csv.')
parser.add_argument('--ifstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to if metrics in csv.')
parser.add_argument('--aln_scores_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to aln scores in csv.')
parser.add_argument('--oxstats_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to statistics over organisms in csv.')
parser.add_argument('--afdefault_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to default AF alignments Neff in csv.')
parser.add_argument('--tophits_neff_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to top hits alignments Neff in csv.')
parser.add_argument('--af_chain_overlap_marks', nargs=1, type= str, default=sys.stdin, help = 'Path to chain overlap for AF a3m in csv.')
#Marks negative
parser.add_argument('--plDDT_marks_negative_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#Negatome
parser.add_argument('--plDDT_negatome_af', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv.')
#New set
parser.add_argument('--newset_dockq_AF', nargs=1, type= str, default=sys.stdin, help = 'Path to dockq scores for new set AF in csv.')
parser.add_argument('--plDDT_newset', nargs=1, type= str, default=sys.stdin, help = 'Path to plDDT metrics in csv for newset.')
#Output directory
parser.add_argument('--outdir', nargs=1, type= str, default=sys.stdin, help = 'Path to output directory. Include /in end')
################FUNCTIONS#################
def dockq_box(bench4_dockq, outdir):
'''Plot a boxplot of the dockq score for the different modes
'''
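    # fraq_correct below is the fraction of models with DockQ >= 0.23, i.e. at least
    # acceptable quality in the usual DockQ/CAPRI grading (a convention, not data-derived).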
#Plot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
modes = bench4_dockq.columns[1:]
all_modes = []
all_scores = []
all_msas = []
all_model_options = []
accuracies = {}
for mode in modes:
#Frac correct and avg score
fraq_correct = np.argwhere(bench4_dockq[mode].values>=0.23).shape[0]/len(bench4_dockq)
accuracies[mode]=fraq_correct
av = np.average(bench4_dockq[mode].values)
print(mode, np.round(fraq_correct,3),np.round(av,3))
#Save scores
all_scores.extend([*bench4_dockq[mode].values])
mode = '_'.join(mode.split('_')[4:])
mode = mode.split('_')
msa = mode[0]
model = '_'.join(mode[1:-1])
option = mode[-1]
#save
all_modes.extend([msa+'\n'+model+'\n'+option]*len(bench4_dockq))
all_msas.extend([msa]*len(bench4_dockq))
all_model_options.extend([model+' '+option]*len(bench4_dockq))
def correlate_scores(bench4_dockq, outdir):
'''Correlate the scores for all different modeling strategies
'''
modes = ['DockQ_dockqstats_bench4_af2_hhblits_model_1_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_rec10',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_hhblits_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2stdmsa_model_1_ptm_rec10',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_ens8',
'DockQ_dockqstats_bench4_af2_af2andhhblitsmsa_model_1_ptm_rec10']
corr_matrix = np.zeros((len(modes),len(modes)))
for i in range(len(modes)):
scores_i = bench4_dockq[modes[i]].values
for j in range(i+1,len(modes)):
scores_j = bench4_dockq[modes[j]].values
#Correlate
R,p = pearsonr(scores_i,scores_j)
corr_matrix[i,j]=np.round(R,2)
corr_matrix[j,i]=np.round(R,2)
print(modes)
print(corr_matrix)
#Create df
corr_df = pd.DataFrame()
modes = ['_'.join(x.split('_')[4:]) for x in modes]
corr_df['Comparison'] = modes
for i in range(len(modes)):
corr_df[modes[i]]=corr_matrix[i,:]
corr_df.to_csv(outdir+'model_correlations.csv')
def fetch_missing_dockq(marks_dockq_AF_bb,marks_dockq_AF_aa):
'''Fetch missing DockQ scores
'''
ids = ['_'.join(x.split('-')) for x in marks_dockq_AF_aa.complex_id.values]
#Get mising scores
missing = marks_dockq_AF_bb[~marks_dockq_AF_bb.complex_id.isin(ids)]
ids = [x[:6]+'-'+x[7:] for x in missing.complex_id.values]
missing['complex_id']=ids
marks_dockq_AF_aa = pd.concat([marks_dockq_AF_aa,missing[marks_dockq_AF_aa.columns]])
return marks_dockq_AF_aa
def pdockq(if_plddt_contacts, dockq_scores, outdir):
#pdockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
#Create RA
x_ra = []
y_ra = []
y_std = []
y_av_err = []
step = 20
for t in np.arange(0,max(if_plddt_contacts)-step,step):
inds = np.argwhere((if_plddt_contacts>=t)&(if_plddt_contacts<t+step))[:,0]
x_ra.append(t+step/2)
y_ra.append(np.average(dockq_scores[inds]))
y_std.append(np.std(dockq_scores[inds]))
y_av_err.append(np.average(np.absolute(dockq_scores[inds]-y_ra[-1])))
#Do a simple sigmoid fit
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
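    # The fit below parameterises pDockQ (sketch of the scoring function):
    #   pDockQ = L / (1 + exp(-k*(x - x0))) + b,   x = IF-plDDT * log10(IF contacts + 1)
    # The fitted (L, x0, k, b) are returned as popt and reused by the callers for ranking.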
xdata = if_plddt_contacts[np.argsort(if_plddt_contacts)]
ydata = dockq_scores[np.argsort(if_plddt_contacts)]
p0 = [max(ydata), np.median(xdata),1,min(ydata)] # this is an mandatory initial guess
popt, pcov = curve_fit(sigmoid, xdata, ydata,p0, method='dogbox')
y = sigmoid(xdata, *popt)
plt.plot(xdata,y,color='r',label='Sigmoidal fit')
#Calc error
print('Sigmoid params:',*popt)
plt.scatter(if_plddt_contacts,dockq_scores,s=1)
#plt.plot(x_ra,y_ra,label='Running average', color='tab:blue')
#plt.fill_between(x_ra,np.array(y_ra)-np.array(y_av_err),np.array(y_ra)+np.array(y_av_err),color='tab:blue',alpha=0.25, label='Average error')
plt.title('pDockQ')
plt.xlabel('IF plDDT⋅log(IF contacts)')
plt.ylabel('DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pDockQ.svg',format='svg',dpi=300)
plt.close()
print('Average error for sigmoidal fit:',np.average(np.absolute(y-ydata)))
print('L=',np.round(popt[0],3),'x0=',np.round(popt[1],3) ,'k=',np.round(popt[2],3), 'b=',np.round(popt[3],3))
return popt
def ROC_pred_marks(marks_dockq_AF, plDDT_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
predicted structures
'''
#Merge dfs
plDDT_marks['complex_id']=plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on=['complex_id'],how='inner')
#Get min of chains
single_chain_plddt = np.min(merged[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
merged['min_chain_plddt_av_1'] = single_chain_plddt
#Analyze ROC as a function of
plDDT_metrics = ['if_plddt_av', 'min_chain_plddt_av',
'plddt_av', 'num_atoms_in_interface', 'num_res_in_interface']
plDDT_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
run='1'
dockq_scores = merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run'+run].values
correct = np.zeros(len(dockq_scores))
correct[np.argwhere(dockq_scores>=0.23)]=1
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
    colors = {0:'darkblue',1:'magenta',2:'orange',3:'darkgreen',4:'tab:blue',5:'yellow',6:'black'}
for i in range(len(plDDT_metrics)):
plDDT_metric_vals = merged[plDDT_metrics[i]+'_'+run].values
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, plDDT_metric_vals, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
label = plDDT_metrics[i]
plt.plot(fpr, tpr, label = plDDT_nice_names[label]+': AUC = %0.2f' % roc_auc,color=colors[i])
#Add log(if contacts)*if_plddt_av
if_plddt_contacts = merged['if_plddt_av_1'].values*np.log10(merged['num_atoms_in_interface_1'].values+1)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, if_plddt_contacts, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Get pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
sigmoid_params = pdockq(if_plddt_contacts, dockq_scores, outdir)
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, sigmoid(if_plddt_contacts,*sigmoid_params), pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('ROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_marks.svg',format='svg',dpi=300)
plt.close()
#pDockQ vs DockQ
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(sigmoid(if_plddt_contacts,*sigmoid_params),dockq_scores,s=1)
plt.title('pDockQ vs DockQ')
plt.xlabel('pDockQ')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'pdockq_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#plot if plddt vs log contacts and color by dockq
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['num_atoms_in_interface_1'].values+1, merged['if_plddt_av_1'].values,c=dockq_scores,s=2)
cbar = plt.colorbar()
cbar.set_label('DockQ')
plt.xscale('log')
plt.ylim([40,100])
plt.title('Interface contacts, plDDT and DockQ')
plt.xlabel('Interface contacts')
plt.ylabel('Average interface plDDT')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'if_conctacts_vs_plddt.svg',format='svg',dpi=300)
plt.close()
return sigmoid_params
def score_marks_5runs_paired_af(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
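    # Ranking sketch: each of the 5 runs gets a pDockQ score (the fitted sigmoid applied
    # to IF-plDDT * log10(IF contacts + 1) for that run); the run with the highest pDockQ
    # is reported as the top-ranked model and its DockQ is used for the success rate.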
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run3','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('Test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using av plDDT*log(if_contacts) in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('AUC using the same ranking', roc_auc)
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=3,color='tab:blue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=3,color='mediumseagreen',label='Min')
plt.title('Model ranking on the test set')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_marks_5runs.svg',format='svg',dpi=300)
plt.close()
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_af2']=first_ranked_scores
marks_dockq_AF['top_ranked_pDockQ']=first_ranked_separators
marks_dockq_AF['top_ranked_model_run_af2']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_af2_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_af2_marks_reduced.csv')
return marks_dockq_AF
def score_marks_5runs_paired_fused(marks_dockq_AF, plDDT_marks, sigmoid_params, outdir):
'''Analyze the variation in DockQ using 5 identical runs of the same settings
'''
plDDT_marks['complex_id'] = plDDT_marks.id1+'-'+plDDT_marks.id2
merged = pd.merge(marks_dockq_AF,plDDT_marks,on='complex_id',how='inner')
#Get separator
separator1 = merged[['if_plddt_av_1', 'if_plddt_av_2','if_plddt_av_3','if_plddt_av_4','if_plddt_av_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
scores = merged[['DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run2',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run3','DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run4',
'DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run5']].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('FUSED test set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using if_plddt_av and num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Get AUC using that success rate
correct = np.zeros(len(first_ranked_scores))
correct[np.argwhere(first_ranked_scores>=0.23)]=1
fpr, tpr, threshold = metrics.roc_curve(correct, first_ranked_separators, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print('FUSED AUC using the same ranking', roc_auc)
#Assign top ranked scores and origin
marks_dockq_AF['top_ranked_model_DockQ_fused']=first_ranked_scores
marks_dockq_AF['top_ranked_model_run_fused']=max_inds+1
#Create and save df
roc_df = pd.DataFrame()
roc_df['FPR']=fpr
roc_df['TPR']=tpr
roc_df['FPR*SR']=fpr*(np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
roc_df['TPR*SR']=tpr*np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores)
roc_df['PPV']=tpr/(tpr+fpr)
roc_df['pDockQ']=threshold
roc_df.to_csv(outdir+'roc_df_fused_marks.csv')
#Select a reduced version of the df
roc_df.loc[np.arange(0,len(roc_df),10)].to_csv(outdir+'roc_df_fused_marks_reduced.csv')
return marks_dockq_AF
def marks_box(marks_dockq_AF, marks_dockq_GRAMM, marks_dockq_mdockpp, marks_dockq_TMfull, marks_dockq_TMint, marks_dockq_RF,outdir):
'''Box df of Marks set
'''
marks_dockq_TMint = marks_dockq_TMint.dropna()
marks_dockq_TMfull = marks_dockq_TMfull.dropna()
#Get data
rf_scores = marks_dockq_RF.DockQ_dockqstats_marks_RF.values
gramm_scores = marks_dockq_GRAMM[1].values
mdockpp_scores = marks_dockq_mdockpp.DockQ.values
TMfull_scores = marks_dockq_TMfull.dockq.values
TMint_scores = marks_dockq_TMint.dockq.values
paired_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values
af2_std_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values
run1_both_scores= marks_dockq_AF.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values
run1_fused_scores = marks_dockq_AF.DockQ_dockqstats_marks_af2_pairedandfused_model_1_rec10_run1.values
top_paired_af_scores = marks_dockq_AF.top_ranked_model_DockQ_af2.values
top_paired_fused_scores = marks_dockq_AF.top_ranked_model_DockQ_fused.values
data1 = [rf_scores, gramm_scores, mdockpp_scores, TMint_scores, af2_std_scores, paired_scores, top_paired_af_scores, top_paired_fused_scores]
data2 = [run1_both_scores, run1_fused_scores, top_paired_af_scores,top_paired_fused_scores]
all_data = [data1,data2]
xlabels1 = ['RF','GRAMM', 'MDockPP', 'TMdock\nInterfaces', 'AF2', 'Paired', 'AF2+Paired\ntop ranked','Block+Paired\ntop ranked']
xlabels2 = ['AF2+Paired', 'Block+Paired', 'AF2+Paired\ntop ranked', 'Block+Paired\ntop ranked']
all_xlabels = [xlabels1, xlabels2]
#Color
colors = sns.husl_palette(len(xlabels1)+2)
all_colors = [colors[:len(xlabels1)],colors[-len(xlabels2):]]
for i in range(len(all_data)):
#Boxplot
fig,ax = plt.subplots(figsize=(24/2.54,12/2.54))
data = all_data[i] #Get data and xlabel variation
xlabels = all_xlabels[i]
colors = all_colors[i]
#Success rates
srs = []
for j in range(len(data)):
sr = np.argwhere(data[j]>=0.23).shape[0]/len(data[j])
median = np.median(data[j])
print(xlabels[j],'sr:',np.round(sr,3),len(data[j]),median)
#xlabels[j]+='\nSR: '+str(np.round(100*sr,1))+'%'
#xlabels[j]+='\nM: '+str(np.round(median,3))
# Creating plot
#ax.violinplot(data)
bp = ax.boxplot(data, patch_artist = True, notch=True, showfliers=False)
for patch, color in zip(bp['boxes'], colors):
patch.set_facecolor(color)
patch.set_alpha(0.75)
# changing color and linewidth of
# medians
for median in bp['medians']:
median.set(color ='k',linewidth = 3)
# #Add swarm
# for i in range(len(data)):
# # Add some random "jitter" to the x-axis
# x = np.random.normal(i, 0.04, size=len(data[i]))
# plt.plot(x+1, data[i], 'r.', alpha=0.2)
# changing color and linewidth of
# whiskers
for whisker in bp['whiskers']:
whisker.set(color ='grey',
linewidth = 1)
# changing color and linewidth of
# caps
for cap in bp['caps']:
cap.set(color ='grey',
linewidth = 1)
plt.title('DockQ scores for the test set',fontsize=20)
plt.xticks(np.arange(1,len(xlabels)+1),xlabels,fontsize=12)
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'DockQ_box_test'+str(i)+'.svg',format='svg',dpi=300)
plt.close()
def AF_vs_RF_marks(marks_dockq_RF,marks_dockq_AF, outdir):
'''Compare the scores for RF vs AF
'''
merged = pd.merge(marks_dockq_RF,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks RF and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ_dockqstats_marks_RF'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('RF vs AF2 performance on the test set')
plt.xlabel('RF DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'RF_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_RF = np.argwhere(merged['DockQ_dockqstats_marks_RF'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate RF:',num_correct_RF,'out of',num_total,'|',np.round(100*num_correct_RF/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where RF outperforms AF
scores = merged[['DockQ_dockqstats_marks_RF','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
rf_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(rf_pos,axis=1)
print('RF outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(rf_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(rf_pos))
def AF_vs_GRAMM_marks(marks_dockq_GRAMM, marks_dockq_AF, outdir):
'''Compare the scores for GRAMM vs AF
'''
marks_dockq_GRAMM = marks_dockq_GRAMM.rename(columns={1: 'DockQ GRAMM'})
marks_dockq_GRAMM['complex_id'] = ['_'.join(x.split('-')) for x in marks_dockq_GRAMM[0]]
merged = pd.merge(marks_dockq_GRAMM,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks GRAMM and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ GRAMM'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('GRAMM vs AF2 performance on the test set')
plt.xlabel('GRAMM DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'GRAMM_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_GRAMM = np.argwhere(merged['DockQ GRAMM'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate GRAMM:',num_correct_GRAMM,'out of',num_total,'|',np.round(100*num_correct_GRAMM/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where GRAMM outperforms AF
scores = merged[['DockQ GRAMM','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
GRAMM_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(GRAMM_pos,axis=1)
print('GRAMM outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(GRAMM_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(GRAMM_pos))
def AF_vs_TMint_marks(marks_dockq_TMint, marks_dockq_AF, outdir):
'''Compare the scores for GRAMM vs AF
'''
marks_dockq_TMint = marks_dockq_TMint.rename(columns={'dockq': 'DockQ TMint'})
merged = pd.merge(marks_dockq_TMint,marks_dockq_AF,on='complex_id',how='inner')
print('Number of complexes in merged Marks TMint and AF', len(merged))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged['DockQ TMint'],merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'],s=1)
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
#Plot correct cutoff
plt.plot([0.23,0.23],[0,0.23],linewidth=1,linestyle='--',color='k')
plt.plot([0,0.23],[0.23,0.23],linewidth=1,linestyle='--',color='k',label='Success cutoff')
plt.title('TMint vs AF2 performance on the test set')
plt.xlabel('TMint DockQ')
plt.ylabel('AF DockQ')
plt.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'TMint_vs_AF_test.svg',format='svg',dpi=300)
#Get num correct
num_correct_TMint = np.argwhere(merged['DockQ TMint'].values>=0.23).shape[0]
num_correct_AF = np.argwhere(merged['DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1'].values>=0.23).shape[0]
num_total = len(merged)
print('Success rate TMint:',num_correct_TMint,'out of',num_total,'|',np.round(100*num_correct_TMint/num_total,2),'%')
print('Success rate AF:',num_correct_AF,'out of',num_total,'|',np.round(100*num_correct_AF/num_total,2),'%')
#Get where TMint outperforms AF
scores = merged[['DockQ TMint','DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1']].values
TMint_pos = scores[np.argwhere(scores[:,0]>=0.23)[:,0],:]
max_scores = np.argmax(TMint_pos,axis=1)
print('TMint outperform AF', np.argwhere(max_scores==0).shape[0], 'out of',len(TMint_pos),'times|',np.argwhere(max_scores==0).shape[0]/len(TMint_pos))
def real_features_marks(marks_dockq_AF, dssp_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Compare the separation in the marks dataset for AF using metrics from the
real structures
'''
#Change DSSP df
dssp_marks['Helix']=dssp_marks.G+dssp_marks.H+dssp_marks.I
dssp_marks['Sheet']=dssp_marks.E+dssp_marks.B
dssp_marks['Loop']=dssp_marks[' '].values
ss = dssp_marks[['Helix','Sheet','Loop']].values #0,1,2
dssp_marks['ss_class']=np.argmax(dssp_marks[['Helix','Sheet','Loop']].values,axis=1)
dssp_marks = dssp_marks[['id1','id2','ss_class']]
#Merge dfs
dssp_marks['complex_id']=dssp_marks.id1+'-'+dssp_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged_dssp = pd.merge(marks_dockq_AF,dssp_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(marks_dockq_AF,ifstats_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,aln_scores_marks,on=['complex_id'],how='inner')
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
merged_if = pd.merge(merged_if,AFneffs_marks,on=['complex_id'],how='inner')
merged_if = pd.merge(merged_if,topneffs_marks,on=['complex_id'],how='inner')
'''
G = 3-turn helix (310 helix). Min length 3 residues.
H = 4-turn helix (α helix). Minimum length 4 residues.
I = 5-turn helix (π helix). Minimum length 5 residues.
T = hydrogen bonded turn (3, 4 or 5 turn)
E = extended strand in parallel and/or anti-parallel β-sheet conformation. Min length 2 residues.
B = residue in isolated β-bridge (single pair β-sheet hydrogen bond formation)
S = bend (the only non-hydrogen-bond based assignment).
C = coil (residues which are not in any of the above conformations).
'''
print('Num complexes in DSSP feature analysis',len(merged_dssp))
#Plot success rate per ss class
ss_classes = {0:'Helix',1:'Sheet',2:'Loop'}
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for i in range(3):
sel = merged_dssp[merged_dssp.ss_class==i]
success=np.argwhere(sel.top_ranked_model_DockQ_af2.values>=0.23).shape[0]/len(sel)
print(ss_classes[i],'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.distplot(sel.top_ranked_model_DockQ_af2,label=ss_classes[i]+' : '+str(np.round(100*success,1))+' % successful',hist=False)
plt.title('DockQ and SS for the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_SS_marks.svg',format='svg',dpi=300)
plt.close()
#Plot feature vs DockQ
#Get min chain len
merged_if['smallest chain length'] = np.min(merged_if[['l1','l2']].values,axis=1)
#Get max chain len
merged_if['biggest chain length'] = np.max(merged_if[['l1','l2']].values,axis=1)
vars = ['num_if_contacts_total','smallest chain length', 'biggest chain length', 'aln_score', 'AFdefault_Neff', 'tophit_Neff']
nicer_names = {'num_if_contacts_total':'number of interface contacts','smallest chain length':'smallest chain length', 'biggest chain length':'biggest chain length',
'aln_score':'alignment score', 'AFdefault_Neff':'AF Neff', 'tophit_Neff':'Paired Neff'}
print('Num complexes in real feature analysis',len(merged_if))
#Plot each third and the distribution vs vars
for var in vars:
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
print (np.quantile(merged_if[var],0.5,axis=0))
l=[np.min(merged_if[var])]
l+=[np.quantile(merged_if[var],0.33,axis=0)]
l+=[np.quantile(merged_if[var],0.67,axis=0)]
l+=[np.max(merged_if[var])]
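#l now holds the tertile boundaries for this feature: [min, 33rd percentile, 67th percentile, max]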
print (l)
j=0
for i in l[0:3]:
j+=1
#print ("test: ",i,j,l[j])
sel = merged_if.loc[ (merged_if[var] > i) & (merged_if[var] < l[j]) ]
success=np.argwhere(sel.top_ranked_model_DockQ_af2.values>=0.23).shape[0]/len(sel)
print(j,str(i)+" - "+ str(l[j])+":",'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.kdeplot(sel.top_ranked_model_DockQ_af2,label=str(round(i,0))+"-"+str(round(l[j],0))+' : '+str(np.round(100*success,1))+' % successful')
plt.title('DockQ and ' + nicer_names[var] + '\nfor the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_'+var+'_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_per_org(marks_dockq_AF, oxstats_marks, ifstats_marks, aln_scores_marks, AFneffs_marks, topneffs_marks, outdir):
'''Analyze the dockq per organism
'''
#Merge
oxstats_marks['complex_id'] = oxstats_marks.id1+'-'+oxstats_marks.id2
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
#AFneffs_marks['complex_id']=[code.replace('-', '_') for code in AFneffs_marks['complex_id']]
#topneffs_marks['complex_id']=[code.replace('-', '_') for code in topneffs_marks['complex_id']]
aln_scores_marks['complex_id']=aln_scores_marks.id1+'-'+aln_scores_marks.id2
aln_scores_marks = aln_scores_marks[['complex_id','aln_score']]
merged = pd.merge(marks_dockq_AF,oxstats_marks,on='complex_id',how='left')
merged = pd.merge(merged,ifstats_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,aln_scores_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,AFneffs_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,topneffs_marks,on=['complex_id'],how='inner')
#Get min chain len
merged['smallest chain length'] = np.min(merged[['l1','l2']].values,axis=1)
#Get max chain len
merged['biggest chain length'] = np.max(merged[['l1','l2']].values,axis=1)
organisms = ['Homo sapiens','Saccharomyces cerevisiae', 'Escherichia coli']
vars = ['num_if_contacts_total','smallest chain length', 'biggest chain length', 'aln_score','AFdefault_Neff', 'tophit_Neff']
#Save
orgs = []
dockq_scores = []
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for org in organisms:
sel = merged[merged.Org1==org]
sel = sel[sel.Org2==org]
print('Number of complexes for',org,len(sel))
#Success rate
sel_scores = sel.top_ranked_model_DockQ_af2.values
sr = np.argwhere(sel_scores>=0.23).shape[0]/len(sel_scores)
print('Success rate',sr)
#correlation
for var in vars:
R,p = spearmanr(sel[var].values,sel['top_ranked_model_DockQ_af2'].values)
print(var, np.round(R,2))
if org =='Saccharomyces cerevisiae':
org = 'S.cerevisiae'
if org =='Escherichia coli':
org = 'E.coli'
sns.distplot(sel_scores,label=org+' : '+str(np.round(sr*100,1))+' % successful',hist=False)
plt.title('DockQ per organism for the test set')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_per_org_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_per_kingdom(marks_dockq_AF, oxstats_marks, AFneffs_marks, topneffs_marks, outdir):
'''Analyze the DockQ per kingdom
'''
#Merge
oxstats_marks['complex_id'] = oxstats_marks.id1+'-'+oxstats_marks.id2
#AFneffs_marks['complex_id']=['_'.join(x.split('-')) for x in AFneffs_marks.complex_id]
#topneffs_marks['complex_id']=['_'.join(x.split('-')) for x in topneffs_marks.complex_id]
merged = pd.merge(marks_dockq_AF,oxstats_marks,on='complex_id',how='left')
merged = pd.merge(merged,AFneffs_marks,on=['complex_id'],how='inner')
merged = pd.merge(merged,topneffs_marks,on=['complex_id'],how='inner')
kingdoms = ['E', 'B', 'A', 'V']
nice_labels = {'top_ranked_model_DockQ_af2':'DockQ', 'AFdefault_Neff':'AF Neff', 'tophit_Neff':'Paired Neff'}
for var in ['top_ranked_model_DockQ_af2', 'AFdefault_Neff', 'tophit_Neff']:
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for kd in kingdoms:
sel = merged[merged.kingdom1==kd]
sel = sel[sel.kingdom2==kd]
#Success rate
sel_scores = sel[var].values
if var=='top_ranked_model_DockQ_af2':
sr = np.argwhere(sel_scores>=0.23).shape[0]/len(sel_scores)
print('Success rate for',kd,sr,len(sel_scores))
sns.distplot(sel_scores,label=kd+' : '+str(np.round(sr*100,1))+' % successful',hist=False)
else:
sns.distplot(sel_scores,label=kd,hist=False)
plt.title(nice_labels[var]+' per kingdom for the test set')
plt.xlabel(nice_labels[var])
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+var+'_per_kd_marks.svg',format='svg',dpi=300)
plt.close()
def marks_dockq_vs_aln_overlap(marks_dockq_AF, af_chain_overlap_marks, outdir):
'''Analyze the dockq vs chain overlap
'''
#Merge
cid = ['_'.join(x.split('-')) for x in af_chain_overlap_marks.complex_id.values]
af_chain_overlap_marks['complex_id']=cid
merged = pd.merge(marks_dockq_AF,af_chain_overlap_marks,on='complex_id',how='inner')
#Plot tertiles
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
l=[np.min(merged.Overlap)]
l+=[np.quantile(merged.Overlap,0.33,axis=0)]
l+=[np.quantile(merged.Overlap,0.67,axis=0)]
l+=[np.max(merged.Overlap)]
j=0
for i in l[0:3]:
j+=1
sel = merged.loc[ (merged['Overlap'] > i) & (merged['Overlap'] < l[j]) ]
success=np.argwhere(sel.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10.values>=0.23).shape[0]/len(sel)
print(j,str(i)+" - "+ str(l[j])+":",'success rate',np.round(success,3),'over',len(sel),'structures')
#
sns.kdeplot(sel.DockQ_dockqstats_marks_af2_af2stdmsa_model_1_rec10,label=str(round(i,2))+"-"+str(round(l[j],2))+' : '+str(np.round(100*success,1))+' % successful')
plt.title('DockQ vs chain overlap in AF2 msas')
plt.xlabel('DockQ')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'dockq_vs_overlap.svg',format='svg',dpi=300)
plt.close()
#Plot overlap distribution
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
sns.distplot(merged.Overlap)
plt.title('Chain overlap distribution in AF2 msas')
plt.xlabel('Overlap fraction')
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'overlap_distr.svg',format='svg',dpi=300)
plt.close()
def score_newset_5runs(newset_dockq_AF, plDDT_newset, sigmoid_params, outdir):
'''Compare the separation in the newset dataset for AF using metrics from the
predicted structures
'''
#Merge dfs
plDDT_newset['complex_id'] = plDDT_newset.id1+'-'+plDDT_newset.id2
merged = pd.merge(newset_dockq_AF,plDDT_newset,on=['complex_id'],how='inner')
#Get num res in interface
separator1 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator2 = merged[['num_atoms_in_interface_1', 'num_atoms_in_interface_2','num_atoms_in_interface_3','num_atoms_in_interface_4','num_atoms_in_interface_5']].values
separator = separator1*np.log10(separator2+1)
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
separator = sigmoid(separator, *sigmoid_params)
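#Note: here both factors of the separator come from the interface-contact columns, so the score is
#n_contacts*log10(n_contacts+1) mapped through the fitted sigmoid; it is only used to rank the five runs.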
scores = merged[newset_dockq_AF.columns[1:]].values
#Get max and min scores
max_scores = np.max(scores,axis=1)
min_scores = np.min(scores,axis=1)
#Get success rates per initializations
srs = []
for i in range(scores.shape[1]):
srs.append(np.argwhere(scores[:,i]>=0.23).shape[0]/len(scores))
print('New set scoring:')
print('Success rate 5 runs top scores',np.argwhere(max_scores>=0.23).shape[0]/len(max_scores))
print('Av diff',np.average(max_scores-min_scores))
print('Std diff',np.std(max_scores-min_scores))
print('Avg and std success rate', np.average(srs),np.std(srs))
#Separate the models using the number of contacts in the interface
max_inds = np.argmax(separator,axis=1)
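#For each complex, select the run with the highest separator value and report that run's DockQ as the first-ranked prediction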
first_ranked_scores = []
first_ranked_separators = []
#Get max separator scores
for i in range(len(max_inds)):
first_ranked_scores.append(scores[i,max_inds[i]])
first_ranked_separators.append(separator[i,max_inds[i]])
#Convert to array
first_ranked_scores = np.array(first_ranked_scores)
first_ranked_separators = np.array(first_ranked_separators)
#Get success rate
print('Ranking test set success rate using num contacts in interface',np.argwhere(first_ranked_scores>=0.23).shape[0]/len(first_ranked_scores))
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(first_ranked_scores, max_scores,s=10,color='darkblue',label='Max')
plt.scatter(first_ranked_scores, min_scores,s=10,color='mediumseagreen',label='Min')
plt.title('Model ranking from 5 runs on the new dimer set\n(both MSAs, model 1, 10 recycles)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot([0,1],[0,1],color='k',linewidth=1,linestyle='--')
plt.xlabel('DockQ first ranked model')
plt.ylabel('DockQ')
plt.legend()
plt.tight_layout()
plt.savefig(outdir+'DockQ_newset_5runs.svg',format='svg',dpi=300)
plt.close()
def dev_vs_test(marks_dockq_AF, oxstats_marks, ifstats_marks, aln_scores_marks, AFneffs_marks,
topneffs_marks, bench4_kingdom, dssp_bench4, AFneffs_bench4, topneffs_bench4, outdir):
'''Analyze the distributions of different features for the dev vs the test set
Neff
Kingdom
SS in interface
Number of interface contacts
Chain length (biggest and smallest)
'''
#Merge bench4
bench4_merged = pd.merge(bench4_kingdom,dssp_bench4,on=['id1','id2'],how='inner')
bench4_merged['complex_id'] = bench4_merged.PDB+'_u1-'+bench4_merged.PDB+'_u2'
bench4_merged = pd.merge(bench4_merged,AFneffs_bench4,on='complex_id',how='inner')
bench4_merged = pd.merge(bench4_merged,topneffs_bench4,on='complex_id',how='inner')
bench4_merged['min_chain_len'] = np.min(bench4_merged[['Sequence Length 1','Sequence Length 2']].values,axis=1)
bench4_merged['max_chain_len'] = np.max(bench4_merged[['Sequence Length 1','Sequence Length 2']].values,axis=1)
bench4_merged['sum_chain_len'] = np.sum(bench4_merged[['Sequence Length 1','Sequence Length 2']].values,axis=1)
bench4_kingdom['Kingdom'] = bench4_kingdom['Kingdom'].replace({' Bacteria ':'B', ' Eukaryota ':'E','Virus':'V'})
bench4_merged['if_fraction'] = np.divide(bench4_merged['num_if_contacts_total'],bench4_merged['sum_chain_len'])
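#if_fraction: number of interface contacts normalised by the summed chain length of the dimer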
#Merge Marks
marks_merged = pd.merge(oxstats_marks, ifstats_marks, on=['id1','id2'],how='inner')
marks_merged['complex_id'] = marks_merged.id1+'_'+marks_merged.id2
marks_merged = pd.merge(marks_merged,AFneffs_marks,on='complex_id',how='inner')
marks_merged = pd.merge(marks_merged,topneffs_marks,on='complex_id',how='inner')
marks_merged['min_chain_len'] = np.min(marks_merged[['l1','l2']].values,axis=1)
marks_merged['max_chain_len'] = np.max(marks_merged[['l1','l2']].values,axis=1)
marks_merged['sum_chain_len'] = np.sum(marks_merged[['l1','l2']].values,axis=1)
marks_merged['if_fraction'] = np.divide(marks_merged['num_if_contacts_total'],marks_merged['sum_chain_len'])
#Get kingdom fractions
kingdoms = ['E', 'B', 'A', 'V']
print('KD','Bench4','Marks')
for kd in kingdoms:
sel_bench4 = bench4_kingdom[bench4_kingdom.Kingdom==kd]
sel_marks = marks_merged[(marks_merged.kingdom1==kd)&(marks_merged.kingdom2==kd)]
print(kd,len(sel_bench4)/len(bench4_kingdom),len(sel_marks)/len(marks_merged))
#Plot vars
vars = ['num_if_contacts_total', 'min_chain_len','max_chain_len', 'sum_chain_len', 'AFdefault_Neff' ,'tophit_Neff','if_fraction']
for var in vars:
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
sns.distplot(bench4_merged[var],label='Dev. set',hist=True,kde=True,norm_hist=True)
sns.distplot(marks_merged[var],label='Test. set',hist=True,kde=True,norm_hist=True)
plt.legend()
plt.title('Dev. vs Test '+var)
plt.xlabel(var)
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'dev_vs_test_'+var+'.svg',format='svg',dpi=300)
plt.close()
def neg_vs_pos(plDDT_marks_af, plDDT_marks_negative_af, plDDT_negatome_af, sigmoid_params):
'''Compare the interfaces of positive and neg marks set + negatome
'''
#Filter out the homodimers from the negatome
keep_inds = []
for i in range(len(plDDT_negatome_af)):
row = plDDT_negatome_af.loc[i]
if row.id1!=row.id2:
keep_inds.append(i)
print('Num homodimers:',len(plDDT_negatome_af)-len(keep_inds))
plDDT_negatome_af = plDDT_negatome_af.loc[keep_inds]
#Get AUC using the different metrics
#Get min of chains
#Pos
single_chain_plddt = np.min(plDDT_marks_af[['ch1_plddt_av_1', 'ch2_plddt_av_1']].values,axis=1)
plDDT_marks_af['min_chain_plddt_av_1'] = single_chain_plddt
#Neg marks
single_chain_plddt = np.min(plDDT_marks_negative_af[['ch1_plddt_av', 'ch2_plddt_av']].values,axis=1)
plDDT_marks_negative_af['min_chain_plddt_av'] = single_chain_plddt
#Negatome
single_chain_plddt = np.min(plDDT_negatome_af[['ch1_plddt_av', 'ch2_plddt_av']].values,axis=1)
plDDT_negatome_af['min_chain_plddt_av'] = single_chain_plddt
#Analyze ROC as a function of
feature_nice_names = {'if_plddt_av':'IF_plDDT', 'min_chain_plddt_av':'Min plDDT per chain',
'plddt_av':'Average plDDT', 'num_atoms_in_interface':'IF_contacts',
'num_res_in_interface':'IF_residues'}
colors = {'if_plddt_av':'darkblue','min_chain_plddt_av':'magenta','plddt_av':'orange',
'num_atoms_in_interface':'darkgreen','num_res_in_interface':'tab:blue', 'IF_cp':'cyan', 'pDockQ':'k'}
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
for key in feature_nice_names:
pos_features = plDDT_marks_af[key+'_1'].values
neg_features = np.concatenate([plDDT_marks_negative_af[key].values, plDDT_negatome_af[key].values])
#ROC
correct = np.zeros(len(pos_features)+len(neg_features))
correct[:len(pos_features)]=1
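#Label vector for the ROC: 1 = interacting pairs (positive set), 0 = non-interacting (negative Marks set + Negatome)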
all_features = np.concatenate([pos_features, neg_features])
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
#Plot ROC
plt.plot(fpr, tpr, label = feature_nice_names[key]+': AUC = %0.2f' % roc_auc, color=colors[key])
#TPRs
print(key,'TPR at FPR 1%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.01)[-1][0]]))
print(key,'TPR at FPR 5%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.05)[-1][0]]))
#Add log(if contacts)*if_plddt_av
pos_features_if_cp = plDDT_marks_af['if_plddt_av_1'].values*np.log10(plDDT_marks_af['num_atoms_in_interface_1'].values+1)
neg_features_marks_if_cp = plDDT_marks_negative_af['if_plddt_av'].values*np.log10(plDDT_marks_negative_af['num_atoms_in_interface'].values+1)
neg_features_negatome_if_cp = plDDT_negatome_af['if_plddt_av'].values*np.log10(plDDT_negatome_af['num_atoms_in_interface'].values+1)
neg_features_if_cp = np.concatenate([neg_features_marks_if_cp, neg_features_negatome_if_cp])
correct = np.zeros(len(pos_features_if_cp)+len(neg_features_if_cp))
correct[:len(pos_features_if_cp)]=1
all_features = np.concatenate([pos_features_if_cp, neg_features_if_cp])
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
#plt.plot(fpr, tpr, label = 'IF_plDDT⋅log(IF_contacts)'+': AUC = %0.2f' % roc_auc,color='tab:cyan')
#Do the same with pDockQ
def sigmoid(x, L ,x0, k, b):
y = L / (1 + np.exp(-k*(x-x0)))+b
return (y)
pos_features_pdockq = sigmoid(pos_features_if_cp, *sigmoid_params)
neg_features_pdockq = sigmoid(neg_features_if_cp, *sigmoid_params)
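#pDockQ here = fitted sigmoid applied to if_plddt_av * log10(num_atoms_in_interface + 1)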
all_features = np.concatenate([pos_features_pdockq, neg_features_pdockq])
#Create ROC
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label = 'pDockQ'+': AUC = %0.2f' % roc_auc,color='k',linestyle='--')
#TPRs
print('pDockQ TPR at FPR 1%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.01)[-1][0]]))
print('pDockQ TPR at FPR 5%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=0.05)[-1][0]]))
#Plot formatting
plt.plot([0,1],[0,1],linewidth=1,linestyle='--',color='grey')
plt.legend(fontsize=9)
plt.title('Identifying interacting proteins\nROC as a function of different metrics')
plt.xlabel('FPR')
plt.ylabel('TPR')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ROC_pos_neg.svg',format='svg',dpi=300)
#Marks comparison
print('Only marks negative')
neg_features = sigmoid(neg_features_marks_if_cp, *sigmoid_params)
#ROC
correct = np.zeros(len(pos_features_pdockq)+len(neg_features))
correct[:len(pos_features_pdockq)]=1
all_features = np.concatenate([pos_features_pdockq, neg_features])
fpr, tpr, threshold = metrics.roc_curve(correct, all_features, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
t=0.01
print('Average interface pDockQ TPR at FPR '+str(t*100)+'%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2),'\nAUC:',roc_auc, 'FPR:',np.round(100*fpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2))
t=0.05
print('Average interface pDockQ TPR at FPR '+str(t*100)+'%=',np.round(100*tpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2),'\nAUC:',roc_auc, 'FPR:',np.round(100*fpr[np.argwhere(np.round(fpr,2)<=t)[-1][0]],2))
#Plot distribution of separators
feature_nice_names = {'if_plddt_av':'IF_plDDT', 'num_atoms_in_interface':'IF_contacts',
'pDockQ':'pDockQ'}
xlims = {'if_plddt_av':[-20,120], 'num_atoms_in_interface':[-100,500],
'IF_cp':[0,250], 'pDockQ':[0,1]}
bins = {'if_plddt_av':20, 'num_atoms_in_interface':50,
'IF_cp':20, 'pDockQ':20}
matplotlib.rcParams.update({'font.size': 9})
for key in feature_nice_names:
fig,ax = plt.subplots(figsize=(6/2.54,6/2.54))
if key not in ['IF_cp','pDockQ']:
pos_features = plDDT_marks_af[key+'_1'].values
neg_features = np.concatenate([plDDT_marks_negative_af[key].values, plDDT_negatome_af[key].values])
if key=='IF_cp':
pos_features = pos_features_if_cp
neg_features = neg_features_if_cp
if key=='pDockQ':
pos_features = pos_features_pdockq
neg_features = neg_features_pdockq
plt.hist(pos_features,label='pos',color=colors[key],alpha=0.75,bins=bins[key],density=True)
plt.hist(neg_features,label='neg',color='gray',alpha=0.75,bins=bins[key],density=True)
plt.legend(fontsize=9)
#plt.title('Distribution of '+feature_nice_names[key],fontsize=9)
plt.xlim(xlims[key])
plt.xlabel(feature_nice_names[key])
plt.ylabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+key+'_pos_vs_neg_distr.svg',format='svg',dpi=300)
plt.close()
# #Create df of fpr vs tpr
# roc_df = pd.DataFrame()
# roc_df['FPR']=fpr
# roc_df['TPR']=tpr
# roc_df['Number of interface contacts'] = threshold
def ppv_vs_dockq_marks(marks_dockq_AF,ifstats_marks,outdir):
'''Analysis of the relationship between if stats and DockQ
'''
ifstats_marks['complex_id']=ifstats_marks.id1+'-'+ifstats_marks.id2
merged_if = pd.merge(marks_dockq_AF,ifstats_marks,on=['complex_id'],how='inner')
#Calc PPV
merged_if['PPV'] = merged_if['num_accurate_if']/merged_if['num_if_contacts_total']
#Plot
fig,ax = plt.subplots(figsize=(12/2.54,12/2.54))
plt.scatter(merged_if.PPV,merged_if.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1,
s=1,label='AF2+Paired',c='tab:blue')
plt.scatter(merged_if.PPV,merged_if.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10,
s=1,label='Paired',c='tab:orange')
#RA
ra_x = []
ra_y_paired = []
ra_y_both = []
sr_paired = []
sr_both = []
step = 0.05
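#Overlapping PPV windows (width 0.1, step 0.05); the success rate is the fraction of complexes with DockQ >= 0.23 in each window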
for i in np.arange(0,0.5,step):
sel = merged_if[(merged_if.PPV>=i)&(merged_if.PPV<i+0.1)]
if len(sel)<1:
continue
ra_x.append(i+step/2)
#ra_y_paired.append(np.average(sel.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10))
#ra_y_both.append(np.average(sel.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1))
#SR
sr_paired.append(np.argwhere(sel.DockQ_dockqstats_marks_af2_hhblitsn2_model_1_rec10.values>=0.23).shape[0]/len(sel))
sr_both.append(np.argwhere(sel.DockQ_dockqstats_marks_af2_af2andhhblitsmsa_model_1_rec10_run1.values>=0.23).shape[0]/len(sel))
#RA
# plt.plot(ra_x,ra_y_paired,label='RA Paired',c='tab:orange')
# plt.plot(ra_x,ra_y_both,label='RA AF2+Paired',c='tab:blue')
#SR
plt.plot(ra_x,sr_paired,label='SR Paired',c='tab:orange')
plt.plot(ra_x,sr_both,label='SR AF2+Paired',c='tab:blue')
plt.legend()
plt.title('Interface PPV vs DockQ')
plt.xlabel('Interface PPV')
plt.ylabel('DockQ')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(outdir+'ppv_vs_dockq.svg',format='svg',dpi=300)
plt.close()
#################MAIN####################
#Parse args
args = parser.parse_args()
#Data
#bench4
bench4_dockq_aa = pd.read_csv(args.bench4_dockq_aa[0])
bench4_dockq_RF = pd.read_csv(args.bench4_dockq_RF[0])
plDDT_bench4 = pd.read_csv(args.plDDT_bench4[0])
#pconsdock_bench4 = pd.read_csv(args.pconsdock_bench4[0])
#pconsdock_marks = pd.read_csv(args.pconsdock_marks[0])
bench4_kingdom = pd.read_csv(args.bench4_kingdom[0])
dssp_bench4 = pd.read_csv(args.dssp_bench4[0])
AFneffs_bench4 = pd.read_csv(args.afdefault_neff_bench4[0])
topneffs_bench4 = pd.read_csv(args.tophits_neff_bench4[0])
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 15:11:22 2020
@author: 81701
"""
from datetime import datetime, timedelta
import gc
import numpy as np, pandas as pd
import lightgbm as lgb
h = 28
max_lag = 0
tr_last = 1913
fday = datetime(2016,4, 25)
CAL_DTYPES={"event_name_1": "category", "event_name_2": "category", "event_type_1": "category",
"event_type_2": "category", "weekday": "category", 'wm_yr_wk': 'int16', "wday": "int16",
"month": "int16", "year": "int16", "snap_CA": "float32", 'snap_TX': 'float32', 'snap_WI': 'float32' }
PRICE_DTYPES = {"store_id": "category", "item_id": "category", "wm_yr_wk": "int16","sell_price":"float32" }
catcols = ['id','item_id','dept_id','store_id','cat_id','state_id']
numcols = [f"d_{day}" for day in range(1, tr_last + 1)]
dtype = {numcol: "float32" for numcol in numcols}
dtype.update({col:"category" for col in catcols if col != 'id'})
dt = pd.read_csv("kaggle/input/m5-forecasting-accuracy/sales_train_validation.csv",
usecols = catcols + numcols, dtype = dtype)
# =============================================================================
def check_zero_rate():
count = 0
for i in range(30490):
sample = dt.iloc[i][6:]
if len(np.where(sample == 0)[0])/sample.shape[0] < 0.1:
count += 1
print(count)
# =============================================================================
# =============================================================================
def check_zero_dic_gen():
maxcount = np.array(dt.shape[0]*[0])
continuousCount = []
for i in range(dt.shape[0]):
oncount = False
count = []
index = []
sample = dt.iloc[i][6:]
for j in range(0,sample.shape[0]):
if sample[j] == 0:
if oncount == True:
count[-1] = count[-1]+1
else:
oncount = True
count.append(1)
index.append(j)
else:
oncount = False
countdic = {index[i]:count[i] for i in range(len(index))}
continuousCount.append(countdic)
np.save('countdic.npy',np.array(continuousCount))
np.save('maxcount.npy',maxcount)
# =============================================================================
def data_preprocess():
all_string_of_days = [f'd_{i}' for i in range(1,tr_last+1)]
countdic = np.load('countdic.npy')
# compute initial zero
begining_zero_index = []
for i in range(countdic.shape[0]):
if list(countdic[i].keys())[0] == 0:
begining_zero_index.append(i)
begining_zero_id = dt.iloc[begining_zero_index]["id"]
begining_zero_id = np.array(begining_zero_id)
begining_zero_length = [list(countdic[i].values())[0] for i in begining_zero_index]
begining_zero_string = []
for i in range(len(begining_zero_length)):
begining_zero_string.append(all_string_of_days[:begining_zero_length[i]])
dic_without_init_zero = countdic.copy()
for i in range(dic_without_init_zero.shape[0]):
if list(dic_without_init_zero[i].keys())[0] == 0:
dic_without_init_zero[i].pop(0)
#print(dic_without_init_zero.shape)
# create off shelf infomation list
#[(index,[(off_time,off_len),(off_time,off_len)]),()...]
# make off_shelf_list
off_shelf_list = []
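# Heuristic (as implemented below): sort each item's zero-runs by length; if one of the three longest
# runs is more than 30 days longer than the next run and the remaining run-length differences are
# nearly uniform (95th percentile < 2), flag those long runs as off-shelf periods.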
for idx in range(len(dic_without_init_zero)):
sorteddic = sorted(dic_without_init_zero[idx].items(), key = lambda kv:(kv[1], kv[0]),reverse=True)
diff = [sorteddic[i][1] - sorteddic[i+1][1] for i in range(len(sorteddic) - 1)]
if len(diff) > 3:
for i in [2,1,0]:
if diff[i] > 30 and np.percentile(diff,95) < 2:
off_shelf_list.append((idx,[(sorteddic[j][0],sorteddic[j][1]) for j in range(i+1) if sorteddic[j][0] + sorteddic[j][1] < tr_last-max_lag]))
break
off_shelf_list_temp = []
for item in off_shelf_list:
if item[1] != []:
off_shelf_list_temp.append(item)
off_shelf_list = off_shelf_list_temp
# finish off_shelf_list
off_shelf_string = []
for i in range(len(off_shelf_list)):
for j in range(len(off_shelf_list[i][1])):
if j == 0:
off_shelf_string.append(all_string_of_days[off_shelf_list[i][1][j][0]:off_shelf_list[i][1][j][0]+off_shelf_list[i][1][j][1]])
else:
off_shelf_string[i] += all_string_of_days[off_shelf_list[i][1][j][0]:off_shelf_list[i][1][j][0]+off_shelf_list[i][1][j][1]]
off_shelf_index = [off_shelf_list[i][0] for i in range(len(off_shelf_list))]
off_shelf_id = dt.iloc[off_shelf_index]["id"]
off_shelf_id = np.array(off_shelf_id)
# mark data beginning with 0
for i,thisid in enumerate(begining_zero_id):
print(i)
dt.loc[thisid,begining_zero_string[i]] = np.nan
# mark off shelf data
for i,thisid in enumerate(off_shelf_id):
print(i)
dt.loc[thisid,off_shelf_string[i]] = -1
dt.to_csv('dt_-1.csv',index = None)
# =============================================================================
# last_is_zero = []
# for i in range(countdic.shape[0]):
# if list(countdic[i].keys())[-1] + list(countdic[i].values())[-1] == tr_last and list(countdic[i].keys())[-1] < tr_last - max_lag:
# last_is_zero.append(i)
#
# last_is_zero_id = np.array(dt.iloc[last_is_zero]["id"])
#
# # save data with 57 days more off shelf
# dt_last_zero = dt[dt.id.isin(last_is_zero_id)]
# #dt_last_zero.to_csv('data/last_zero_57.csv')
#
# dt_non_last_zero = dt[~dt.id.isin(last_is_zero_id)]
# =============================================================================
def create_dt(is_train = True, nrows = None):
prices = pd.read_csv("kaggle/input/m5-forecasting-accuracy/sell_prices.csv", dtype = PRICE_DTYPES)
for col, col_dtype in PRICE_DTYPES.items():
if col_dtype == "category":
prices[col] = prices[col].cat.codes.astype("int16")
prices[col] -= prices[col].min()
cal = pd.read_csv("kaggle/input/m5-forecasting-accuracy/calendar.csv", dtype = CAL_DTYPES)
cal["date"] = | pd.to_datetime(cal["date"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
df_motor = pd.read_csv('/home/ubuntu/bagfiles/3r/r2_motor.csv', header=0)
df_odom1 = pd.read_csv('/home/ubuntu/bagfiles/net_arch/PPO3_odom.csv', header=0)
df_odom2 = pd.read_csv('/home/ubuntu/bagfiles/net_arch/PPO4_odom.csv', header=0)
df_odom3 = pd.read_csv('/home/ubuntu/bagfiles/net_arch/PPO5_odom.csv', header=0)
print(df_motor)
print(df_motor.columns.values)
# Set motor_rpm dataframe
time = df_motor['time']
time = np.arange(len(time)) / 100
rpm = df_motor['.angular_velocities']
motor = [eval(ele) for ele in rpm]
x0=[]
x1=[]
x2=[]
x3=[]
for a in motor:
x0.append(abs(a[0]))
x1.append(abs(a[1]))
x2.append(abs(a[2]))
x3.append(abs(a[3]))
# N = 1000
# x1m = np.convolve(x1, np.ones(N)/N, mode='valid')
# x3m = np.convolve(x3, np.ones(N)/N, mode='valid')
# time_m = time[len(time) - len(x1m):]
# y = df_motor['y']
# z = df_motor['z']
# plot
motor_data = pd.DataFrame(time)
import argparse
import subprocess
import sys
import numpy as np
import pandas as pd
from scipy.stats import ks_2samp
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from parallelm.mlops import StatCategory as st
from parallelm.mlops import mlops as mlops
from parallelm.mlops.stats.bar_graph import BarGraph
from parallelm.mlops.stats.graph import MultiGraph
from parallelm.mlops.stats.table import Table
def parse_args():
"""
Parse arguments from component
:return: Parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("--num_features", help="# Features")
parser.add_argument("--num_samples", help="# Samples")
parser.add_argument("--input_file", help="Input Data File")
parser.add_argument("--validation_split", help="# Validation Split")
parser.add_argument("--n_estimators", help="Number of Estimators")
parser.add_argument("--max_depth", help="Max Depth")
parser.add_argument("--learning_rate", help="Learning Rate")
parser.add_argument("--min_child_weight", help="Min Child Weight")
parser.add_argument("--objective", help="Objective")
parser.add_argument("--gamma", help="Gamma")
parser.add_argument("--max_delta_step", help="Max Delta Step")
parser.add_argument("--subsample", help="Subsample")
parser.add_argument("--reg_alpha", help="Reg Alpha")
parser.add_argument("--reg_lambda", help="Reg Lambda")
parser.add_argument("--scale_pos_weight", help="Scale Pos Weight")
parser.add_argument("--auc_threshold", help="AUC Threshold")
parser.add_argument("--ks_threshold", help="KS Threshold")
parser.add_argument("--psi_threshold", help="PSI Threshold")
parser.add_argument("--output-model", help="Data File to Save Model")
options = parser.parse_args()
return options
def get_psi(v1_in, v2_in, num=10):
"""
Calculate the Population Stability Index (PSI).
:param v1_in: vector 1 (baseline distribution)
:param v2_in: vector 2 (comparison distribution)
:param num: number of bins
:return: PSI value
"""
if len(v1_in) < 2:
v1 = v2_in
v2 = np.zeros(1)
elif len(v2_in) == 0:
v1 = v1_in
v2 = np.zeros(1)
else:
v1 = v1_in
v2 = v2_in
rank1 = pd.qcut(v1, num, labels=False) + 1
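# qcut assigns each value of v1 to one of `num` equal-frequency bins; +1 shifts the bin labels to 1..num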
basepop1 = pd.DataFrame({'v1': v1, 'rank1': rank1})
import gc
import json
import logging
import os
import warnings
from datetime import datetime
import numpy as np
from enum import auto, Enum
from multiprocessing import Process, Event, JoinableQueue
from multiprocessing.managers import SyncManager
from multiprocessing.queues import Empty, Full
from tqdm import tqdm
class MyManager(SyncManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Small hack to allow setting of the total number of steps
def set_total(self, total):
self.total = total
tqdm.set_total = set_total
MyManager.register('tqdm', tqdm)
class DateTimeJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
class ProcessingError():
class Type(Enum):
NoValidEndpoint = auto()
PatientIdDuplicate = auto()
InsufficientData = auto()
CannotAlignDatetimes = auto() # Error is set if we cannot find the patients in the patient timepoints file used for alignment.
def __init__(self, patientid, errortype, **kwargs):
self.patientid = patientid
self.errortype = errortype
self.additional_data = kwargs
def __str__(self):
base_str = 'Error: {:20} Patient: {}'.format(self.errortype.name, self.patientid)
if self.additional_data:
base_str += ' ' + str(self.additional_data)
return base_str
class HDF5Reader(Process):
def __init__(self, input_queue, output_queue, hdf5_group, progressbar, **read_kwargs):
self.input_queue = input_queue
self.output_queue = output_queue
self.hdf5_group = hdf5_group
self.progressbar = progressbar
self.read_kwargs = read_kwargs
self.exit = Event()
super().__init__()
def run(self):
self.log = logging.getLogger(str(self))
while True:
try:
next_file = self.input_queue.get(block=True, timeout=1)
except Empty:
if self.exit.is_set():
break
else:
self.log.debug('Reached timeout while waiting for input')
continue
self.progressbar.set_description('File: {}'.format(os.path.basename(next_file)))
self.process_HDF5_file(next_file)
self.input_queue.task_done()
self.log.debug('Finished processing file {}'.format(os.path.basename(next_file)))
self.progressbar.update()
def terminate(self):
self.exit.set()
def process_HDF5_file(self, endpoint_file):
import pandas as pd
self.log.debug('Processing file {}'.format(os.path.basename(endpoint_file)))
if 'binarized' in endpoint_file:
try:
data = pd.read_hdf(endpoint_file, mode='r')
except:
data = pd.DataFrame(columns=['PatientID'])
else:
data = pd.read_hdf(endpoint_file, self.hdf5_group, mode='r', **self.read_kwargs)
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import sys as sys
import pandas as pd
sys.path.insert(1, './../..')
from Dispersion_NN import Dispersion_NN
#************Start of of user block******************
output_csv_file='./Fig2_ab_NN.csv'
read_csv=True #change to True if one wants to read the data from the csv file,\
# instead of calculating from SLiM_NN
read_output_csv_file='./Fig2_ab_NN.csv'
fontsize=12
nu=1.
zeff=1.
eta=2.
shat=0.006
beta=0.002
ky=0.0
mu=0.
xstar=10
mu_list=np.arange(0,6,0.01)
path='./../../Trained_model/'
NN_omega_file =path+'SLiM_NN_omega.h5'
NN_gamma_file =path+'SLiM_NN_stabel_unstable.h5'
norm_omega_csv_file=path+'NN_omega_norm_factor.csv'
norm_gamma_csv_file=path+'NN_stabel_unstable_norm_factor.csv'
#************End of user block******************
para_list=[[nu, zeff, eta, shat, beta, ky, mu, xstar]\
for mu in mu_list]
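#One parameter set per mu value; all other plasma parameters are held fixed at the values above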
Dispersion_NN_obj=Dispersion_NN(NN_omega_file,NN_gamma_file,norm_omega_csv_file,norm_gamma_csv_file)
f_list=[]
gamma_list=[]
gamma_10_list=[]
if read_csv==True:
data = pd.read_csv(read_output_csv_file)
# Import libraries
import pandas as pd
import datetime as dt
import requests
#Importamos Panel Lider (web scraping)
def iol_scraping_panel_lider():
df = pd.read_html(
"https://iol.invertironline.com/Mercado/Cotizaciones",
decimal=',', thousands='.')
df = df[0]
df = df.iloc[:, 0:13]
df.rename(columns = {"Símbolo":"simbolo", "ÚltimoOperado":"ultimo_operado",
"VariaciónDiaria":"var_diaria", "CantidadCompra":"cantidad_compra",
"PrecioCompra":"precio_compra", "PrecioVenta":"precio_venta",
"CantidadVenta":"cantidad_venta", "Apertura":"open",
"Mínimo":"min", "Máximo":"max", "ÚltimoCierre":"close",
"MontoOperado":"monto", "Fecha/Hora":"hora"}, inplace = True)
df.insert((len(df.columns) - 1), "fecha", str(dt.date.today()))
df.simbolo = df.simbolo.str.split(" ")
df.simbolo = df.simbolo.apply(lambda x: x[0])
return df
iol_scraping_panel_lider()
#API IOL
BASE_URL = "https://api.invertironline.com"
## AUTHENTICATION
"""
https://api.invertironline.com/token
POST /token HTTP/1.1
Host: api.invertironline.com
Content-Type: application/x-www-form-urlencoded
username=MYUSERNAME&password=MYPASSWORD&grant_type=password
"""
def iol_authentication(user_name, password):
h = {
"Content-Type":"application/x-www-form-urlencoded"
}
body = {
"username":user_name,
"password":password,
"grant_type":"password"
}
response = requests.post(BASE_URL + "/token", headers = h, data = body)
if response.status_code == 200:
response = (response.json())
return response
else:
return (f"Error: {response.status_code} con respuesta = {response.text}")
#iol_response = iol_authentication("iol_user_name", "iol_password")
##TIME TO EXPIRE
LIST_MONTH = {
"Jan":"01",
"Feb":"02",
"Mar":"03",
"Apr":"04",
"May":"05",
"Jun":"06",
"Jul":"07",
"Aug":"08",
"Sep":"09",
"Oct":"10",
"Nov":"11",
"Dec":"12"
}
def iol_seconds_to_expire(iol_response):
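# Parse the '.expires' field of the token response (a value like 'Wed, 01 Jan 2020 12:00:00 GMT')
# and return the number of seconds remaining until the access token expires.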
iol_expire = iol_response[".expires"][5::].split(" ")
iol_expire[1] = LIST_MONTH[iol_expire[1]]
expire_datetime = " "
expire_datetime = expire_datetime.join(iol_expire)
expire_datetime = dt.datetime.strptime(expire_datetime, "%d %m %Y %H:%M:%S %Z")
sec_to_expire = (expire_datetime - dt.datetime.now()).total_seconds()
return sec_to_expire
## MY ACCOUNT
### Account statement
def iol_get_estado_de_cuenta(iol_response):
h = {
"Authorization":"Bearer " + iol_response["access_token"]
}
response = requests.get(BASE_URL + "/api/v2/estadocuenta", headers = h)
if response.status_code == 200:
response = response.json()
response = response["cuentas"]
response = pd.json_normalize(
response, "saldos", ["numero", "tipo", "moneda", "titulosValorizados",
"total", "margenDescubierto"]
)
response.columns = ["liquidacion", "saldo", "comprometido", "disponible",
"disponible_operar", "nro_cta", "tipo", "moneda", "titulos_valorizados",
"total", "margen_descubierto"]
return response
else:
return (f"Error: {response.status_code} con respuesta = {response.text}")
### Portfolio (check what happens when there are several operations with different settlement terms)
def iol_get_portafolio(iol_response, pais = "argentina"):
h = {
"Authorization":"Bearer " + iol_response["access_token"]
}
endpoint = BASE_URL + f"/api/v2/portafolio/{pais}"
response = requests.get(endpoint, headers = h)
if response.status_code == 200:
response = (response.json())
response = response["activos"]
response = pd.json_normalize(response)
return response
else:
return (f"Error: {response.status_code} con respuesta = {response.text}")
### Operation
def iol_get_operacion(iol_response, numero):
h = {
"Authorization":"Bearer " + iol_response["access_token"]
}
endpoint = BASE_URL + f"/api/v2/operaciones/{numero}"
response = requests.get(endpoint, headers = h)
if response.status_code == 200:
response = (response.json())
response = pd.DataFrame(response)
return response
else:
return (f"Error: {response.status_code} con respuesta = {response.text}")
### Delete operation
def iol_delete_operacion(iol_response, numero):
h = {
"Authorization":"Bearer " + iol_response["access_token"]
}
endpoint = BASE_URL + f"/api/v2/operaciones/{numero}"
response = requests.delete(endpoint, headers = h)
if response.status_code == 200:
response = (response.json())
response = pd.DataFrame(response)
# Libraries
import random
from math import pi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
COLOR = ['#B6BFF2', '#04C4D9', '#F2C12E', '#F26363', '#BF7E04', '#7F2F56', '#E8B9B5', '#63CAF3', '#F27405', '#68BD44']
MARKER = ['D', '^', 'o', 'H', '+', 'x', 's', 'p', '*', '3']
def cross_methods_plot(matrics_name, method_name, values, save_name, postfix='.jpg', color=COLOR,
spider=True, bar=False):
'''
Comparison of experimental results among methods
:param matrics_name: selected performance matrics, e.g. ['BWT','ACC', 'FWT']
:param method_name: selected CL method applied on the dataset, e.g. ['SI','EWC']
:param df: 2D array w/ rows indicating matrices, columns indicating methods
:param save_name: name of the img to be saved
:param postfix: type of the img, e.g. jpg, png, pdf, etc.
:param color: alternative colors
:param spider: use spider plot, default is True
:param bar: use bar plot, default is False
:return: save figure
'''
if not spider and not bar:
raise NotImplementedError("No figure type is selected.")
raw_data = {'matrices\method': matrics_name}
for i in range(len(method_name)):
raw_data[method_name[i]] = values[i]
df_col = ['matrices\method'] + method_name
df = pd.DataFrame(raw_data, columns=df_col)
import pandas as pd
from .indicator import Indicator
class RSI(Indicator):
_NAME = 'rsi'
def __init__(self, currency_pair='btc_jpy', period='1d', length=14):
super().__init__(currency_pair, period)
self._length = self._bounded_length(length)
def request_data(self, count=100, to_epoch_time=None):
candlesticks_df = self._get_candlesticks_df(count, to_epoch_time)
rsi = self._exec_talib_func(candlesticks_df, price='close', timeperiod=self._length)
formatted_rsi = self._formatting(candlesticks_df, rsi)
return formatted_rsi
def _required_candlesticks_count(self, count):
return self._bounded_count(count) + self._length
def _formatting(self, candlesticks, rsi):
rsi.rename(self.name, inplace=True)
rsi_with_time = pd.concat([candlesticks['time'], rsi], axis=1)
#!/Tsan/bin/python
# -*- coding: utf-8 -*-
# Libraries to use
from __future__ import division
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import mysql.connector
# Database connection settings for reading
with open('conf.json', 'r') as fd:
conf = json.load(fd)
src_db = mysql.connector.connect(**conf['src_db'])
# Constants
riskFreeRate = 0.02 # risk-free rate
varThreshold =0.05 # 5% VaR threshold
scaleParameter = 50 # 50 weeks per year
# Table names
index_data_table = 'fund_weekly_index' # index time-series data
index_name_table = 'index_id_name_mapping'
type_index_table = 'index_stype_code_mapping' # table name - index corresponding to each fund type
# Mapping table for private fund index categories (only needs to run once)
def get_type_index_table(tableName = type_index_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('stype_code',inplace=True)
return pdResult
# Mapping table of private fund index names and IDs (only needs to run once)
def get_index_table(tableName = index_name_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('index_id',inplace=True)
return pdResult
# Time series of private fund index net values
def get_index(index, tableName=index_data_table):
try:
# sql_query='select id,name from student where age > %s'
cursor = src_db.cursor()
sql = "select index_id,statistic_date,index_value from %s where index_id = '%s'" % (tableName, index)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
pdResult = pd.DataFrame(result, dtype=float)
pdResult.columns = ['index', 'date', 'net_worth']
pdResult = pdResult.drop_duplicates().set_index('date')
pdResult = pdResult.dropna(axis=0)
pdResult = pdResult.fillna(method='ffill')
return pdResult
# Classify by quarter
def byseasons(x):
if 1<=x.month<=3:
return str(x.year)+'_'+str(1)
elif 4<= x.month <=6:
return str(x.year)+'_'+str(2)
elif 7<= x.month <=9:
return str(x.year)+'_'+str(3)
else:
return str(x.year)+'_'+str(4)
# Compute the max drawdown and its start/end dates
def cal_max_dd_indicator(networthSeries):
maxdd = pd.DataFrame(index = networthSeries.index, data=None, columns =['max_dd','max_dd_start_date','max_dd_end_date'],dtype = float)
maxdd.iloc[0] = 0
maxdd.is_copy = False
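# Drawdown at each date = 1 - NAV / running max NAV; the running-max date (peak) and the current date are stored as start/end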
for date in networthSeries.index[1:]:
maxdd.loc[date] = [1 - networthSeries.loc[date] / networthSeries.loc[:date].max(),networthSeries.loc[:date].idxmax(),date]
#maxdd[['max_dd_start_date','max_dd_end_date']].loc[date] = [[networthSeries.loc[:date].idxmax(),date]]
#maxdd['max_dd_start_date'].loc[date] = networthSeries.loc[:date].idxmax()
return maxdd['max_dd'].max(), maxdd.loc[maxdd['max_dd'].idxmax]['max_dd_start_date'],maxdd.loc[maxdd['max_dd'].idxmax]['max_dd_end_date']
# Compute max drawdown per quarter; input and output are DataFrames
def cal_maxdd_by_season(df):
seasonList = sorted(list(set(df['season'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['season'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'season'
return maxdd_df
# Compute max drawdown per year; input and output are DataFrames
def cal_maxdd_by_year(df):
seasonList = sorted(list(set(df['year'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['year'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'year'
return maxdd_df
# Prepare the raw data DataFrame
def get_count_data(cnx):
cursor = cnx.cursor()
sql = "select fund_id,foundation_date,fund_type_strategy from fund_info"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
df.columns = ['fund_id', 'found_date', 'strategy']
sql = "select type_id, strategy from index_type_mapping"
cursor.execute(sql)
result = cursor.fetchall()
meg = pd.DataFrame(result)
meg.columns = ['type_id', 'strategy']
# Data cleaning
df = df.dropna()
df = df[df['strategy'] != u'']
# Merge with the mapping table
df = pd.merge(df, meg)
# Add year column
df['year'] = [str(i.year) for i in df['found_date']]
# Add month column
df['month'] = [str(i.year) + '_' + str(i.month) for i in df['found_date']]
return df.drop('strategy', axis=1)
# Count funds per year per type; returns a DataFrame
def get_ann_fund(df):
temp = df.groupby(['type_id', 'year'])['fund_id'].count().to_frame() # group and convert to DataFrame
temp = pd.pivot_table(temp, values='fund_id', index='year', columns=['type_id'])
temp['Type_0'] = df.groupby(['year'])['fund_id'].count().to_frame()['fund_id'] # add whole-market totals
temp.sort_index(axis=0)
temp.sort_index(axis=1, inplace=True)
return temp
# Count funds per month per type; returns a DataFrame
def get_month_fund(df):
temp = df.groupby(['type_id', 'month'])['fund_id'].count().to_frame() # group and convert to DataFrame
temp = pd.pivot_table(temp, values='fund_id', index=['month'], columns=['type_id'])
temp['Type_0'] = df.groupby(['month'])['fund_id'].count().to_frame()['fund_id'] # add whole-market totals
temp.sort_index(axis=0)
temp.sort_index(axis=1, inplace=True)
return temp
# Prepare the raw data DataFrame
def get_org_count(cnx):
cursor = cnx.cursor()
sql = "SELECT org_id, found_date FROM PrivateEquityFund.org_info WHERE org_category LIKE '4%'"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
import pytest
from matplotcheck.base import PlotTester
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy import stats
"""Fixtures"""
@pytest.fixture
def pd_df_reg_data():
"""Create a pandas dataframe with points that are roughly along the same
line."""
data = {
"A": [1.2, 1.9, 3.0, 4.1, 4.6, 6.0, 6.9, 8.4, 9.0],
"B": [2.4, 3.9, 6.1, 7.8, 9.0, 11.5, 15.0, 16.2, 18.6],
}
return pd.DataFrame(data)
@pytest.fixture
def pd_df_reg_one2one_data():
"""Create a pandas dataframe with points that are along a one to one
line."""
data = {
"A": [0.0, 2.0],
"B": [0.0, 2.0],
}
return pd.DataFrame(data)
import datetime
import logging
import pandas as pd
from django.core.exceptions import ValidationError
from django.db import transaction
from reversion import revisions as reversion
from xlrd import XLRDError
from app.productdb.models import Product, CURRENCY_CHOICES, ProductGroup, ProductMigrationSource, ProductMigrationOption
from app.productdb.models import Vendor
logger = logging.getLogger("productdb")
class InvalidExcelFileFormat(Exception):
"""Exception thrown if there is an issue with the low level file format"""
pass
class InvalidImportFormatException(Exception):
"""Exception thrown if the format of the Excel file for the import is invalid"""
pass
class BaseExcelImporter:
"""
Base class for the Excel Import
"""
sheetname = "products"
required_keys = {"product id", "description", "list price", "vendor"}
import_converter = None
drop_na_columns = None
workbook = None
path = None
valid_file = False
user_for_revision = None
__wb_data_frame__ = None
import_result_messages = None
def __init__(self, path_to_excel_file=None, user_for_revision=None):
self.path_to_excel_file = path_to_excel_file
if self.import_result_messages is None:
self.import_result_messages = []
if self.import_converter is None:
self.import_converter = {}
if self.drop_na_columns is None:
self.drop_na_columns = []
if user_for_revision:
self.user_for_revision = user_for_revision
def _load_workbook(self):
try:
self.workbook = pd.ExcelFile(self.path_to_excel_file)
except XLRDError as ex:
logger.error("invalid format of excel file '%s' (%s)" % (self.path_to_excel_file, ex), exc_info=True)
raise InvalidExcelFileFormat("invalid file format") from ex
except Exception:
logger.fatal("unable to read workbook at '%s'" % self.path_to_excel_file, exc_info=True)
raise
def _create_data_frame(self):
self.__wb_data_frame__ = self.workbook.parse(
self.sheetname, converters=self.import_converter
)
# normalize the column names (all lowercase, strip whitespace if any)
self.__wb_data_frame__.columns = [x.lower() for x in self.__wb_data_frame__.columns]
self.__wb_data_frame__.columns = [x.strip() for x in self.__wb_data_frame__.columns]
# drop NA columns if defined
if len(self.drop_na_columns) != 0:
self.__wb_data_frame__.dropna(axis=0, subset=self.drop_na_columns, inplace=True)
def verify_file(self):
if self.workbook is None:
self._load_workbook()
self.valid_file = False
sheets = self.workbook.sheet_names
# verify worksheet that is required
if self.sheetname not in sheets:
raise InvalidImportFormatException("sheet '%s' not found" % self.sheetname)
# verify keys in file
dframe = self.workbook.parse(self.sheetname)
keys = [x.lower() for x in set(dframe.keys())]
if len(self.required_keys.intersection(keys)) != len(self.required_keys):
req_key_str = ", ".join(sorted(self.required_keys))
raise InvalidImportFormatException("not all required keys are found in the Excel file, required keys "
"are: %s" % req_key_str)
self.valid_file = True
def is_valid_file(self):
return self.valid_file
@staticmethod
def _import_datetime_column_from_file(row_key, row, target_key, product):
"""
helper method to import an optional columns from the excel file
"""
changed = False
faulty_entry = False
msg = ""
try:
if row_key in row:
if not pd.isnull(row[row_key]):
currval = getattr(product, target_key)
if (type(row[row_key]) is pd.tslib.Timestamp) or (type(row[row_key]) is datetime.datetime):
newval = row[row_key].date()
else:
newval = None
if currval != newval:
setattr(product, target_key, row[row_key].date())
changed = True
except Exception as ex: # catch any exception
faulty_entry = True
msg = "cannot set %s for <code>%s</code> (%s)" % (row_key, row["product id"], ex)
return changed, faulty_entry, msg
def import_to_database(self, status_callback=None, update_only=False):
"""
Base method that is triggered for the update
"""
pass
class ProductsExcelImporter(BaseExcelImporter):
"""
Excel Importer class for Products
"""
sheetname = "products"
required_keys = {"product id", "description", "list price", "vendor"}
import_converter = {
"product id": str,
"description": str,
"list price": str,
"currency": str,
"vendor": str
}
drop_na_columns = ["product id"]
valid_imported_products = 0
invalid_products = 0
@property
def amount_of_products(self):
return len(self.__wb_data_frame__) if self.__wb_data_frame__ is not None else -1
def import_to_database(self, status_callback=None, update_only=False):
"""
Import products from the associated excel sheet to the database
:param status_callback: optional status message callback function
:param update_only: don't create new entries
"""
if self.workbook is None:
self._load_workbook()
if self.__wb_data_frame__ is None:
self._create_data_frame()
self.valid_imported_products = 0
self.invalid_products = 0
self.import_result_messages.clear()
amount_of_entries = len(self.__wb_data_frame__.index)
# process entries in file
current_entry = 1
for index, row in self.__wb_data_frame__.iterrows():
# update status message if defined
if status_callback and (current_entry % 100 == 0):
status_callback("Process entry <strong>%s</strong> of "
"<strong>%s</strong>..." % (current_entry, amount_of_entries))
faulty_entry = False # indicates an invalid entry
created = False # indicates that the product was created
skip = False # skip the current entry (used in update_only mode)
msg = "import successful" # message to describe the result of the product import
if update_only:
try:
p = Product.objects.get(product_id=row["product id"])
except Product.DoesNotExist:
# element doesn't exist
skip = True
except Exception as ex: # catch any exception
logger.warn("unexpected exception occurred during the lookup "
"of product %s (%s)" % (row["product id"], ex))
else:
p, created = Product.objects.get_or_create(product_id=row["product id"])
changed = created
if not skip:
# apply changes (only if a value is set, otherwise ignore it)
row_key = "description"
try:
# set the description value
if not pd.isnull(row[row_key]):
if p.description != row[row_key]:
p.description = row[row_key]
changed = True
# determine the list price and currency from the excel file
row_key = "list price"
new_currency = "USD" # default in model
if not pd.isnull(row[row_key]):
if type(row[row_key]) == float:
new_price = row[row_key]
elif type(row[row_key]) == int:
new_price = float(row[row_key])
elif type(row[row_key]) == str:
price = row[row_key].split(" ")
if len(price) == 1:
# only a number
new_price = float(row[row_key])
elif len(price) == 2:
# contains a number and a currency
try:
new_price = float(price[0])
except:
raise Exception("cannot convert price information to float")
# check valid currency value
valid_currency = price[1].upper() in dict(CURRENCY_CHOICES)
if valid_currency:
new_currency = price[1].upper()
else:
raise Exception("cannot set currency unknown value %s" % price[1].upper())
else:
raise Exception("invalid format for list price, detected multiple spaces")
else:
logger.debug("list price data type for %s identified as %s" % (
row["product id"],
str(type(row[row_key]))
))
raise Exception("invalid data-type for list price")
else:
new_price = None
row_key = "currency"
if row_key in row:
if not pd.isnull(row[row_key]):
# check valid currency value
valid_currency = row[row_key].upper() in dict(CURRENCY_CHOICES)
if valid_currency:
new_currency = row[row_key].upper()
else:
raise Exception("cannot set currency unknown value %s" % row[row_key].upper())
# apply the new list price and currency if required
if new_price is not None:
if p.list_price != new_price:
p.list_price = new_price
changed = True
if p.currency != new_currency:
p.currency = new_currency
changed = True
# set vendor to unassigned (ID 0) if no Vendor is provided and the product was created
row_key = "vendor"
if pd.isnull(row[row_key]):  # api: pandas.isnull
"""
GUI code modified based on https://github.com/miili/StreamPick
For earthquake PKiKP coda quality evaluation and stack
"""
import os
import pickle
import pandas as pd
import numpy as np
# GUI import
import PyQt5
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sys
import signal
import scipy
import gpar
from gpar.util import util
from itertools import cycle
#figure plot import
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.transforms import offset_copy
from matplotlib.widgets import RectangleSelector
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import shiftgrid
# from mpl_toolkits.axesgrid1 import make_axes_locatable
#obspy import
from obspy.taup import TauPyModel
import obspy
from obspy.core.trace import Trace
from obspy.core.stream import Stream
from obspy.core import read
from obspy.core import AttribDict
signal.signal(signal.SIGINT, signal.SIG_DFL)
# color = list(mcolors.cnames.values())
color = ['red', 'blue', 'green','yellow','cyan','magenta','purple']
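# Minimal launch sketch for the glanceEQ window defined below (the pickle file
# name is illustrative only):
# app = QtWidgets.QApplication(sys.argv)
# win = glanceEQ('ILAR.pkl', ap=app)
# sys.exit(app.exec_())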
#class for event first evaluation
class glanceEQ(QtWidgets.QMainWindow):
def __init__(self, array=None, parent=None, ap=None):
if ap is None:
self.qApp = QtWidgets.QApplication(sys.argv)
else:
self.qApp = ap
self.KeepGoing = False
if isinstance(array, str):
ar = util.loadArray(array)
elif isinstance(array, gpar.arrayProcess.Array):
ar = array
else:
msg = 'array must be a gpar.arrayProcess.Array instance or a path to a pickled Array file'
raise ValueError(msg)
self.array = ar
self.eve_type = ['A','B','C','D']
self._shortcuts = {'eve_next': 'n',
'eve_prev': 'p',
'trim_apply': 'w',
'gain_up': 'u',
'gain_down': 'd',
'strip': 's',
'A':'a',
'B':'b',
'C':'c',
'D':'d'}
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = None
self._initEqList()
self._stripDF = pd.DataFrame()
self._badDF = pd.DataFrame()
self._btype = 'beam'
self._method = 'all'
self.trinWin = [{'name':'N200-C200','noise':200.0,'coda':200.0,
'stime':400.0,'etime':1800,
'smooth':4.0,'model':'ak135'}]
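# Each window entry defines the noise and coda lengths (s), the stripping time
# span [stime, etime] (s), a smoothing length (s) that is later converted to
# samples, and the velocity model used for predicted arrivals.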
self._current_win = None
self._current_strip = False
self._eventCycle = cycle(self._eqlist)
self._eventInfo(next(self._eventCycle))
QMainWindow.__init__(self)
self.setupUI()
def setupUI(self):
self.main_widget = QtWidgets.QWidget(self)
self._initMenu()
self._createStatusBar()
self._initPlots()
l = QVBoxLayout(self.main_widget)
l.addLayout(self.btnbar)
l.addLayout(self.btnbar2)
l.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.setGeometry(300, 300, 1200, 800)
self.setWindowTitle('Array Analysis: %s'%self.array.name)
self.show()
def _killLayout(self):
pass
def _initEqList(self):
self._eqlist = []
for _eve in self._events:
self._eqlist.append(_eve.ID)
self._eqlist.sort()
def _initPlots(self):
self.fig = Figure(facecolor='.86',dpi=100, frameon=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocusPolicy(PyQt5.QtCore.Qt.StrongFocus)
self._drawFig()
# connect the events
self.fig.canvas.mpl_connect('scroll_event', self._pltOnScroll)
self.fig.canvas.mpl_connect('motion_notify_event', self._pltOnDrag)
self.fig.canvas.mpl_connect('button_release_event', self._pltOnButtonRelease)
def _initMenu(self):
# Next and Prev Earthquake
nxt = QtWidgets.QPushButton('Next >>',
shortcut=self._shortcuts['eve_next'], parent=self.main_widget)
nxt.clicked.connect(self._pltNextEvent)
nxt.setToolTip('shortcut <b>n</b>')
nxt.setMaximumWidth(150)
prv = QPushButton('Prev >>',
shortcut=self._shortcuts['eve_prev'], parent=self.main_widget)
prv.clicked.connect(self._pltPrevEvent)
prv.setToolTip('shortcut <b>p</b>')
prv.setMaximumWidth(150)
# Earthquake drop-down
self.evecb = QComboBox(self)
for eve in self._eqlist:
self.evecb.addItem(eve)
self.evecb.activated.connect(self._pltEvent)
self.evecb.setMaximumWidth(1000)
self.evecb.setMinimumWidth(80)
# coda strip button
self.codabtn = QtWidgets.QPushButton('Strip',
shortcut=self._shortcuts['strip'],parent=self.main_widget)
self.codabtn.setToolTip('shortcut <b>s</b>')
self.codabtn.clicked.connect(self._appStrip)
self.codacb = QComboBox(self)
for med in ['all', 'coda','twoline']:
self.codacb.addItem(med)
self.codacb.activated.connect(self._selectMethod)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.wincb = QComboBox(self)
self.wincb.activated.connect(self._changeStrip)
self._updateWindow()
# edit/delete coda selected window
winEdit = QtWidgets.QPushButton('Coda Window')
winEdit.resize(winEdit.sizeHint())
winEdit.clicked.connect(self._editTimeWindow)
winDelt = QtWidgets.QPushButton('Delete')
winDelt.resize(winDelt.sizeHint())
winDelt.clicked.connect(self._deleteWin)
# Coda level
_radbtn = []
for _o in self.eve_type:
_radbtn.append(QRadioButton(_o.upper(), shortcut=self._shortcuts[_o.upper()]))
_radbtn[-1].setToolTip('Level: '+_o)
self.levelGrp = QButtonGroup()
self.levelGrp.setExclusive(True)
levelbtn = QHBoxLayout()
for _i, _btn in enumerate(_radbtn):
self.levelGrp.addButton(_btn, _i)
levelbtn.addWidget(_btn)
# plot slide beam figure button
self.sbcb = QComboBox(self)
for btype in ['beam', 'slide', 'vespetrum','strip']:
self.sbcb.addItem(btype)
self.sbcb.activated.connect(self._updatePlot)
self.vepcb = QComboBox(self)
for scale in ['log10', 'log','sqrt','beam']:
self.vepcb.addItem(scale)
self.vepcb.activated.connect(self._updatePlot )
self.vepcb.setEnabled(False)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.ampmin = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=1)
self.ampmax = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=3)
self.ampmin.valueChanged.connect(self._updatePlot)
self.ampmax.valueChanged.connect(self._updatePlot)
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
# self._initAmp()
self.sbcb.activated.connect(self._activeAmp)
self.ttbtn = QtWidgets.QPushButton('Phases', parent=self.main_widget)
self.ttbtn.setCheckable(True)
self.ttbtn.clicked.connect(self._updatePlot)
# Arrange buttons
vline = QFrame()
vline.setFrameStyle(QFrame.VLine | QFrame.Raised)
self.btnbar = QHBoxLayout()
self.btnbar.addWidget(prv)
self.btnbar.addWidget(nxt)
self.btnbar.addWidget(QLabel('Event'))
self.btnbar.addWidget(self.evecb)
##
self.btnbar.addWidget(vline)
self.btnbar.addWidget(self.codabtn)
self.btnbar.addWidget(self.codacb)
self.btnbar.addWidget(self.wincb)
self.btnbar.addWidget(winEdit)
self.btnbar.addWidget(winDelt)
self.btnbar.addStretch(1)
self.btnbar2 = QHBoxLayout()
self.btnbar2.addWidget(QLabel('Level: '))
self.btnbar2.addLayout(levelbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('TYPE'))
self.btnbar2.addWidget(self.sbcb)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('Scale'))
self.btnbar2.addWidget(self.vepcb)
self.btnbar2.addWidget(QLabel('AMP'))
self.btnbar2.addWidget(self.ampmin)
self.btnbar2.addWidget(self.ampmax)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.ttbtn)
self.btnbar2.addStretch(1)
#Menubar
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save', self._saveFile)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save as', self._saveFileFormat)
fileMenu.addSeparator()
fileMenu.addAction(QIcon().fromTheme('document-open'),
'Load array', self._openArray)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-open'),
'Load Strip Pickle File', self._openFile)
fileMenu.addSeparator()
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save Plot', self._savePlot)
fileMenu.addSeparator()
quit = QAction(QIcon().fromTheme('application-exit')," &Exit", self)
fileMenu.addAction(quit)
fileMenu.triggered[QAction].connect(self.closeArray)
def _hardExist(self):
self.deleteLater()
def _activeAmp(self):
if self.sbcb.currentText() == 'vespetrum':
self.ampmin.setEnabled(True)
self.ampmax.setEnabled(True)
self.vepcb.setEnabled(True)
if self.vepcb.currentText() == 'beam':
self.ampmax.setMaximum(100000)
# self.ampmax.setValue(1000)
self.ampmax.setSingleStep(500)
# self.ampmin.setValue(10)
self.ampmin.setMaximum(100000)
self.ampmin.setSingleStep(500)
elif self.vepcb.currentText() == 'sqrt':
self.ampmax.setMaximum(300)
# self.ampmax.setValue(30)
self.ampmax.setSingleStep(5)
# self.ampmin.setValue(3)
self.ampmin.setMaximum(300)
self.ampmin.setSingleStep(5)
elif self.vepcb.currentText() == 'log':
self.ampmax.setMaximum(12)
# # self.ampmax.setValue(7)
self.ampmax.setSingleStep(1)
# # self.ampmin.setValue(2)
self.ampmin.setMaximum(12)
self.ampmin.setSingleStep(1)
elif self.vepcb.currentText() == 'log10':
self.ampmax.setSingleStep(0.5)
self.ampmin.setSingleStep(0.5)
self.ampmax.setMaximum(5)
self.ampmin.setMaximum(5)
else:
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
self.vepcb.setEnabled(False)
def _createStatusBar(self):
"""
Creates the status bar
"""
sb =QStatusBar()
sb.setFixedHeight(18)
self.setStatusBar(sb)
self.statusBar().showMessage('Ready')
def _selectMethod(self, index):
self._method = self.codacb.currentText()
self.sbcb.setCurrentIndex(3)
self._updatePlot()
def _changeStrip(self,index):
if index == len(self.trinWin):
return self._newTrim()
else:
return self._appStrip()
def _newTrim(self):
"""
Create a new strip window
"""
newWin = self.defWindow(self)
if newWin.exec_():
self.trinWin.append(newWin.getValues())
self._updateWindow()
self.wincb.setCurrentIndex(len(self.trinWin)-1)
self._appStrip()
def _editTimeWindow(self):
"""
Edit existing coda selection window
"""
_i = self.wincb.currentIndex()
this_window = self.trinWin[_i]
editWindow = self.defWindow(self, this_window)
if editWindow.exec_():
self.trinWin[_i] = editWindow.getValues()
self._updateWindow()
self.wincb.setCurrentIndex(_i)
self._appStrip()
def _deleteWin(self):
"""
Delete window
"""
pass
_i = self.wincb.currentIndex()
def _updateWindow(self):
self.wincb.clear()
self.wincb.setCurrentIndex(-1)
for _i, _f in enumerate(self.trinWin):
self.wincb.addItem('Noise %.2f sec - Coda %.2f sec' %(_f['noise'], _f['coda']))
self.wincb.addItem('Create new Window')
def _appStrip(self, button=True, draw=True):
"""
Apply coda strip
"""
_method = self.codacb.currentText()
_j = self.wincb.currentIndex()
self._eventInfo(self._current_id)
self._current_strip = True
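# convert the smoothing length from seconds to samples using the current trace sampling interval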
spts = int(self.trinWin[_j]['smooth'] / self._current_delta )
codaStrip(self._current_event, method=_method, window=spts,
siglen=self.trinWin[_j]['coda'], noise=self.trinWin[_j]['noise'],beamphase=self.beamphase,
model=self.trinWin[_j]['model'], stime=self.trinWin[_j]['stime'], etime=self.trinWin[_j]['etime'],)
self._btype = 'strip'
self.sbcb.setCurrentIndex(3)
self._setCodaStrip()
self._updatePlot()
def _pltEvent(self):
"""
Plot event from DropDown Menu
"""
_i = self.evecb.currentIndex()
while next(self._eventCycle) != self._eqlist[_i]:
pass
self._eventInfo(self._eqlist[_i])
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip=True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
self._drawFig()
def _pltPrevEvent(self):
"""
Plot previous events
"""
_j = self.evecb.currentIndex()
for _i in range(len(self._eqlist) - 1):
prevEvent = next(self._eventCycle)
self._eventInfo(prevEvent)
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
if _j == 0:
_n = len(self.evecb) - 1
self.evecb.setCurrentIndex(_n)
else:
self.evecb.setCurrentIndex(_j-1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _pltNextEvent(self):
_id = self._current_event.ID
level = self.eve_type[self.levelGrp.checkedId()]
if level == 'D':
self._current_strip = True
self._setCodaStrip()
else:
# if len(self._stripDF) != 0:
# existDF = self._stripDF[(self._stripDF.ID == _id)]
# else:
# existDF = pd.DataFrame()
# if len(existDF) == 0:
if not self._current_strip:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
self._current_strip = True
self._appStrip()
return
self._eventInfo(next(self._eventCycle))
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
_i = self.evecb.currentIndex()
if _i == len(self.evecb) - 1:
self.evecb.setCurrentIndex(0)
else:
self.evecb.setCurrentIndex(_i+1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _eventInfo(self, eqid):
"""
Copies the array process result from the current Earthquake object
"""
for eve in self._events:
if eve.ID == eqid:
event = eve
self._current_event = event
self.beamphase = event.beamphase
self._current_id = eqid
if not hasattr(event, 'beam'):
return
self._current_beam = event.beam
filts = {}
for tr in self._current_beam:
filts[tr.stats.station] = tr.stats.channel
self._current_filts = filts
self._current_ID = event.ID
self._current_dis = event.dis
self._current_p = event.rayp
self._current_bb = event.bb
self._current_bakAz = event.baz
self._current_delta = event.delta
if hasattr(event, 'slideSt'):
self._current_slide = event.slideSt
if hasattr(event, 'energy'):
self._current_energy = event.energy
self._current_time = event.slantTime
self._current_K = event.slantK
self._current_type = event.slantType
def _setCodaStrip(self):
if not self._current_strip:
return
event = self._current_event
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) !=0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()  # api: pandas.DataFrame
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])  # api: pandas.DataFrame
#! /usr/bin/env python
# <NAME>
# February 22, 2016
# Vanderbilt University
"""
Tools for converting pandas DataFrames to .hdf5 files, and converting from
one type of hdf5 file to `pandas_hdf5` file format
"""
from __future__ import print_function, division, absolute_import
__author__ =['<NAME>']
__copyright__ =["Copyright 2017 <NAME>, geometry"]
__email__ =['<EMAIL>']
__maintainer__ =['<NAME>']
__all__ =["read_pandas_hdf5","read_hdf5_file_to_pandas_DF",\
"pandas_file_to_hdf5_file","hdf5_file_to_pandas_file",\
"pandas_df_to_hdf5_file","concadenate_pd_df"]
import numpy as num
import pandas as pd
import h5py
from . import file_dir_check as fd
import os
def read_pandas_hdf5(hdf5_file, key=None, ret=False):
"""
Reads a `.hdf5` file that contains one or many datasets and converts it into
a pandas DataFrame. It assumes that the file is a PyTables store.
Parameters
----------
hdf5_file: string
Path to `.hdf5` file containing one or many pandas DataFrame(s).
key: string
If provided, it will extract `key` as a pandas DataFrame
ret: boolean, (default=False)
Option to return key of the file.
Returns
-------
pd_dataframe: pandas DataFrame object
DataFrame from `hdf5_file` with under the `key` directory.
"""
Program_Message = fd.Program_Msg(__file__)
fd.File_Exists(hdf5_file)
# Checking number of keys
hdf5_file_obj = pd.HDFStore(hdf5_file)  # api: pandas.HDFStore
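# Example usage (file path and key are hypothetical):
# df = read_pandas_hdf5('catalogue.hdf5', key='data')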
import unittest
import pandas as pd
import numpy as np
from tickcounter.questionnaire import Encoder
from pandas.testing import assert_frame_equal, assert_series_equal
class TestEncoder(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestEncoder, cls).setUpClass()
cls.original = pd.read_csv("test/test_data/time_management/data.csv")
cls.descrip = pd.read_csv("test/test_data/time_management/descrip.csv")
cls.scale_a = {"Strong Agree": 5, "Agree": 4, "Neither": 3, "Disagree": 2, "Strong Disagree": 1}
cls.scale_b = {"Strong Agree": 1, "Agree": 2, "Neither": 3, "Disagree": 4, "Strong Disagree": 5}
cls.question_col = [str(i) for i in range(6, 18)]
cls.encoder_1 = Encoder({
"Strong Agree": 5,
"Agree": 4,
"Neither": 3,
"Disagree": 2,
"Strong Disagree": 1
}, neutral=3, default=3, name="Agreeness", dtype=int)
cls.encoder_2 = Encoder(template=TestEncoder.encoder_1, invert=True, dtype=int)
cls.encoder_3 = Encoder(encoding=TestEncoder.encoder_1.encoding, neutral=3, dtype=int)
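# encoder_1 maps the Likert labels to 5..1 with 3 as both neutral and default fill;
# encoder_2 is the inverted copy built from that template; encoder_3 reuses the
# same encoding but has no default, so missing values are not filled.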
def setUp(self):
self.df = TestEncoder.original.copy()
def test_transform_default_df(self):
result = TestEncoder.encoder_1.transform(self.df)
# The original df should not be mutated.
assert_frame_equal(TestEncoder.original, self.df)
expected = self.df.copy()
expected[TestEncoder.question_col] = self.df[TestEncoder.question_col].replace(TestEncoder.scale_a)
expected[TestEncoder.question_col] = expected[TestEncoder.question_col].fillna(3)
assert_frame_equal(result, expected, check_dtype=False)
def test_transform_default_ss(self):
result = TestEncoder.encoder_2.transform(self.df['6'])
assert_series_equal(TestEncoder.original['6'], self.df['6'])
expected = self.df['6'].replace(TestEncoder.scale_b)
expected.fillna(3, inplace=True)
assert_series_equal(result, expected, check_dtype=False)
def test_transform_columns(self):
result = TestEncoder.encoder_3.transform(self.df, columns=['6', '7', '8'])
assert_frame_equal(TestEncoder.original, self.df)
expected = self.df.copy()
expected[['6', '7', '8']] = self.df[['6', '7', '8']].replace(TestEncoder.scale_a)
assert_frame_equal(result, expected, check_dtype=False)
def test_transform_ignore_list(self):
result = TestEncoder.encoder_1.transform(self.df, ignore_list=['6', '7', '8'])
assert_frame_equal(TestEncoder.original, self.df)
expected = self.df.copy()
expected[TestEncoder.question_col] = self.df[TestEncoder.question_col].replace(TestEncoder.scale_a)
expected[TestEncoder.question_col] = expected[TestEncoder.question_col].fillna(3)
expected[['6', '7', '8']] = self.df[['6', '7', '8']]
assert_frame_equal(result, expected, check_dtype=False)
def test_transform_return_rule(self):
result, rule = TestEncoder.encoder_1.transform(self.df, return_rule=True)
assert_frame_equal(TestEncoder.original, self.df)
expected = pd.Series(index=self.df.columns, dtype='str')
expected[TestEncoder.question_col] = 'Agreeness'
assert_series_equal(rule, expected, check_dtype=False)
def test_transform_mode(self):
df_new = self.df.copy()
df_new[['7', '8', '9']] = df_new[['7', '8', '9']].replace("Strong Agree", "Agree")
df_before = df_new.copy()
result_1, rule_1 = TestEncoder.encoder_3.transform(df_new, mode='any', return_rule=True)
result_2, rule_2 = TestEncoder.encoder_3.transform(df_new, mode='strict', return_rule=True)
assert_frame_equal(df_before, df_new)
expected_1 = df_new.copy()
expected_1[TestEncoder.question_col] = df_new[TestEncoder.question_col].replace(TestEncoder.scale_a)
assert_frame_equal(result_1, expected_1, check_dtype=False)
expected_2 = expected_1.copy()
expected_2[["7", "8", "9"]] = df_new[["7", "8", "9"]]
assert_frame_equal(result_2, expected_2, check_dtype=False)  # api: pandas.testing.assert_frame_equal
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
# Created on Feb-24-19 22:06
# pca.py
# @author: <NAME>
'''
import pandas as pd
def pca(df, num, **kw):
max_value = df.max()[0]
min_value = df.min()[0]
window_size = int(kw['algorithm_param']['PCA']['window_size'])
dl_out = []
loc = 0
while loc < len(df):
x = [i for i in df[0][loc:loc + window_size]]
window_max = max(x)
window_min = min(x)
if window_max - window_min < (max_value - min_value) * 0.1 * 2:
x = [(window_max + window_min) / 2] * window_size
else:
pass
new_data = pd.DataFrame(x)  # api: pandas.DataFrame
import pandas as pd
ser = pd.Series(["NTU", "NCKU", "NCU", "NYCU"])
# Using drop() to remove the element with index label 3.
ser = ser.drop(3)
print(ser)
# Using drop() with argument index.
ser = | pd.Series(["NTU", "NCKU", "NCU", "NYCU"], index=["Bes", "Dec", "Thr", "Flo"]) | pandas.Series |
"""
@brief test log(time=400s)
"""
import os
import unittest
from logging import getLogger
from pandas import DataFrame
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import (
get_temp_folder, ExtTestCase, skipif_appveyor)
from sklearn.ensemble import AdaBoostRegressor
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import enumerate_validated_operator_opsets, summary_report
from mlprodict.tools.asv_options_helper import get_opset_number_from_onnx
ignored_warnings = (UserWarning, ConvergenceWarning,
RuntimeWarning, FutureWarning)
class TestOnnxrtValidateOnnxRuntime(ExtTestCase):
@skipif_appveyor('crashes')
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_KMeans(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
op = get_opset_number_from_onnx()
rows = list(enumerate_validated_operator_opsets(
verbose, models={"KMeans"},
fLOG=myprint,
runtime='onnxruntime2', debug=True,
filter_exp=lambda m, p: '-64' not in p,
opset_min=op, opset_max=op))
self.assertGreater(len(rows), 1)
# self.assertGreater(len(buffer), 1)
@skipif_appveyor('crashes')
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_BernoulliNB(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
debug = False
rows = list(enumerate_validated_operator_opsets(
verbose, models={"BernoulliNB"},
fLOG=myprint,
runtime='onnxruntime2', debug=debug))
self.assertGreater(len(rows), 1)
self.assertGreater(len(buffer), 1 if debug else 0)
@skipif_appveyor('crashes')
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_AdaBoostRegressor(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
debug = False
rows = list(enumerate_validated_operator_opsets(
verbose, models={"AdaBoostRegressor"},
fLOG=myprint,
runtime='onnxruntime2', debug=debug,
filter_exp=lambda m, p: "-64" not in p))
self.assertGreater(len(rows), 1)
self.assertGreater(len(buffer), 1 if debug else 0)
@skipif_appveyor('crashes')
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_LogisticRegression(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"LogisticRegression"},
fLOG=myprint,
runtime='onnxruntime2', debug=False,
filter_exp=lambda m, p: '-64' not in p))
self.assertGreater(len(rows), 1)
# self.assertGreater(len(buffer), 1)
@skipif_appveyor('crashes')
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_all_onnxruntime(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
temp = get_temp_folder(
__file__, "temp_validate_sklearn_operators_all_onnxruntime2")
if False: # pylint: disable=W0125
rows = list(enumerate_validated_operator_opsets(
verbose, models={"PLSCanonical"},
fLOG=fLOG,
runtime='onnxruntime2', debug=True))
else:
rows = []
for row in enumerate_validated_operator_opsets(
verbose, debug=None, fLOG=fLOG, runtime='onnxruntime2',
benchmark=False, dump_folder=temp,
filter_exp=lambda m, s: m not in {AdaBoostRegressor,
GaussianProcessClassifier}):
rows.append(row)
if len(rows) > 30:
break
self.assertGreater(len(rows), 1)
df = DataFrame(rows)  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 2021, last edited 27 Oct 2021
Fiber flow emissions calculations module - class version
Inputs:
Excel file with old PPI market & emissions data ('FiberModelAll_Python_v3-yields.xlsx')
Outputs:
Dict of keys 'old','new','forest','trade' with emissions calcs
(*testing inputs*
x = 'FiberModelAll_Python_v2.xlsx'
f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
pbpVolOld.columns = [x[:-2] for x in pbpVolOld.columns]
consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=34, nrows=3, index_col=0)
rLevel = pd.read_excel(x, 'Demand', usecols="F:K", skiprows=16, nrows=5)
rLevel = {t: list(rLevel[t][np.isfinite(rLevel[t])].values) for t in fProd}
fProd = [t for t in f2pVolOld.iloc[:,:6].columns]
fProdM = [t for t in f2pVolOld.iloc[:,:7].columns]
rFiber = f2pVolOld.index[:16]
vFiber = f2pVolOld.index[16:]
rPulp = [p for p in pbpVolOld.index if 'Rec' in p]
vPulp = [q for q in pbpVolOld.index if 'Vir' in q]
fPulp = [f for f in pbpVolOld.index]
import numpy as np
f2pYld = pd.read_excel(x, 'Fiber', usecols="I:O", skiprows=1, nrows=21)
f2pYld.index = np.concatenate([rFiber.values, vFiber.values], axis=0)
pulpYld = pd.read_excel(x, 'Pulp', usecols="D", skiprows=1, nrows=14)
pulpYld.index = rPulp + vPulp
transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
rsdlbio = rsdlbio.fillna(0)
rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
rsdlfos = rsdlfos.fillna(0)
woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
exportOld.iloc[:,:-1] = exportOld.iloc[:,:-1]
exportNew = exportOld.iloc[:,:-1] * 1.5
exportNew.columns = ['exportNew']
exportNew = exportNew.assign(TransCode=exportOld['TransCode'].values)
fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
)
@author: <NAME>
"""
import pandas as pd
import numpy as np
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
# f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by pulp name
# pbpVolNew (df) - pulp by product volume; indexed by pulp name
# consCollNew (df) - domestic consumption, collection, and recovery by product
# demandNew (df) - new demand by product; indexed by rec level
uC = 0.907185 # unit conversion of MM US ton to Mg/metric ton
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # has .1 after column names for pandas duplicate
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # has .1 after column names for some reason
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
cls.rsdlbio = cls.rsdlbio.fillna(0)
cls.rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
cls.rsdlfos = cls.rsdlfos.fillna(0)
cls.transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
cls.transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)  # api: pandas.read_excel
import numpy as np
import pandas as pd
import random
c_u = 100
m_u = 1
c_p = 0.1
m_p = 0.01
censor_mean = 103
censor_sig = 0
possible_xes = [0,1,2,3,4,5,6,7]
nrows = 100000
x = np.random.choice(possible_xes, nrows)
u = c_u + m_u*x
p = c_p + m_p*x
true_y = np.random.normal(u,u*p,nrows)
censor_y = np.random.normal(censor_mean, censor_sig, nrows)
dfDict = {'x':x, 'true_y':true_y, 'censor_y': censor_y}
df = pd.DataFrame(dfDict)  # api: pandas.DataFrame
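# Illustrative next step (not part of the original snippet): form the censored
# observations by truncating at the censoring value.
# df['observed_y'] = np.minimum(df['true_y'], df['censor_y'])
# df['censored'] = df['true_y'] > df['censor_y']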
from flask import Flask, redirect, request, url_for,render_template
from application import app, db
from application.models import Products,Orders,Customers #,SummaryOrder,OrdersSummary,ItemTable,OrdersTable,,CustomersTable
import sqlalchemy as sql
import pandas as pd
from datetime import datetime
@app.route('/')
def home():
return render_template('home.html',title='home')
# create customers
@app.route('/customers/add', methods=['GET','POST'])
def add_customer():
return ('<h1>Add New Customer</h1><br>' + render_template('customerform.html',title='add_customer')
+('<br><br> <a href="/customers" type="button">Return to Customers home</a> </br>')
+ ('<br> <a href="/customers/update2" type="button">Update customer records</a> </br>')
+ ('<br> <a href="/" type="button">Return to home</a> </br>'))
@app.route('/customers/add/customer',methods=['GET','POST'])
def add_customers():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
if request.method=='POST':
if len(df.loc[(df.first_name == request.form['first_name']) & (df.last_name == request.form['last_name']) & ((df.customer_dob == request.form['customer_dob'])|(df.customer_address == request.form['customer_address']))]) == 0:
new_first_name = request.form['first_name']
new_last_name = request.form['last_name']
new_customer_address = request.form['customer_address']
new_customer_dob = request.form['customer_dob']
new_customer = Customers(first_name=new_first_name,last_name=new_last_name,customer_address=new_customer_address,customer_dob=new_customer_dob)#,prepaid_balance=new_prepaid_balance)
db.session.add(new_customer)
db.session.commit()
return redirect(url_for('read_customers'))
else:
return ("<h4><br>"+"It looks like " + str(request.form['first_name']) + " " + str(request.form['last_name'])+ " already exists in the system." + "</h4>" + '<a href="/customers/add" type="button">Try again?</a> </br>'
+ ('<br><br> <a href="/customers/update2" type="button">Update customer records</a> </br>')+('<br> <a href="/customers" type="button">Return to Customers home</a> </br>'))
# read customers
@app.route('/customers')
def read_customers():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
df.rename(columns={'id':'Customer ID','first_name':'First Name','last_name':'Surname','customer_address':'Address','customer_dob':'Date of Birth'},inplace=True)
html = df.to_html()
return ('<h1>Customers</h1><br>')+html+('<br> <a href="/customers/add" type="button">Add new customer</a> </br>')+('<br> <a href="/customers/update2" type="button">Edit customer records (Update/Delete)</a> </br>')+('<br><br><br> <a href="/products">Navigate to Products</a><br><br>')+('<a href="/orders">Navigate to Orders</a>')+('<br><br> <a href="/" type="button">Return to Home</a> </br>')
# update customers
@app.route('/customers/update2')
def customer_update_page():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
df1 = df.copy()
df1['Update'] = 'update'
df1['Delete'] = 'delete'
for n in range(len(df1)):
df1.iloc[n,-1] = "<a href=/customers/delete/"+ str(df1.loc[n,'id']) + ">delete</a>"
df1.iloc[n,-2] = "<a href=/customers/update/"+ str(df1.loc[n,'id']) + ">update</a>"
df1.rename(columns={'id':'Customer ID','first_name':'First Name','last_name':'Surname','customer_address':'Address','customer_dob':'Date of Birth'},inplace=True)
html = df1.to_html(render_links=True,escape=False)
return ('<h1>Update Customers</h1><br>')+ html + ('<br> <a href="/customers">Back to Customers</a> </br>') + ('<br> <a href="/products">Navigate to Products</a> </br>') + ('<br> <a href="/orders">Navigate to Orders</a> </br>')
@app.route('/customers/update', methods = ['GET','POST'])
def update_customer():
    if request.method=='POST':
        update_record = Customers.query.filter_by(id=request.form['entry']).first()
update_record.first_name = request.form['first_name']
update_record.last_name = request.form['last_name']
update_record.customer_address = request.form['customer_address']
update_record.customer_dob = request.form['customer_dob']
db.session.commit()
return redirect(url_for('read_customers'))
@app.route('/customers/update/<int:customer_record>',methods=['GET','POST'])
def customer_update1(customer_record):
people = str(customer_record)
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('customers', sql_engine)
df1 = df.loc[df.id==int(customer_record),:]
    df1.rename(columns={'id':'Customer ID','first_name':'First Name','last_name':'Surname','customer_address':'Address','customer_dob':'Date of Birth'},inplace=True)
html = df1.to_html(escape=False)
record_no = customer_record
return ('<h1>Update Customers</h1><br>')+ html + "<br><br>" + render_template('customer_update.html',value=record_no) +('<br> <a href="/customers">Back to Customers</a> </br>')+('<br> <a href="/products">Navigate to Products</a> </br>')+('<br> <a href="/orders">Navigate to Orders</a> </br>')
# delete customers
@app.route('/customers/delete/<int:customer_>')
def delete_customers(customer_):
if Orders.query.filter_by(fk_customer_id=customer_).count() == 0:
customer_to_delete = Customers.query.filter_by(id=customer_).first()
db.session.delete(customer_to_delete)
db.session.commit()
return redirect(url_for('read_customers'))
else:
return "Oops! The customer you tried to delete has already placed an order. Please update the orders records if you need to remove this customer." +('<br> <a href="/customers">Return to Customers?</a> </br>')
# create products
@app.route('/products/add', methods=['GET','POST'])
def add_product():
    return '<h1>Add New Product</h1><br>'+ render_template('stockform.html',title='add_item')+('<br><br> <a href="/products" type="button">Return to Products home</a> </br>')+ ('<br> <a href="/products/update2" type="button">Update product records</a> </br>')
@app.route('/products/add/item',methods=['GET','POST'])
def add_products():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
if request.method=='POST':
if len(df.loc[(df.product_name == request.form['product_name']) & (df.product_brand == request.form['brand'])]) == 0:
new_product_name = request.form['product_name']
new_product_brand = request.form['brand']
new_product_quantity = request.form['quantity']
new_product_itemcost = request.form['itemcost']
new_product_price = request.form['price']
new_product = Products(product_name=new_product_name,product_brand=new_product_brand,quantity_in_stock=new_product_quantity,cost_per_item=new_product_itemcost,price=new_product_price)
db.session.add(new_product)
db.session.commit()
return redirect(url_for('read_products'))
else:
return ("<h4><br>"+"It looks like " + str(request.form['brand']) + " " + str(request.form['product_name'])+ " already exists in the system." + "</h4>" + '<a href="/products/add" type="button">Try again?</a> </br>'
+ ('<br><br> <a href="/products/update2" type="button">Update products records</a> </br>')+('<br> <a href="/products" type="button">Return to Products home</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>'))
# read products
@app.route('/products')
def read_products():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df.price = ('£'+df.price.astype('str')).str.ljust(5,'0')
df.cost_per_item = ('£'+df.cost_per_item.astype('str')).str.ljust(5,'0')
df.rename(columns={'id':'Product ID','product_name':'Product','product_brand':'Brand','quantity_in_stock':'Quantity in stock','cost_per_item':'Individual Cost','price':'Price'},inplace=True)
html = df.to_html()
return ('<h1>Products</h1><br>')+html+('<br> <a href="/products/add">Add new item to stocklist</a> </br>')+('<br> <a href="/products/update2">Edit stocklist (Update/Delete)</a> </br><br>')+('<br><br> <a href="/orders">Navigate to Orders</a> </br>')+('<br> <a href="/customers">Navigate to Customers</a> </br>') +('<br><br> <a href="/" type="button">Return to Home</a> </br>')
# update products
@app.route('/products/update2')
def products_update_page():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df1 = df.copy()
df1['Update'] = 'update'
df1['Delete'] = 'delete'
for n in range(len(df1)):
df1.iloc[n,-1] = "<a href=/products/delete/"+ str(df1.loc[n,'id']) + ">delete</a>"
df1.iloc[n,-2] = "<a href=/products/update/"+ str(df1.loc[n,'id']) + ">update</a>"
df1.price = ('£' + df1.price.astype('str')).str.ljust(5,'0')
df1.cost_per_item = ('£' + df1.cost_per_item.astype('str')).str.ljust(5,'0')
df1.rename(columns={'id':'Product ID','product_name':'Product','product_brand':'Brand','quantity_in_stock':'Quantity in stock','cost_per_item':'Individual Cost','price':'Price'},inplace=True)
html = df1.to_html(render_links=True,escape=False)
return ('<h1>Update Product List</h1><br>')+ html +('<br> <a href="/products">Back to Products home</a> </br>')+('<br> <br><a href="/customers">Navigate to Customers</a> </br>')+('<br> <a href="/orders">Navigate to Orders</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>')
@app.route('/products/update', methods = ['GET','POST'])
def update_product():
if request.method=='POST':
update_record = Products.query.filter_by(id=request.form['entry']).first()
update_record.product_name = request.form['product_name']
update_record.product_brand = request.form['product_brand']
update_record.price = request.form['price']
update_record.quantity_in_stock = request.form['quantity_in_stock']
update_record.cost_per_item = request.form['cost_per_item']
db.session.commit()
return redirect(url_for('products_update_page'))
@app.route('/products/update/<int:product_record>',methods=['GET','POST'])
def product_update1(product_record):
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df1 = df.loc[df.id==int(product_record),:]
df1.rename(columns={'id':'Product ID','product_name':'Product','product_brand':'Brand','quantity_in_stock':'Quantity in stock','cost_per_item':'Individual Cost','price':'Price'},inplace=True)
html = df1.to_html(escape=False)
record_no = product_record
return ('<h1>Update Products List</h1><br>')+html + "<br><br>" + render_template('product_update.html', value1 = record_no) + ('<br> <a href="/products">Back to Products</a> </br>')+('<br> <a href="/customers">Navigate to Customers</a> </br>')+('<br> <a href="/orders">Navigate to Orders</a> </br>')+('<br> <a href="/products" type="button">Return to Products home</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>')
# delete products
@app.route('/products/delete/<int:product_>',methods=['GET','POST'])
def delete_products(product_):
if Orders.query.filter_by(fk_product_id=product_).count() == 0:
product_to_delete = Products.query.filter_by(id=product_).first()
db.session.delete(product_to_delete)
db.session.commit()
return redirect(url_for('read_products'))
else: return "Oops! You tried to delete a product that has already been purchased"+('<br> <br><a href="/products/update2">Try Again?</a> </br>')+('<br> <br><br><a href="/products">Return to Products</a> </br>') +('<br> <a href="/products" type="button">Return to Products home</a> </br>')+('<br> <br><a href="/" type="button">Return to Home</a> </br>')
# create orders
@app.route('/orders/add', methods = ['GET','POST'])
def add_order():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('products', sql_engine)
df.price = ('£' + df.price.astype('str')).str.ljust(5,'0')
    df.rename(columns={'id':'Product ID'},inplace=True)
df2 = pd.read_sql_table('customers', sql_engine,parse_dates='customer_dob')
df['_______________________'] = ''
df_join = pd.concat([df,df2],axis=1).fillna('.')
date = datetime.today().strftime('%Y-%m-%d')
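    # Approximate customer age in whole years: (today - date of birth) in days, divided by 365.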
df_join['Age'] = (datetime.today() - df_join.customer_dob).astype('str').str.split(' ').str[0]
df_join.Age = (df_join.Age.astype('int')/365).astype('int')
df_join.drop(columns=['cost_per_item','customer_dob','customer_address'],inplace=True)
    df_join.rename(columns={'id':'Customer ID','product_name':'Product','price':'Price','product_brand':'Brand','quantity_in_stock':'Quantity in stock','first_name':'First Name','last_name':'Surname'},inplace=True)
html = df_join.to_html(escape=False)
return '<h1>Add New Order</h1><br>' + render_template('orderform.html',title='add_order', value = date) + '<br><br>' + html +('<br> <a href="/products">Navigate to Products</a> </br>')+('<br> <a href="/customers">Navigate to Customers</a> </br>')
@app.route('/orders/add/order',methods=['GET','POST'])
def add_orders():
if request.method=='POST':
if int(request.form['quantity_ordered']) <= int(Products.query.filter_by(id = int(request.form['fk_product_id'])).first().quantity_in_stock):
new_purchase_date = request.form['date']
new_product_price = request.form['price']
new_cash_payment = request.form['cash_payment']
new_fk_customer_id = request.form['fk_customer_id']
new_fk_product_id = request.form['fk_product_id']
new_quantity_ordered = request.form['quantity_ordered']
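            # Validate that the submitted cash payment equals quantity ordered * current unit price
            # (rounded to 2 decimals) before accepting the order.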
if round(float(request.form['cash_payment']),2) == round(float(Products.query.filter_by(id = new_fk_product_id).first().price) * (float(request.form['quantity_ordered'])),2):
if round(float(new_product_price),2) == round(float(Products.query.filter_by(id = new_fk_product_id).first().price),2):
Products.query.filter_by(id = int(new_fk_product_id)).first().quantity_in_stock = int(Products.query.filter_by(id = int(new_fk_product_id)).first().quantity_in_stock) - int(new_quantity_ordered)
db.session.commit()
new_order = Orders(purchase_date=new_purchase_date,price=new_product_price,cash_payment=new_cash_payment,quantity_ordered=new_quantity_ordered,fk_customer_id=new_fk_customer_id,fk_product_id=new_fk_product_id)#,prepaid_payment=new_prepaid_payment
db.session.add(new_order)
db.session.commit()
return redirect(url_for('read_orders'))
else:
return str(round(float(Products.query.filter_by(id = new_fk_product_id).first().price),2))+ "Oops, that wasn't the right price"+('<br> <a href="/orders/add">Try again?</a> </br>')
else:
return ("<h3><br>Oops, that wasn't right. The total price should be " + "£"+str(round((float(Products.query.filter_by(id = new_fk_product_id).first().price)) * float(request.form['quantity_ordered']),2)).ljust(4,'0') +"</h3>"+
('<br> <a href="/orders/add">Try Again?</a> </br>') +
('<br> <a href="/orders/update2">Update Orders table</a> </br>') +
('<br> <br><a href="/orders">Return to Orders home</a> </br>')+
('<br> <a href="/">Return to Home</a> </br>'))
else:
return ("<h3><br>Sorry, there isn't enough of that product in stock</h3>" +
('<br> <a href="/orders/add">Try Again?</a> </br>') +
('<br> <a href="/orders/update2">Update Orders table</a> </br>') +
('<br> <br><a href="/orders">Return to Orders home</a> </br>')+
('<br> <a href="/">Return to Home</a> </br>'))
### read orders
@app.route('/orders')
def read_orders():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('orders', sql_engine)
df1 = pd.read_sql_table('customers', sql_engine)
df2 = pd.read_sql_table('products', sql_engine)
df_join = pd.merge(left=(pd.merge(df,df1,how='left',left_on='fk_customer_id',right_on='id')),right=df2,how='left',left_on='fk_product_id',right_on='id')[['purchase_date','first_name','last_name','product_name','product_brand','price_x','quantity_ordered','id_x']]
df_join.price_x = ('£'+df_join.price_x.astype('str')).str.ljust(5,'0')
df_join.rename(columns={'purchase_date':'Date','first_name':'First Name','last_name':'Surname','product_name':'Product','product_brand':'Brand','price_x':'Price','quantity_ordered':'Quantity','id_x':'Order ID'},inplace=True)
html = df_join.to_html()
return ('<h1>Orders</h1><br>')+ html + ('<br><a href="/orders/add">Add new order</a> </br><br>')+('<a href="/orders/update2">Edit order records (Update/Delete)</a> </br>')+('<br><br><br> <a href="/products">Navigate to Products</a> </br><br>')+(' <a href="/customers">Navigate to Customers</a> </br>')+('<br> <br><a href="/">Return to Home</a> </br>')
# update order
@app.route('/orders/update2')
def orders_update_page():
connect_string ="mysql+pymysql://root:[email protected]/Tuckshop"
sql_engine = sql.create_engine(connect_string)
df = pd.read_sql_table('orders', sql_engine)
    df1 = pd.read_sql_table('customers', sql_engine)
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
import pandas as pd
import numpy as np
import argparse
import requests
import tempfile
import logging
import sklearn
import os
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
logger.info(f'Using Sklearn version: {sklearn.__version__}')
if __name__ == '__main__':
logger.info('Sklearn Preprocessing Job [Start]')
base_dir = '/opt/ml/processing'
df = pd.read_csv(f'{base_dir}/input/abalone.csv')
y = df.pop('rings')
cols = df.columns
logger.info(f'Columns = {cols}')
numeric_features = list(df.columns)
numeric_features.remove('sex')
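    # All remaining columns are numeric; 'sex' is the only categorical feature and is
    # imputed/one-hot encoded separately below.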
numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['sex']
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocess = ColumnTransformer(transformers=[('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
X_pre = preprocess.fit_transform(df)
y_pre = y.to_numpy().reshape(len(y), 1)
X = np.concatenate((y_pre, X_pre), axis=1)
np.random.shuffle(X)
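    # Split the shuffled data into ~70% train, 15% validation and 15% test.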
train, validation, test = np.split(X, [int(0.7 * len(X)), int(0.85 * len(X))])
pd.DataFrame(train).to_csv(f'{base_dir}/train/train.csv', header=False, index=False)
pd.DataFrame(validation).to_csv(f'{base_dir}/validation/validation.csv', header=False, index=False)
    pd.DataFrame(test).to_csv(f'{base_dir}/test/test.csv', header=False, index=False)
# -*- coding: utf-8 -*-
""" Simple multi-area model for Nordic electricity market
Created on Wed Jan 16 11:31:07 2019
@author: elisn
Notes:
1 - For conversion between dates (YYYYMMDD:HH) and weeks (YYYY:WW) weeks are counted as starting during the first hour
in a year and lasting 7 days, except for the last week which covers the remaining hours in the year. Thus all years
are assumed to have 52 weeks. This definition is not according to ISO calendar standard but is legacy from the
first version of the model, probably changing it would not significantly change the results. Also note that the
MAF inflow data used also does not follow ISO calendar standard for weeks but counts weeks as starting with Sundays.
2 - It is not known if the ENTSO-E reservoir data corresponds to the reservoir level at the beginning/end of the week.
This can decrease the accuracy of the model for short time periods but does not affect much when simulating a whole year
A request has been made to find the answer from ENTSO-E
3 - For the exchange GB-NL, February 20-27 2016, the flows and scheduled exchanges are outside the implicitly
allocated day ahead capacity, it's not known why
"""
### EXTERNAL LIBRARIES ###
import time
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pathlib import Path
import datetime
import os
import pyomo.environ as pye
from contextlib import redirect_stdout
##############################################################
######### MODULES FROM POWER_DATABASES #######################
##############################################################
import maf_hydro_data
import maf_pecd_data
import entsoe_transparency_db as entsoe
from help_functions import compact_xaxis_ticks, \
week_to_range, str_to_date, intersection, duration_curve, interp_time, \
interpolate_weekly_values, time_to_bin, err_func, curtailment_statistics
### INTERNAL MODULES ###
from offer_curves import SupplyCurve
from model_definitions import MWtoGW, GWtoMW, cm_per_inch, std_fig_size, area_to_country, country_to_areas, entsoe_type_map, synchronous_areas, colors, \
nordpool_capacities, generators_def, solver_executables, solver_stats, bidz2maf_pecd, co2_price_ets, \
new_trans_cap, GWtoTW, TWtoGW, all_areas
from help_classes import EmptyObject, Error
from week_conversion import WeekDef
class Model:
""" Contains all data processing not related to specific solver api (gurobi/pyomo)
NAMING CONVENTIONS:
df_xxx - dataframe obtained from external database
TIME CONVENTIONS:
For average energy quantities, time stamp marks beginning of the (hourly) interval. This is consistent with
convention in databases, since the beginning of the hour has been used to time stamp hourly data.
starttime - beginning of first period in model
endtime - end of last period in model
timerange - all hours modelled (beginning of hour)
idx_time - index of timerange, used to create time set in optimization model
timerange_p1 - all hours including endtime hour
Note: the data used is retrieved for all hours in timerange plus one extra hour, to allow for interpolation of
the data to higher resolution
"""
def __init__(self,name='default',path='D:/NordicModel/Results',db_path='D:/Data',
data_path='D:/NordicModel/InputData'):
self.name = name
self.data_path = Path(data_path)
self.db_path = Path(db_path)
self.res_path = Path(path) / name
self.fig_path = self.res_path / 'Figures'
self.root_path = self.res_path # points to root directory of this model
self.res_path.mkdir(exist_ok=True,parents=True)
self.fig_path.mkdir(exist_ok=True,parents=True)
self.runs = [] # store results from multiple model runs
self.res_time = {} # store runtime info
def update_path(self,path='D:/NordicModel/Results/case'):
""" Update path where figures and results are stored, without changing root path """
self.res_path = Path(path)
self.fig_path = self.res_path / 'Figures'
self.res_path.mkdir(exist_ok=True)
self.fig_path.mkdir(exist_ok=True)
def default_options(self):
""" Set default options for model """
############# BASIC OPTIONS ##############
self.opt_solver = 'ipopt' # solver to use, must be installed
self.opt_api = 'pyomo' # pyomo/gurobi (gurobi api only works if solver is also gurobi)
self.opt_solver_opts = {} # options to pass to solver (with pyomo api)
self.opt_start = '20180101'
self.opt_end = '20180108'
self.opt_weather_year = 2016 # used to get maf data, inflow data, and solar merra data
self.opt_load_scale = 1 # scale load by this factor
self.opt_loss = 0 # Fraction of energy lost in transmission
self.opt_nonnegative_data = ['inflow']
self.opt_countries = ['SE','DK','NO','FI','EE','LT','LV','PL','DE','NL','GB'] # modelled countries
self.opt_use_maf_pecd = False # use solar and wind data from MAF2020
self.opt_impute_limit = 30 # maximum number of values to interpolate in data
self.opt_impute_constant = { # constants used to impute remaining missing values in input data
'exchange':0, # for external exchanges
'solar':0,
}
self.opt_run_initialization = False # run low resolution model to get values for initialization
self.opt_init_delta = 168
# Note: initialization is useful for some solvers (e.g. ipopt) but may not be for others (e.g. gurobi)
self.opt_db_files = {
'capacity':'capacity.db',
'prices':'prices.db',
'exchange':'exchange.db',
'gen':'gen.db',
'unit':'unit.db',
'load':'load.db',
'reservoir':'reservoir.db',
'inflow':'inflow.db',
'maf_hydro':'maf_hydro.db',
'maf_pecd':'maf_pecd.db',
}
self.opt_err_labl = 'MAE' # should be consistent with the error computed in err_func
########## COST OPTIONS ##########################
self.opt_costfit_tag = '2019' # use this costfit from the input parameters
self.opt_hydro_cost = False # include fitted hydro costs, not properly implemented
self.opt_default_thermal_cost = 40 # default value for thermal cost
self.opt_loadshed_cost = 3000 # cost for demand curtailment
self.opt_nuclear_cost = 7.35 # default value for nuclear cost
self.opt_wind_cost = 1 # low wind cost in EUR/MWh to favour wind curtailment over solar
self.opt_use_var_cost = True # use variable costs
# Source for variable cost data: data['costfit_shifted']['tag']
# replace extreme cost fits (e.g. decreasing mc or very sharply increasing mc with fuel-based constant MC)
self.opt_overwrite_bad_costfits = True
self.opt_c2_min = 1e-5
self.opt_c2_max = 0.5
# specify co2 price, this is added to the price coefficient MC(p)=k*p+m+(co2_price-co2_price(offset_year))
self.opt_co2_price = None
self.opt_co2_price_offset_year = 2016 # if set to year, this assumes m already contains the cost for that year
############ TECHNICAL LIMITS #########################
self.opt_capacity_year = 2019 # use generation capacity from entsoe for this year
self.opt_hvdc_max_ramp = 600 # 600 MW/hour
self.opt_pmax_type = 'capacity'
self.opt_pmax_type_hydro = 'stats'
# Options for pmax: 'stats' - from gen_stats.xlsx (production statistics)
# 'capacity' - from entsoe capacity per type database
# For hydro the source for the maximum capacity is chosen separately
self.opt_pmin_zero = False # put pmin = 0
######### NUCLEAR OPTIONS ################
self.opt_nucl_min_lvl = 0.65 # nuclear can ramp down to this level
self.opt_nucl_ramp = None # overwrite nuclear ramp rate (%/hour)
self.opt_nucl_add_cap = {
'SE3':0,
'FI':0,
'DE':0,
} # add this firm capacity to nuclear generation
# option to compute nuclear max levels from individual units for some areas, can be used to deactivate certain
# nuclear reactors in order to simulate scenarios, requires production data for individual units
self.opt_nucl_individual_units = []
# exclude these nuclear reactors when deciding maximum generation levels - only possible with opt_nucl_individual_units
self.opt_nucl_units_exclude = []
#self.opt_nucl_units_exclude = ['Ringhals block 1 G11','Ringhals block 1 G12','Ringhals block 2 G21','Ringhals block 2 G22']
######### HYDRO OPTIONS #################
self.opt_reservoir_offset = 168
self.opt_reservoir_data_normalized = True # use normalized reservoir data
self.opt_default_inflow = 100
self.opt_default_inflow_area = { # GWh/week, per area
'DE':346, # 180 TWh yearly production
'PL':45,
'GB':107,
}
self.opt_use_maf_inflow = False # use MAF inflow data or inflow calculated from ENTSO-E data
# inflow interpolation:
# constant (i.e. constant for one week)
# linear (linear ramp rate between weeks)
self.opt_inflow_interp = 'linear'
self.opt_hydro_daily = False # daily reservoir constraints (instead of hourly)
self.opt_reservoir_start_fill = 0.5 # if reservoir data does not exist, assume default filling value
self.opt_reservoir_end_fill = 0.5
# share of inflow which is run of river, if no data available
self.opt_ror_fraction = {
'SE1':0.13,
'SE2':0.21,
'SE3':0.27,
'SE4':0.3,
'NO1':0.25,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'FI':0.27,
'LV':0.4,
'LT':0.5,
'PL':0.8,
'DE':0.9,
'GB':0.4,
}
self.opt_reservoir_capacity = { # GWh
'NO1':6507,
'NO2':33388,
'NO3':8737,
'NO4':19321,
'NO5':16459,
'SE1':13688,
'SE2':15037,
'SE3':2517,
'SE4':216,
'FI':4512,
'LT':12.2,
'LV':11.2,
'PL':2.3,
'DE':1263,
'GB':26.4,
}
# pumping capacity
self.opt_pump_capacity = { # in MW, from MAF data
'PL':1660,
'DE':7960,
'GB':2680,
'NO1':130,
'NO2':430,
'NO3':70,
'NO5':470,
'LT':720,
}
self.opt_pump_reservoir = { # in GWh
'PL':6.3,
'DE':20,
}
# pumping efficiency
self.opt_pump_efficiency = 0.75
############# RESERVE OPTIONS ################
self.opt_use_reserves = False # include reserve requirements
self.opt_country_reserves = False # reserves by country instead of by area (more flexibility)
self.opt_reserves_fcrn = { # this is the allocation of 600 MW FCR-N
'SE':245,
'NO':215,
'DK':0,
'FI':140,
}
self.opt_reserves_fcrd = 1200 # FCR-D, allocated in same proportion as FCR-N
######## EXTERNAL AREAS OPTIONS #################
# the price will be set for these price areas, and the export/import will be variable instead of fixed
self.opt_set_external_price = ['DE','PL']
self.opt_default_prices = {
'PL':40, # use this price for external connections if no other is avaialable
'RU':40,
'DE':40,
'NL':40,
'GB':40,
}
self.opt_exchange_data_type = 'flow'
########### TRANSFER CAPACITY OPTIONS #####################
self.opt_use_var_exchange_cap = True
self.opt_nominal_capacity_connections = [('NL','GB'),]
        # these connections will always use nominal exchange capacity
self.opt_min_exchange_cap = 100 # minimum variable transfer capacity (MW)
# may be set to >= 2018 to include additional future transmission capacity,
# from new_trans_cap in model_definitions
self.opt_exchange_cap_year = None
########## WIND OPTIONS #############
self.opt_wind_scale_factor = {
'SE1':1,
'SE2':1,
'SE3':1,
'SE4':1,
}
self.opt_wind_capacity_onsh = {
'DK1':3725,
'DK2':756,
'EE':329,
'FI':2422,
'LT':540,
'LV':84,
'NO1':166,
'NO2':1145,
'NO3':1090,
'NO4':668,
'NO5':0,
'SE1':1838,
'SE2':3849,
'SE3':2780,
'SE4':1581,
'PL':5952,
'NL':3973,
'DE':53338,
'GB':14282,
}
self.opt_wind_capacity_offsh = {
'DK1':1277,
'DK2':423,
'EE':0,
'FI':0,
'LT':0,
'LV':0,
'NO1':0,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'SE1':0,
'SE2':0,
'SE3':0,
'SE4':0,
'PL':0,
'NL':1709,
'DE':7504,
'GB':10383,
}
########### SOLAR OPTIONS #############
# Note: the solar capacities only apply if opt_use_maf_pecd is True, otherwise ENTSO-E production data is used for solar
# manually specify solar capacity for areas:
self.opt_solar_cap_by_area = {
'DK1':878, # from ENTSO-E
'DK2':422,
'EE':164,
'FI':215,
'LT':169,
'LV':11,
'SE1':9, # from Energiåret 2020 (energiföretagen)
'SE2':67,
'SE3':774,
'SE4':240,
'PL':1310,
'NL':5710,
'DE':48376,
'GB':13563,
}
# if solar capacity for an area is not specified, the aggregated value
# for that country is used, weighted by the areas share of total load
self.opt_solar_cap_by_country = { # from IRENA Capacity Statistics 2020
'DK':1079,
'FI':215,
'NO':90,
'SE':644,
'LV':3,
'LT':103,
'EE':107,}
########## INERTIA OPTIONS ####################
self.opt_use_inertia_constr = False # inertia constraints
self.opt_min_kinetic_energy = 113 # GWs
# calculation of kinetic energy: Ek = H*P/(cf*pf)
# inertia constants from Persson (2017) Kinetic Energy Estimation in the Nordic System
self.opt_inertia_constants = {
'SE':{'Hydro':4.5,'Thermal':2.9,'Nuclear':6.2,},
'NO':{'Hydro':2.9,'Thermal':2.5,},
'FI':{'Hydro':2.8,'Thermal':4.4,'Nuclear':6.6,},
'DK':{'Thermal':4.5,},
}
# assumption about power factor pf
self.opt_inertia_pf = {
'SE':{'Hydro':0.9,'Thermal':0.9,'Nuclear':0.9,},
'NO':{'Hydro':0.9,'Thermal':0.9,},
'FI':{'Hydro':0.9,'Thermal':0.9,'Nuclear':0.9,},
'DK':{'Thermal':0.9,},
}
# assumption about capacity factor cf
self.opt_inertia_cf = {
'SE':{'Hydro':0.8,'Thermal':1,'Nuclear':1,},
'NO':{'Hydro':0.8,'Thermal':1,},
'FI':{'Hydro':0.8,'Thermal':1,'Nuclear':1,},
'DK':{'Thermal':1,},
}
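        # Illustrative example of the kinetic energy formula above (not used by the code):
        # 5 GW of Swedish hydro with H = 4.5 s, cf = 0.8 and pf = 0.9 contributes
        # Ek = 4.5*5/(0.8*0.9) ≈ 31 GWs towards opt_min_kinetic_energy.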
####### ROUNDING VALUES ##############
self.opt_bound_cut = { # round values below this threshold to zero, to avoid small coefficients
'max_SOLAR':1e-4,
'max_WIND':1e-4,
'min_PG':1e-4,
}
######## FIGURE OPTIONS ##################
self.fopt_no_plots = False
self.fopt_plots = {
'gentype':True,
'gentot':True,
'gentot_bar':False,
'renewables':False,
'transfer_internal':True,
'transfer_external':True,
'reservoir':False,
'price':False,
'losses':False,
'load_curtailment':False,
'inertia':False,
'hydro_duration':False,
'wind_curtailment':False,
}
self.fopt_plot_weeks = []
self.fopt_use_titles = True
self.fopt_show_rmse = True # also show absolute RMSE on fopt_plots
self.fopt_eps = False
self.fopt_print_text = False # print model to text file
self.fopt_print_dual_text = False # print dual to text file
self.fopt_dpi_qual = 1000
# control inset in plot
self.fopt_inset_date = None
self.fopt_inset_days = 5
self.fopt_calc_rmse = { # some rmse calculations need additional data
'price':True,
'transfer':True
}
self.fopt_rmse_transfer_data_type = 'flow'
##### OPTIONS TO PRINT OUTPUT ######
self.opt_print = {
'init':True,
'solver':True,
'setup':True,
'postprocess':True,
'check':True,
}
self.default_pp_opt()
def default_pp_opt(self):
########## OPTIONS CONTROLLING POST PROCESSING ###############
self.pp_opt = EmptyObject()
self.pp_opt.get_vars = ['SPILLAGE','PG','RES','X1','X2','WIND','XEXT','LS','SOLAR','HROR','PUMP','REL','PRES']
self.pp_opt.inst_vars = ['RES','PRES']
self.pp_opt.daily_vars = ['RES','SPILLAGE'] # daily variables if opt_hydro_daily is True
# Note: duals only obtained only if the constraint exists (some constraints are optional)
self.pp_opt.get_duals = ['POWER_BALANCE','RESERVOIR_BALANCE','HVDC_RAMP','GEN_RAMP',
'RESERVES_UP','RESERVES_DW','FIX_RESERVOIR','INERTIA']
self.pp_opt.get_cur_vars = ['WIND','SOLAR','HROR']
def effective_reservoir_range(self):
# effective ranges, based on min and max reservoir values from entso-e data
self.opt_reservoir_capacity = { # GWh
'SE1':11326,
'SE2':13533,
'SE3':1790,
'SE4':180,
'FI':2952,
'NO1':6078,
'NO2':21671,
'NO3':7719,
'NO4':14676,
'NO5':14090,
'LT':11.8,
'LV':9.4,
'DE':2430,
'PL':2800,
'GB':4100,
}
def vre_cap_2016(self):
""" Set wind and solar capacities to values from 2016, for validation of model with MAF data for this year """
pass
# SOLAR CAPACITY
self.opt_solar_cap_by_area = {
'DK1':421, # from ENTSO-E
'DK2':180,
'PL':77,
'NL':1429,
'DE':40679,
'GB':11914,
}
# if solar capacity for an area is not specified, the aggregated value
# for that country is used, weighted by the areas share of total load
self.opt_solar_cap_by_country = { # from IRENA Capacity Statistics
'DK':851,
'FI':39,
'NO':27,
'SE':153,
'LV':1,
'LT':70,
'EE':10,
}
# MAF WIND CAPACITY
self.opt_wind_capacity_onsh = {
'DK1':2966,
'DK2':608,
'EE':375,
'FI':2422,
'LT':366,
'LV':55,
'NO1':0,
'NO2':261,
'NO3':361,
'NO4':251,
'NO5':0,
'SE1':524,
'SE2':2289,
'SE3':2098,
'SE4':1609,
'PL':5494,
'NL':3284,
'DE':45435,
'GB':10833,
}
self.opt_wind_capacity_offsh = {
'DK1':843,
'DK2':428,
'EE':0,
'FI':0,
'LT':0,
'LV':0,
'NO1':0,
'NO2':0,
'NO3':0,
'NO4':0,
'NO5':0,
'SE1':0,
'SE2':0,
'SE3':0,
'SE4':0,
'PL':0,
'NL':357,
'DE':4000,
'GB':5293,
}
def run(self,save_model=False):
""" Run single case of model, for current settings """
pass
self.res_time = {}
t_0 = time.time()
self.setup()
self.res_time['pre'] = time.time() - t_0
t__0 = time.time()
self.setup_child_model()
self.res_time['cm'] = time.time() - t__0
self.solve()
t__0 = time.time()
self.post_process()
self.res_time['post'] = time.time() - t__0
self.res_time['tot'] = time.time() - t_0
if save_model:
self.save_model()
def run_years(self,years=range(2015,2017),append=False,save_full_model=False):
""" Run model using weather data for multiple years between start and end
save_full_model: Save full model using save_model for start year in root path
"""
start = years[0]
self.opt_weather_year = start
self.update_path(self.root_path/f'{start}')
# run first instance of model
self.run()
self.save_model_run(append=append)
if save_full_model:
self.update_path(self.root_path)
self.save_model()
# update weather data and run remaining instances
for year in years[1:]:
self.update_path(self.root_path/f'{year}')
self.re_run_year(year=year)
self.save_model_run(append=append)
def re_run_year(self,year=2015):
""" Update the weather year and re-run model """
print(f'---- RE-RUN YEAR {year} -----')
self.res_time = {}
t_0 = time.time()
self.opt_weather_year = year
self.setup_weather_indices()
self.get_inflow_data()
self.setup_inflow()
self.setup_run_of_river()
self.setup_inflow_feasibility()
self.max_HROR = {
(a,t):self.ror_hourly.at[self.timerange[t],a]*MWtoGW for a in self.ror_areas for t in self.idx_time
}
self.setup_solar()
self.setup_wind()
self.max_SOLAR = {
(a,t):self.solar.at[self.timerange[t],a] for a in self.solar_areas for t in self.idx_time
}
self.max_WIND = {
(a,t):self.wind.at[self.timerange[t],a]*self.opt_wind_scale_factor[a]
for a in self.wind_areas for t in self.idx_time
}
for name in ['max_WIND','max_SOLAR']:
self.round_bound(name)
#%%
if self.opt_run_initialization:
self.run_init_model()
#%%
self.res_time['pre'] = time.time() - t_0
t_1 = time.time()
self.cm.update_inflow()
self.cm.update_ror(self.max_HROR)
self.cm.update_solar(self.max_SOLAR)
self.cm.update_wind(self.max_WIND)
self.res_time['cm'] = time.time() - t_1
#%% rerun model
self.solve()
t_1 = time.time()
self.post_process()
self.res_time['post'] = time.time() - t_1
self.res_time['tot'] = time.time() - t_0
print(f'------ FINISHED YEAR {year} --------')
def load_results_years(self,vars=['res_PG','res_LS'],years=None):
""" Get given results for all yearly runs"""
res = {
v:{} for v in vars
}
exist_years = []
for y in [y for y in os.listdir(self.root_path) if os.path.isdir(self.root_path / y)]:
try:
exist_years.append(int(y))
except Exception:
pass
if years is None:
years = exist_years
else:
years = [y for y in exist_years if y in years]
# get results from all runs
for y in years:
self.load_model_run(y)
for v in vars:
res[v][y] = self.__getattribute__(v)
return res
def round_bound(self,name):
prt = self.opt_print['setup']
if name in self.opt_bound_cut:
thrs = self.opt_bound_cut[name]
dic = self.__getattribute__(name)
count = 0
for i,val in dic.items():
if val > 0 and val < thrs:
dic[i] = 0
count += 1
if count and prt:
print(f'Rounded {count} values to zero in {name}')
def save_model(self):
"""
Dump all model results to pickle file. Also save options, gen data etc., as well as self.runs
Can produce very large file if several runs are stored in self.runs
        The values saved are sufficient to rerun all plot functions, after first calling setup_data
"""
d = {}
save_vars = ['runs','ror_areas','generators_def','hydrores','areas','wind_areas','solar_areas','pump_res_areas',
'pump_areas','ror_reserve_areas','nuclear_areas','resareas','syncareas','gen_in_area',
'xtrans_int','xtrans_ext','rescountries','reservoir_capacity','pump_reservoir','fixed_transfer_connections',
'fixed_price_connections','area_sep_str','solar_capacity',
]
vars = [v for v in dir(self) if v.split('_',1)[0] in ['res','gen','idx','opt','fopt','dual','max','min'] or v in save_vars]
for v in vars:
d[v] = self.__getattribute__(v)
with open(self.root_path/f'results.pkl','wb') as f:
pickle.dump(d,f)
def save_model_run(self,append=False):
"""
Dump results from current model run in results.pkl
If append=True, results are also appended to list in self.runs
Storing many runs in self.runs can consume lots of memory, so it may
be better just to save the pickle files and load them when needed
"""
# save_entities = ['inflow_hourly','weeks','inflow','inflow_hourly_tmp','ror_hourly']
save_entities = []
run = {
v:self.__getattribute__(v) for v in [ v for v in dir(self) if v.split('_',1)[0] == 'res' or v in save_entities]
}
run['opt_weather_year'] = self.opt_weather_year
if append:
self.runs.append(run)
with open(self.res_path/f'results.pkl','wb') as f:
pickle.dump(run,f)
def load_model(self):
with open(self.res_path/f'results.pkl','rb') as f:
d = pickle.load(f)
for v in d:
self.__setattr__(v,d[v])
def load_model_run(self,year=2015):
self.res_path = self.root_path / f'{year}'
self.load_model()
self.res_path = self.root_path
def redo_plots(self):
print('----- REDO PLOTS -----')
self.load_model()
self.setup_indices()
self.setup_weather_indices()
self.setup_data()
self.get_rmse_data()
self.plot_figures()
def setup_child_model(self):
""" Create the Pyomo/Gorubi model object """
api = self.opt_api
solver = self.opt_solver
# Choose child model "cm" class depending on api type
if api == 'gurobi' and solver == 'gurobi':
from gurobi_model import GurobiModel
self.cm = GurobiModel(name=self.name)
else:
if api == 'gurobi':
print(f'WARNING: Can only use gurobi api with gurobi, using pyomo api!')
from pyomo_model import PyomoModel
self.cm = PyomoModel()
self.cm.setup_opt_problem(self)
def setup(self):
pass
prt = self.opt_print['setup']
self.vars_df_up_bound = {
'WIND':['wind_areas','idx_time'],
'SOLAR':['solar_areas','idx_time'],
'LS':['areas','idx_time'],
'HROR':['ror_areas','idx_time'],
}
print('----- SETUP -------------')
self.setup_indices()
self.setup_weather_indices()
self.setup_transmission()
if prt:
print('----- SETUP DATA --------')
self.setup_data()
if prt:
print('----- SETUP GEN ---------')
self.setup_gen()
if prt:
print('----- SETUP RESERVES ----')
self.setup_reserves()
if prt:
print('----- SETUP HYDRO -------')
self.setup_hydro()
if prt:
print('----- SETUP WIND --------')
self.setup_wind()
if prt:
print('----- SETUP SOLAR -------')
self.setup_solar()
if prt:
print('----- SETUP RESERVOIR ---')
self.setup_reservoir_values()
if prt:
print('----- SETUP INFLOW ------')
self.setup_inflow()
if prt:
print('----- SETUP ROR --------')
self.setup_run_of_river()
self.setup_inflow_feasibility()
if prt:
print('----- SETUP BOUNDS -----')
self.setup_bounds()
if self.opt_run_initialization:
self.run_init_model()
print('----- SETUP COMPLETE ----')
self.print_hydro_table()
self.print_renewable_table()
def solve(self):
""" Solve model """
print(' ----- STARTING SOLVER -----')
prt = self.opt_print['solver']
solver = self.opt_solver
if not hasattr(self,'cm'):
print('Model does not have child model, run "setup_child_model"')
return None
elif self.cm.api == 'pyomo': # pyomo model
## DECLARE DUAL
if not hasattr(self.cm,'dual'):
self.cm.dual = pye.Suffix(direction=pye.Suffix.IMPORT)
## SOLVE MODEL
if solver in solver_executables: # give explicit solver path
opt = pye.SolverFactory(solver,executable=solver_executables[solver],options=self.opt_solver_opts)
else:
opt = pye.SolverFactory(solver,options=self.opt_solver_opts)
res = opt.solve(self.cm, tee=prt)
if 'Time' in res['solver'][0]:
self.res_time['solver'] = res['solver'][0]['Time']
else:
self.res_time['solver'] = np.nan
self.res_stats = {
name:res['problem'][0][solver_stats['pyomo'][name]] for name in solver_stats['pyomo']
}
else: # gurobi model
if not prt:
self.cm.gm.setParam('OutputFlag',0)
self.cm.gm.optimize()
self.res_time['solver'] = self.cm.gm.Runtime
self.res_stats = {
name:self.cm.gm.getAttr(solver_stats['gurobi'][name]) for name in solver_stats['gurobi']
}
print(' ----- FINISHED SOLVER -----')
def post_process(self):
""" Post-processing of optimization results and plotting of figures """
print('----- POST PROCESS ------')
prt = self.opt_print['postprocess']
############### RESULTS ##########################
self.res_residuals = {} # residuals to check supply == demand
self.res_rmse_area = pd.DataFrame(dtype=float,index=self.areas,columns=['Prod','Hydro','Thermal','Nuclear','Price'])
self.res_rmse_intcon = pd.DataFrame(index=self.xtrans_int.index,columns=['From','To','RMSE'])
# self.res_rmse_intcon.loc[:,['From','To']] = self.xtrans_int.loc[:,['from','to']]
self.res_rmse_intcon['From'] = self.xtrans_int['from']
self.res_rmse_intcon['To'] = self.xtrans_int['to']
self.res_rmse_extcon = pd.DataFrame(index=self.xtrans_ext.index,columns=['From','To','RMSE'])
# self.res_rmse_extcon.loc[:,['From','To']] = self.xtrans_ext.loc[:,['from','to']]
self.res_rmse_extcon['From'] = self.xtrans_ext['from']
self.res_rmse_extcon['To'] = self.xtrans_ext['to']
self.res_rmse_area_norm = self.res_rmse_area.copy()
self.res_rmse_intcon_norm = pd.Series(index=self.xtrans_int.index)
self.res_rmse_extcon_norm = pd.Series(index=self.xtrans_ext.index)
# if given path, override
self.get_df_bounds()
self.get_results_from_child()
self.get_rmse_data()
if prt:
print('----- POST CALC. -------')
self.post_process_calculations()
# some more curtailment stats
print('----- PLOT FIGURES -----')
self.plot_figures()
#self.plot_offer_curves(self.supply_curve_hour)
self.print_rmse()
# writing output takes too long for large models
if self.fopt_print_dual_text:
with open(self.res_path / 'dual.txt','w') as f:
with redirect_stdout(f):
self.dual.display()
if self.fopt_print_text:
with open(self.res_path / 'model.txt','w') as f:
with redirect_stdout(f):
self.pprint()
if self.opt_print['check']:
print('----- CHECK RESULTS ----')
print(f'Maximum residual: {max([self.res_residuals[area] for area in self.res_residuals])}')
print(f'Average losses: {np.mean(self.res_losses):0.4f} %')
print('Errors:')
print(f'Production: {self.res_rmse_area["Prod"].mean():0.4f}')
print(f'Hydro: {self.res_rmse_area["Hydro"].mean():0.4f}')
print(f'Thermal: {self.res_rmse_area["Thermal"].mean():0.4f}')
print(f'Transfer: {self.res_rmse_intcon["RMSE"].mean():0.4f}')
print(f'External transfer: {self.res_rmse_extcon["RMSE"].mean():0.4f}')
print(f'Price: {self.res_rmse_area["Price"].mean():0.4f}')
def run_init_model(self):
t_0 = time.time()
prt = self.opt_print['init']
if prt:
print('------- RUN INIT MODEL --------')
self.setup_init_model()
self.solve_init_model()
self.postprocess_init_model()
if prt:
print('------- INIT MODEL COMPLETE ---')
self.res_time['ini'] = time.time() - t_0
def setup_init_model(self):
"""
This sets up a low resolution model, which is solved to get values with which to initialize the hourly model
:return:
"""
from pyomo_init_model import PyomoInitModel
print_output = self.opt_print['init']
self.ini = EmptyObject()
t0 = time.time()
delta = self.opt_init_delta
if print_output:
print(f'Time step of {delta} hours')
pass
self.timerange_lr = [self.timerange[t] for t in range(0,self.nPeriods,delta)]
# delta = 168 # resolution of model
# compute number of periods
self.nPeriods_lr = int(np.ceil(self.nPeriods / delta))
# map hour indices to periods
p2i = {}
i2p = {}
for pidx in range(self.nPeriods_lr):
# h2p[i] = range()
p2i[pidx] = range(pidx*delta,min((pidx+1)*delta,self.nPeriods))
for pidx in p2i:
for i in p2i[pidx]:
i2p[i] = pidx
self.p2i = p2i
self.i2p = i2p
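        # e.g. with the default delta = 168 h, hours 0-167 map to period 0,
        # hours 168-335 to period 1, and the last period may cover fewer hours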
entities = [e for e in ['solar','wind','exchange','exchange_capacity','demand','ror_hourly','inflow_hourly'] \
if hasattr(self,e)]
for name in entities:
df = self.__getattribute__(name)
self.__setattr__(f'{name}_lr',df.resample(f'{delta}H').mean())
if self.opt_use_var_cost:
self.gen_c1_lr = self.gen_c1.resample(f'{delta}H').mean()
self.gen_c2_lr = self.gen_c2.resample(f'{delta}H').mean()
# interpolate reservoir values for initialization
self.reservoir_interp_lr = interp_time(self.timerange_lr,self.reservoir_fix)
self.setup_bounds(lowres=True)
self.cmi = PyomoInitModel()
self.cmi.setup_opt_problem(self)
self.ini.time_setup = time.time() - t0
def solve_init_model(self):
print_output = self.opt_print['init']
solver = self.opt_solver
self.cmi.dual = pye.Suffix(direction=pye.Suffix.IMPORT)
## SOLVE MODEL
if solver in solver_executables:
opt = pye.SolverFactory(solver,executable=solver_executables[solver])
else:
opt = pye.SolverFactory(solver)
t0 = time.time()
self.ini.res = opt.solve(self.cmi, tee=print_output)
self.ini.time_solve = time.time() - t0
def postprocess_init_model(self):
pass
print_output = self.opt_print['init']
t0 = time.time()
""" Get result variables, duals, and bounds from optimization problem """
mo = self.ini
mo.obj = self.cmi.get_objective_value()
# read results into dataframes
if print_output:
print('Reading results into Panda data frames')
for v in self.pp_opt.get_vars:
entity = self.cmi.get_variable(f'var_{v}')
# convert to date index
entity.index = self.timerange_lr
mo.__setattr__(f'res_{v}',entity)
# increment time index of instantaneous variables
for var in [v for v in self.pp_opt.inst_vars if v in self.pp_opt.get_vars]:
entity = mo.__getattribute__(f'res_{var}')
entity.index += datetime.timedelta(hours=self.opt_init_delta)
# get dual variables
if print_output:
print('Getting dual variables')
for v in self.pp_opt.get_duals:
constr = f'constr_{v}'
if hasattr(self.cmi,constr):
entity = self.cmi.get_dual(constr)
# convert to date index
if v not in ['FIX_RESERVOIR']:
entity.index = self.timerange_lr
mo.__setattr__(f'dual_{constr}',entity)
# dic[f'dual_{constr}'] = entity
# interpolate reservoir values
mo.reservoir_interp = pd.DataFrame(dtype=float,index=self.timerange_p1,columns=self.hydrores)
mo.reservoir_interp.loc[self.timerange[0],:] = self.reservoir_fix.loc[self.timerange[0],:]
mo.reservoir_interp.loc[self.timerange_p1[-1],:] = self.reservoir_fix.loc[self.timerange_p1[-1],:]
mo.reservoir_interp.loc[mo.res_RES.index[:-1],:] = np.array(mo.res_RES.loc[mo.res_RES.index[:-1],:])
mo.reservoir_interp.interpolate(inplace=True)
mo.time_post = time.time() - t0
def setup_indices(self):
prt = self.opt_print['setup']
self.starttime = self.opt_start + ':00'
self.endtime = (str_to_date(self.opt_end) + datetime.timedelta(hours=24)).strftime('%Y%m%d') + ':00'
# defined quantities from options
self.areas = []
for c in self.opt_countries:
for a in country_to_areas[c]:
self.areas.append(a)
self.single_area_countries = [c for c in self.opt_countries if country_to_areas[c].__len__() == 1]
self.multi_area_countries = [c for c in self.opt_countries if country_to_areas[c].__len__() > 1]
self.syncareas = [a for a in self.areas if a in synchronous_areas]
self.country_to_areas = { # country to areas for countries included in model
c:country_to_areas[c] for c in self.opt_countries
}
self.area_to_country = {
a:area_to_country[a] for a in self.areas
}
self.hydrores = [area for area in self.areas if 'Hydro' in generators_def[area]]
# note: period with index 0 is starting period
# self.start = str_to_date(self.starttime) + datetime.timedelta(hours=-1)
# self.end = str_to_date(self.endtime)
self.timerange = pd.date_range(start=str_to_date(self.starttime),
end=str_to_date(self.endtime)+datetime.timedelta(hours=-1),freq='H')
self.timerange_p1 = pd.date_range(start=str_to_date(self.starttime),
end=str_to_date(self.endtime),freq='H')
self.nPeriods = self.timerange.__len__()
self.idx_time = range(self.nPeriods)
# day_fmt = '%Y%m%d'
self.daysrange_p1 = pd.date_range(start=self.timerange_p1[0],end=self.timerange_p1[-1],freq='D')
self.daysrange = self.daysrange_p1[:-1]
self.nDays = self.daysrange_p1.__len__() - 1
self.idx_day = range(self.nDays)
# map hours to days
self.hour2day = {
t:int(np.floor_divide(t,24)) for t in self.idx_time
}
self.day2hour = {
d:[t for t in self.idx_time if self.hour2day[t] == d] for d in self.idx_day
}
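        # e.g. hours 0-23 map to day 0, hours 24-47 to day 1, etc.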
#%% set start/end time for weather data (wind,solar,hydro)
start_year = int(self.opt_start[:4])
self.start_year = start_year
def setup_weather_indices(self):
""" Setup indices related to weather year, which effects the inflow data and (for the MAF data and Merra solar data)
the wind and solar production
"""
start_year = self.start_year
# check that weather year is within maf range
if (self.opt_use_maf_inflow or self.opt_use_maf_pecd) and \
(self.opt_weather_year > 2016 or self.opt_weather_year < 1982):
print(f'WARNING: No maf data for {self.opt_weather_year}, setting weather year to 2016')
self.opt_weather_year = 2016
self.weather_year_diff = self.opt_weather_year - start_year
sfmt = '%Y%m%d:%H'
self.starttime2 = (datetime.datetime(year=start_year+self.weather_year_diff,
month=int(self.starttime[4:6]),
day=int(self.starttime[6:8]))).strftime(sfmt)
self.endtime2 = (datetime.datetime(year=int(self.endtime[:4])+self.weather_year_diff,
month=int(self.endtime[4:6]),
day=int(self.endtime[6:8]))).strftime(sfmt)
# week to date conversion
if self.opt_use_maf_inflow:
# maf data starts on Sunday
self.wd = WeekDef(week_start=7,proper_week=False)
else:
# use ISO week definition
self.wd = WeekDef(week_start=4,proper_week=True)
# get week range covering whole simulation period
# Note: since the simulated year may be leap year, we may need one more day of data, hence get extra week
start = self.starttime2
end = (datetime.datetime.strptime(self.endtime2,sfmt)+datetime.timedelta(days=7)).strftime(sfmt)
self.weeks = self.wd.range2weeks(start,end,sout=True)
self.widxs = self.wd.range2weeks(start,end,sout=False)
# find index offset, we will interpolate inflow data for the whole range in weeks/widxs
# and then use df[inflow_offset:inflow_offset+nPeriods] as the inflow data
self.inflow_offset = int((datetime.datetime.strptime(self.starttime2,sfmt)-self.widxs[0]).seconds/3600)
dstart = str_to_date(self.starttime2)
self.daysrange_weather_year = pd.date_range(start=dstart,end=dstart+datetime.timedelta(days=self.nDays-1),freq='D')
def get_inflow_data(self):
if self.opt_use_maf_inflow: # get MAF inflow data
# get both weekly and daily maf inflow data
self.inflow,self.inflow_daily_maf = self.maf_hydro_db.select_inflow_bidz_wrap(starttime=self.weeks[0],
endtime=self.weeks[-1],
areas=self.hydrores,
wd=self.wd,date_index=True)
else: # entsoe inflow
self.inflow = self.inflow_db.select_inflow_data(starttime=self.weeks[0],
endtime=self.weeks[-1],
areas=self.hydrores,
table='inflow',wd=self.wd,date_index=True)
def db_exists(self,db='prices.db'):
# Check that database exists
pass
if not os.path.isfile(self.data_path / db):
raise Error(f"Database {db} does not exist!")
def setup_data(self):
prt = self.opt_print['setup']
# Check that databases exist
for db in [f for f in self.opt_db_files if f != 'unit']:
self.db_exists(self.db_path / self.opt_db_files[db])
self.price_db = entsoe.Database(db=self.db_path / self.opt_db_files['prices'])
self.exchange_db = entsoe.Database(db=self.db_path / self.opt_db_files['exchange'])
self.load_db = entsoe.Database(db=self.db_path / self.opt_db_files['load'])
self.reservoir_db = entsoe.Database(db=self.db_path / self.opt_db_files['reservoir'])
self.inflow_db = entsoe.Database(db=self.db_path / self.opt_db_files['inflow'])
self.gen_db = entsoe.Database(db=Path(self.db_path) / self.opt_db_files['gen'])
self.maf_pecd_db = maf_pecd_data.Database(db=Path(self.db_path) / self.opt_db_files['maf_pecd'])
self.maf_hydro_db = maf_hydro_data.Database(db=Path(self.db_path) / self.opt_db_files['maf_hydro'])
self.capacity_db = entsoe.Database(db=Path(self.db_path) / self.opt_db_files['capacity'])
if self.opt_nucl_individual_units:
self.db_exists(self.db_path / self.opt_db_files['unit'])
self.unit_db = entsoe.DatabaseGenUnit(db=Path(self.db_path)/self.opt_db_files['unit'])
starttime = self.starttime
endtime = self.endtime
cet = False
if prt:
print('Loading Excel data')
self.load_shares = pd.read_excel(self.data_path / f'load_shares.xlsx',index_col=0,squeeze=True)
for a in self.areas:
if a not in self.load_shares.index:
self.load_shares.at[a] = 1
# load generation statistics
self.stats = pd.read_excel(self.data_path / 'gen_stats.xlsx',header=[0,1],index_col=0,sheet_name=f'{self.opt_capacity_year}')
# load entsoe capacities
self.gen_capacity = self.capacity_db.select_capacity_wrap(areas=self.areas,year=self.opt_capacity_year)
if prt:
print('Loading GenPerType data')
# Used to plot generation per type and to complement missing Nordpool data
# aggregate hydro and thermal generation
self.entsoe_data = self.gen_db.select_gen_per_type_wrap_v2(starttime=starttime,endtime=endtime,
type_map=entsoe_type_map,cet_time=cet,drop_data=False,
areas=self.areas,print_output=prt,drop_pc=95)
if prt:
print('Loading demand data')
# demand data
self.demand = self.load_db.select_load_wrap(starttime=starttime,endtime=endtime,cet_time=cet,areas=self.areas,print_output=prt)
# reservoir content
self.reservoir = self.reservoir_db.select_reservoir_wrap(starttime=starttime,endtime=endtime,
areas=self.areas,cet_time=cet,normalize=self.opt_reservoir_data_normalized,offset=self.opt_reservoir_offset)
# Load production data for individual units
if self.opt_nucl_individual_units:
self.prod_per_unit,self.units = self.unit_db.select_data(start=starttime,end=endtime,
countries=[c for c in self.opt_countries if \
sum([1 for a in country_to_areas[c] if a in self.opt_nucl_individual_units])])
if prt:
print('Loading external price data')
# price data - only needed for external areas with variable transfer
self.price_external = self.price_db.select_price_data(starttime=starttime,endtime=endtime,cet_time=cet,
areas=self.opt_set_external_price)
# get flows for fixed connections
if prt:
print('Loading external exchange data')
self.exchange = self.exchange_db.select_flow_data( \
connections=list(self.xtrans_ext.loc[self.fixed_transfer_connections,'label_fw']),
starttime=starttime,
endtime=endtime,
table=self.opt_exchange_data_type,
cet_time=cet,
area_sep=self.area_sep_str)
if prt:
print('Loading exchange capacities')
# load exchange capacity
if self.opt_use_var_exchange_cap:
self.exchange_capacity = self.exchange_db.select_flow_data(table='capacity',area_sep=self.area_sep_str,cet_time=cet,
starttime=starttime,endtime=endtime,print_output=prt,
connections=list(self.xtrans_int['label_fw'])+list(self.xtrans_int['label_bw']) + \
list(self.xtrans_ext.loc[self.fixed_price_connections,'label_fw'])+ \
list(self.xtrans_ext.loc[self.fixed_price_connections,'label_bw']),
drop_na_col=True)
if prt:
print('Loading inflow data')
self.get_inflow_data()
impute_list = ['reservoir','inflow','demand','price_external','exchange']
# if self.opt_use_var_exchange_cap:
# self.impute_capacity_values()
# interpolate missing values in data
self.impute_values(impute_list,limit=self.opt_impute_limit,prt=prt)
# scale up demand
self.demand = self.demand * self.opt_load_scale
# replace negative values with zeros
for name in self.opt_nonnegative_data:
entity = self.__getattribute__(name)
entity.clip(0,inplace=True)
def setup_gen(self):
prt = self.opt_print['setup']
if prt:
print('Setting up generators')
stats = self.stats
self.generators_def = generators_def
# get generator data
nGen = 0
gidx = 1
self.gen_data = pd.DataFrame(index=range(1,nGen+1),
columns=['area','gtype','c2','c1','c0','pmax','pmin','rampup','rampdown'])
# load cost fit
with open(self.data_path/f'costfit/{self.opt_costfit_tag}_fit.pkl','rb') as f:
self.costfit = pickle.load(f)
for area in self.areas:
for gtype in self.generators_def[area]:
if (gtype == 'Hydro' and self.opt_pmax_type_hydro == 'stats') or \
(gtype != 'Hydro' and self.opt_pmax_type == 'stats'):
pmax = stats.at['max',(area,gtype)]
else:
pmax = self.gen_capacity.at[area,gtype]
if np.isnan(pmax): # missing values, use from stats
pmax = stats.at['max',(area,gtype)]
if prt:
print(f'No entso-e capacity value for {area} {gtype}')
pmin = stats.at['min',(area,gtype)]
if pmin > pmax: # pmin value from production stats may exceed capacity, fix this
pmin = 0
rampup = stats.at['maxramp',(area,gtype)]
rampdown = stats.at['minramp',(area,gtype)]
# cost coefficients
c0 = 0
if gtype == 'Nuclear':
c2 = 0
c1 = self.opt_nuclear_cost
elif gtype == 'Hydro':
c2 = 0
c1 = 0
else: # Thermal
c2 = self.costfit[area][gtype]['k']/2
c1 = self.costfit[area][gtype]['mavg']
if self.opt_co2_price is not None and self.opt_co2_price > 0:
c1 += self.opt_co2_price*self.opt_co2_intensity
if self.opt_co2_price_offset_year is not None:
c1 -= co2_price_ets[self.opt_co2_price_offset_year]*self.opt_co2_intensity
# check if cost parameters are strange, e.g. decreasing marginal cost
if c2 < 0 or np.isnan(c2):
c2 = 0
c1 = self.opt_default_thermal_cost
if prt:
print(f'Using default constant MC costs for {area} {gtype}')
self.gen_data = pd.concat([self.gen_data,
    pd.DataFrame(columns=self.gen_data.columns,index=[gidx],
                 data=[[area,gtype,c2,c1,c0,pmax,pmin,rampup,rampdown]])])
gidx += 1
self.nGen = gidx - 1
self.idx_gen = range(1,self.nGen+1)
self.idx_thermal_gen = [g for g in self.idx_gen if self.gen_data.at[g,'gtype'] == 'Thermal']
# generators with non-zero marginal cost
self.idx_cost_gen = [g for g in self.idx_gen if not self.gen_data.at[g,'gtype'] == 'Hydro']
if self.opt_pmin_zero:
self.gen_data.loc[:,'pmin'] = 0
# set maximum nuclear capacity based on this week
for g in self.gen_data.index:
if self.gen_data.at[g,'gtype'] == 'Nuclear':
# note: this is the maximum for the whole period and is actually not used;
# instead weekly maximum values are used
self.gen_data.at[g,'pmax'] = self.entsoe_data[self.gen_data.at[g,'area']]['Nuclear'].max()
# overwrite nuclear cost
if not self.opt_nuclear_cost is None:
self.gen_data.at[g,'c1'] = self.opt_nuclear_cost
# overwrite nuclear ramp rate
if not self.opt_nucl_ramp is None:
self.gen_data.at[g,'rampup'] = self.gen_data.at[g,'pmax'] * self.opt_nucl_ramp/100
self.gen_data.at[g,'rampdown'] = - self.gen_data.at[g,'pmax'] * self.opt_nucl_ramp/100
def tag_gen_cost():
pass
if prt:
print('Setting up generator variable costs')
# generator variable costs
if self.opt_use_var_cost:
self.gen_c2 = pd.DataFrame(dtype=float,index=self.timerange,columns=self.gen_data.index)
self.gen_c1 = pd.DataFrame(dtype=float,index=self.timerange,columns=self.gen_data.index)
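# gen_c1/gen_c2 hold time-varying cost coefficients per generator and hour, giving a
# quadratic cost c2*p^2 + c1*p (with c2 = k/2 from the cost fit, so marginal cost = k*p + c1)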
for g in self.idx_gen:
area = self.gen_data.at[g,'area']
gtype = self.gen_data.at[g,'gtype']
print_flag_areas = []
binstart = str_to_date(self.costfit['starttime'])
binsize = self.costfit['binsize']
for t in self.idx_time:
# it is assumed costs are fitted for correct year
# get costs depending on type
if gtype == 'Thermal': # variable cost data
dt = self.timerange[t]
c2 = self.costfit[area][gtype]['k']/2
c1 = self.costfit[area]['Thermal']['m'][time_to_bin(
dt,binstart=binstart,binsize=binsize)]
if self.opt_co2_price is not None and self.opt_co2_price > 0:
c1 += self.opt_co2_price*self.opt_co2_intensity
if self.opt_co2_price_offset_year is not None:
c1 -= co2_price_ets[self.opt_co2_price_offset_year]*self.opt_co2_intensity
if self.opt_overwrite_bad_costfits and (c2 < self.opt_c2_min or c2 > self.opt_c2_max):
# use default cost
c2 = self.gen_data.at[g,'c2']
c1 = self.gen_data.at[g,'c1']
# show message about overwrite
if area not in print_flag_areas:
print_flag_areas.append(area)
if prt:
print(f'Using constant costs for {area}')
else: # use constant costs from gen_data
c2 = self.gen_data.at[g,'c2']
c1 = self.gen_data.at[g,'c1']
self.gen_c2.at[self.timerange[t],g] = c2
self.gen_c1.at[self.timerange[t],g] = c1
# calculate maximum nuclear generation per week
# USE INDIVIDUAL NUCLEAR GENERATION DATA
def tag_nuclear():
pass
if prt:
print('Setting up nuclear generation')
self.nuclear_areas = [a for a in self.areas if 'Nuclear' in self.generators_def[a]]
for a in self.nuclear_areas:
if a not in self.opt_nucl_add_cap:
self.opt_nucl_add_cap[a] = 0.0
#%%
# variable nuclear limit
self.nuclear_hourly = pd.DataFrame(dtype=float,index=self.timerange_p1,columns=self.nuclear_areas)
# fix values at 1-day intervals, then do linear interpolation
self.nuclear_units = {}
for a in self.nuclear_areas:
if a in self.opt_nucl_individual_units:
self.nuclear_units[a] = [idx for idx in self.units.index if self.units.at[idx,'type'] == 'Nuclear'
and self.units.at[idx,'country'] == self.area_to_country[a]
and self.units.at[idx,'name'] not in self.opt_nucl_units_exclude]
max_rolling = self.prod_per_unit.loc[:,self.nuclear_units[a]].sum(axis=1).rolling(
window=168,min_periods=1,center=True).max()
else:
max_rolling = self.entsoe_data[a]['Nuclear'].rolling(window=168,min_periods=1,center=True).max()
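# the time-varying nuclear limit is the centred 168 h (one week) rolling maximum of observed
# nuclear output, plus any extra capacity in opt_nucl_add_cap; it is sampled at daily points
# below and then interpolated to hourly resolution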
for d in self.daysrange_p1:
self.nuclear_hourly.at[d,a] = max_rolling.at[d] + self.opt_nucl_add_cap[a]
# interpolate linearly
self.nuclear_hourly.interpolate(inplace=True)
# combined generators - define which generator units make up generators with ramp constraints
# Note: Several units of the same type can be used within an area, e.g. in order to create a piecewise linear
# cost function for that type and area. Some constraints, e.g. ramp constraints, should then be enforced on
# the aggregate production of those generators. For this reason there is a set for the "combined generators"
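# Example: two 'Thermal' units in the same area forming a piecewise linear cost curve are
# mapped to one combined generator, so the ramp limits apply to their summed output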
self.gen_comb = {}
idx = 1
for area in self.areas:
for gtype in self.generators_def[area]:
# find all generator units which belong to this generator
units = []
for gen in self.gen_data.index:
if self.gen_data.at[gen,'area'] == area and self.gen_data.at[gen,'gtype'] == gtype:
units.append(gen)
self.gen_comb[idx] = units
idx += 1
self.nGenComb = idx-1
self.gen_data_comb = pd.DataFrame(index=range(1,self.nGenComb+1),columns=['rampup','rampdown'])
for index in self.gen_data_comb.index:
self.gen_data_comb.at[index,'rampup'] = self.gen_data.at[self.gen_comb[index][0],'rampup']
self.gen_data_comb.at[index,'rampdown'] = self.gen_data.at[self.gen_comb[index][0],'rampdown']
# generators in each area
self.gen_in_area = {}
for area in self.areas:
self.gen_in_area[area] = [g for g in range(1,self.nGen+1) if self.gen_data.at[g,'area'] == area]
def setup_reserves(self):
prt = self.opt_print['setup']
for c in self.opt_countries:
if c not in self.opt_reserves_fcrn:
self.opt_reserves_fcrn[c] = 0
# amount of reserves per area
self.reserve_data = pd.DataFrame(index=self.areas,columns=['FCR-N','FCR-D','Rp','Rn'])
for area in self.reserve_data.index:
if 'SE' in area:
country = 'SE'
elif 'NO' in area:
country = 'NO'
elif 'DK' in area:
country = 'DK'
else:
country = area
#all_areas = [a for a in self.areas if country in a]
gen_hydro = [idx for idx in self.gen_data.index if self.gen_data.at[idx,'gtype'] == 'Hydro']
gen_area = [idx for idx in self.gen_data.index if self.gen_data.at[idx,'area'] == area]
gen_country = [idx for idx in self.gen_data.index if country in self.gen_data.at[idx,'area']]
# allocate in proportion to share of hydro generation
if self.gen_data.loc[intersection(gen_hydro,gen_country),'pmax'].sum() > 0:
self.reserve_data.at[area,'FCR-N'] = self.opt_reserves_fcrn[country]* \
self.gen_data.loc[intersection(gen_hydro,gen_area),'pmax'].sum() / self.gen_data.loc[intersection(gen_hydro,gen_country),'pmax'].sum()
else:
self.reserve_data.at[area,'FCR-N'] = 0
# FCR-D in proportion to FCR-N
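# (the total FCR-D volume opt_reserves_fcrd is distributed over areas in proportion to their FCR-N share)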
self.reserve_data.at[area,'FCR-D'] = self.reserve_data.at[area,'FCR-N'] * self.opt_reserves_fcrd / np.sum([self.opt_reserves_fcrn[a] for a in self.opt_reserves_fcrn])
self.reserve_data.at[area,'Rp'] = self.reserve_data.at[area,'FCR-N'] + self.reserve_data.at[area,'FCR-D']
self.reserve_data.at[area,'Rn'] = self.reserve_data.at[area,'FCR-N']
# areas with reserves
self.resareas = [area for area in self.areas if self.reserve_data.at[area,'FCR-N'] > 0]
# generators providing reserves in each area
self.reserve_gens = {}
for area in self.resareas:
self.reserve_gens[area] = [gen for gen in self.gen_data.index if self.gen_data.at[gen,'area'] == area and self.gen_data.at[gen,'gtype'] == 'Hydro']
# countries with reserves
self.rescountries = [c for c in self.opt_reserves_fcrn if self.opt_reserves_fcrn[c] > 0]
# generators providing reserves in each country
self.reserve_gens_country = {}
for c in self.rescountries:
self.reserve_gens_country[c] = [gen for gen in self.gen_data.index if self.gen_data.at[gen,'area'] in self.country_to_areas[c] and self.gen_data.at[gen,'gtype'] == 'Hydro']
self.reserve_country_data = pd.DataFrame(index=self.rescountries,columns=self.reserve_data.columns)
for c in self.reserve_country_data.columns:
for i in self.reserve_country_data.index:
self.reserve_country_data.at[i,c] = self.reserve_data.loc[self.country_to_areas[i],c].sum()
def setup_hydro(self):
prt = self.opt_print['setup']
self.reservoir_capacity = self.opt_reservoir_capacity.copy()
# get missing reservoir capacity as maximum reservoir value
self.reservoir_max = self.reservoir_db.select_max(table_type='reservoir',areas=self.areas)
for a in self.hydrores:
if a not in self.reservoir_capacity or not self.reservoir_capacity[a]:
self.reservoir_capacity[a] = self.reservoir_max.at[a]
self.pump_reservoir = self.opt_pump_reservoir.copy()
# reservoir content in TWh
for i,val in self.reservoir_capacity.items():
self.reservoir_capacity[i] = val * GWtoTW
for i,val in self.pump_reservoir.items():
self.pump_reservoir[i] = val * GWtoTW
# One reservoir per area with hydro
# Mapping from each reservoir to its connected hydro stations
self.reservoir2hydro = {}
for area in self.hydrores:
self.reservoir2hydro[area] = []
for idx,gen in self.gen_data.iterrows():
if gen['area'] == area and gen['gtype'] == 'Hydro':
self.reservoir2hydro[area].append(idx)
for a in self.hydrores:
if a not in self.opt_ror_fraction:
self.opt_ror_fraction[a] = 0
# areas with run of river hydro
self.ror_areas = [a for a in self.areas if a in self.opt_ror_fraction and self.opt_ror_fraction[a] > 0]
self.ror_countries = [c for c in self.opt_countries if sum([1 for a in self.country_to_areas[c] if a in self.ror_areas])]
# check which areas have hydro run of river with reserves
# Note: Run of river hydro is modelled as separate production (like wind and solar),
# and is not entered within the set of generators. However, when enforcing upward reserve constraints,
# run of river production still decreases the potential for providing upward reserves from hydro production.
# Thus the sets ror_reserve_areas and ror_reserve_countries below identify where run of river
# production must be accounted for when enforcing the upward reserve constraints.
self.ror_reserve_areas = []
for a in self.resareas:
#if a in self.set_HYDRO_AREA:
if self.area_to_country[a] in self.ror_countries and 'Hydro' in self.generators_def[a]:
self.ror_reserve_areas.append(a)
self.ror_reserve_countries = []
for c in self.rescountries:
if c in self.ror_countries:
self.ror_reserve_countries.append(c)
# HYDRO GENERATORS
# store data specific to hydro reservoirs in self.hydro_data
self.hydro_data = pd.DataFrame(index=self.hydrores,columns=['reservoir','waterval'])
# reservoir capacity
for area in self.hydro_data.index:
self.hydro_data.at[area,'reservoir'] = self.reservoir_capacity[area]
# PUMP HYDRO
# areas with pumping
self.pump_areas = [
a for a in self.hydrores if a in self.opt_pump_capacity and self.opt_pump_capacity[a] > 0
]
# areas with separate reservoir for pumping
self.pump_res_areas = [a for a in self.pump_areas \
if a in self.opt_pump_reservoir and self.opt_pump_reservoir[a] > 0]
# areas with pumping in inflow reservoir
self.pump_nores_areas = [a for a in self.pump_areas if a not in self.pump_res_areas]
def setup_run_of_river(self):
prt = self.opt_print['setup']
self.ror_hourly = pd.DataFrame(dtype=float,index=self.timerange,columns=self.ror_areas)
for area in self.ror_areas:
self.ror_hourly[area] = self.opt_ror_fraction[area] * \
self.inflow_hourly.loc[self.timerange,area] * GWtoMW
if self.opt_hydro_daily:
self.ror_daily = self.ror_hourly.resample('D').sum()
def setup_inflow_feasibility(self):
for area in self.hydrores:
hgens = [g for g in self.idx_gen if self.gen_data.at[g,'area'] == area and self.gen_data.at[g,'gtype'] == 'Hydro']
pmin = self.gen_data.loc[hgens,'pmin'].sum()
if area in self.ror_areas:
minprod = np.array(self.ror_hourly[area])
minprod[minprod <= pmin] = pmin
minprod_tot = np.sum(minprod)*MWtoGW
else:
minprod_tot = pmin*self.nPeriods*MWtoGW
# hydro may also have to keep negative reserves
if self.opt_use_reserves and not self.opt_country_reserves:
# TODO: Factor 1 should be enough??
minprod_tot += self.reserve_data.at[area,'Rn']*MWtoGW*self.nPeriods*2
res_incr = TWtoGW*(self.reservoir_fix.at[self.reservoir_fix.index[1],area] - self.reservoir_fix.at[self.reservoir_fix.index[0],area])
inflow_tot = self.inflow_hourly[area].sum()
if minprod_tot + res_incr > inflow_tot:
incr_val = (minprod_tot + res_incr - inflow_tot)*1.01/self.nPeriods
print(f'WARNING: Total inflow for {area} cannot satisfy minimum production and start/end reservoir values'
+ f'\nIncreasing inflow by {incr_val:0.3f} GWh/h to avoid infeasibility!')
self.inflow_hourly[area] = self.inflow_hourly[area] + incr_val
if self.opt_hydro_daily:
self.inflow_daily[area] = self.inflow_daily[area] + incr_val*24
def setup_wind(self):
prt = self.opt_print['setup']
# wind data
if self.opt_use_maf_pecd:
self.wind_areas = [a for a in self.areas if self.opt_wind_capacity_onsh[a] or self.opt_wind_capacity_offsh[a]]
self.maf_pecd_onsh_areas = list(set([bidz2maf_pecd[a] for a in self.wind_areas if self.opt_wind_capacity_onsh[a]]))
self.maf_pecd_offsh_areas = list(set([bidz2maf_pecd[a] for a in self.wind_areas if self.opt_wind_capacity_offsh[a]]))
#% get maf wind data
self.onsh_maf = self.maf_pecd_db.select_pecd_data(starttime=self.starttime2,endtime=self.endtime2,data_type='onshore',get_areas=self.maf_pecd_onsh_areas)
self.offsh_maf = self.maf_pecd_db.select_pecd_data(starttime=self.starttime2,endtime=self.endtime2,data_type='offshore',get_areas=self.maf_pecd_offsh_areas)
# scale with capacity, add offshore and onshore
self.wind_maf_raw = pd.DataFrame(0.0,index=self.onsh_maf.index,columns=self.wind_areas)
for a in self.wind_maf_raw.columns:
ma = bidz2maf_pecd[a]
if self.opt_wind_capacity_onsh[a]:
self.wind_maf_raw[a] += self.onsh_maf[ma]*self.opt_wind_capacity_onsh[a]
if self.opt_wind_capacity_offsh[a]:
self.wind_maf_raw[a] += self.offsh_maf[ma]*self.opt_wind_capacity_offsh[a]
self.wind = self.copy_data_to_model_year(self.wind_maf_raw)*MWtoGW
else: # use Entso-e data
self.wind_areas = [a for a in self.areas if 'Wind' in self.entsoe_data[a]]
self.wind = pd.DataFrame(index=self.entsoe_data['SE3'].index,columns=self.wind_areas)
for a in self.wind_areas:
self.wind[a] = self.entsoe_data[a]['Wind'] * MWtoGW
self.impute_values(['wind'],limit=self.opt_impute_limit,prt=prt)
for a in self.wind_areas:
if a not in self.opt_wind_scale_factor:
self.opt_wind_scale_factor[a] = 1
def setup_solar(self):
prt = self.opt_print['setup']
if prt:
print('Setting up solar generation')
self.solar_capacity = {}
for c in ['SE','DK','NO','FI']:
for a in country_to_areas[c]:
self.solar_capacity[a] = self.load_shares.at[a]*self.opt_solar_cap_by_country[c]*MWtoGW
# adjust solar capacities with given values per area
for a in self.opt_solar_cap_by_area:
self.solar_capacity[a] = self.opt_solar_cap_by_area[a]*MWtoGW
if self.opt_use_maf_pecd:
self.solar_areas = [a for a in self.solar_capacity if self.solar_capacity[a] > 0 and a in self.areas]
self.maf_pecd_solar_areas = list(set([bidz2maf_pecd[a] for a in self.solar_areas]))
self.solar_maf_raw = self.maf_pecd_db.select_pecd_data(starttime=self.starttime2,endtime=self.endtime2,data_type='pv',get_areas=self.maf_pecd_solar_areas)
self.solar_maf_mapped = pd.DataFrame(dtype=float,columns=self.solar_areas,index=self.solar_maf_raw.index)
for a in self.solar_areas:
self.solar_maf_mapped[a] = self.solar_maf_raw[bidz2maf_pecd[a]]*self.solar_capacity[a]
self.solar = self.copy_data_to_model_year(self.solar_maf_mapped)
else:
self.solar_areas = [a for a in self.areas if 'Solar' in self.entsoe_data[a].columns]
# use entsoe data
self.solar = pd.DataFrame(0.0,index=self.timerange_p1,columns=self.solar_areas)
for a in self.solar_areas:
self.solar[a] = self.entsoe_data[a]['Solar']*MWtoGW
self.impute_values(['solar'],limit=self.opt_impute_limit,prt=prt)
def setup_transmission(self):
prt = self.opt_print['setup']
if prt:
print('Setting up transmission capacity')
internal_connections = []
external_connections = []
for row in nordpool_capacities.iterrows():
if row[1]['from'] in self.areas and row[1]['to'] in self.areas:
internal_connections.append(row[0])
elif row[1]['from'] in self.areas or row[1]['to'] in self.areas:
external_connections.append(row[0])
self.xtrans_int = nordpool_capacities.loc[internal_connections,:]
self.xtrans_ext = nordpool_capacities.loc[external_connections,:]
self.xtrans_int['c1_change'] = 0
self.xtrans_int['c2_change'] = 0
self.xtrans_ext['c1_change'] = 0
self.xtrans_ext['c2_change'] = 0
# add future transmission capacity
if not self.opt_exchange_cap_year is None:
#update_transfer_capacity(self,year=self.opt_exchange_cap_year)
xtrans_int_new,xtrans_ext_new = self.update_transfer_capacity(year=self.opt_exchange_cap_year)
# Note: c1_change, c2_change are treated as firm capacities when using variable capacity limits
self.xtrans_int = xtrans_int_new
self.xtrans_ext = xtrans_ext_new
# internal connections
self.nXint = self.xtrans_int.__len__()
self.idx_xint = range(1,self.nXint+1)
self.xtrans_int.index = self.idx_xint
# external connections
self.nXext = self.xtrans_ext.__len__()
self.idx_xext = range(1,self.nXext+1)
self.xtrans_ext.index = self.idx_xext
# internal forward connections for each area
self.xintf = {}
for area in self.areas:
self.xintf[area] = [c for c in range(1,self.nXint+1) if self.xtrans_int.at[c,'from'] == area]
# internal reverse connections for each area
self.xintr = {}
for area in self.areas:
self.xintr[area] = [c for c in range(1,self.nXint+1) if self.xtrans_int.at[c,'to'] == area]
## SETUP EXTERNAL CONNECTIONS ##
# external connections -> parameter
# make sure from area is always internal area
# for row in self.xtrans_ext.iterrows():
# if row[1]['from'] not in self.areas:
# self.xtrans_ext.at[row[1],'from'] = row[1]['to']
# self.xtrans_ext.at[row[1],'to'] = row[1]['from']
# hvdc ramping rates for external hvdc connections
for c in self.xtrans_ext.index:
if self.xtrans_ext.at[c,'to'] == 'DE' and self.xtrans_ext.at[c,'from'] == 'DK1':
self.xtrans_ext.at[c,'ramp'] = 1e6 # = inf
else:
self.xtrans_ext.at[c,'ramp'] = self.opt_hvdc_max_ramp
## SETUP EXTERNAL CONNECTIONS ##
prt = self.opt_print['setup']
if prt:
print('Setting up external connections')
self.fixed_price_connections = [idx for idx in self.xtrans_ext.index if self.xtrans_ext.at[idx,'to'] in self.opt_set_external_price]
self.fixed_transfer_connections = [idx for idx in self.xtrans_ext.index if self.xtrans_ext.at[idx,'to'] not in self.opt_set_external_price]
# Create sets mapping areas to transfer connections
self.xext_ft = {}
self.xext_fp = {}
for area in self.areas:
self.xext_ft[area] = [c for c in range(1,self.nXext+1) if self.xtrans_ext.at[c,'from'] == area and c in self.fixed_transfer_connections]
self.xext_fp[area] = [c for c in range(1,self.nXext+1) if self.xtrans_ext.at[c,'from'] == area and c in self.fixed_price_connections]
self.area_sep_str = '->' # string separating areas in labels for connections
self.xtrans_ext['label_fw'] = ''
self.xtrans_ext['label_bw'] = ''
for i in self.xtrans_ext.index:
self.xtrans_ext.at[i,'label_fw'] = f"{self.xtrans_ext.at[i,'from']}{self.area_sep_str}{self.xtrans_ext.at[i,'to']}"
self.xtrans_ext.at[i,'label_bw'] = f"{self.xtrans_ext.at[i,'to']}{self.area_sep_str}{self.xtrans_ext.at[i,'from']}"
self.xtrans_int['label_fw'] = ''
self.xtrans_int['label_bw'] = ''
for i in self.xtrans_int.index:
self.xtrans_int.at[i,'label_fw'] = f"{self.xtrans_int.at[i,'from']}{self.area_sep_str}{self.xtrans_int.at[i,'to']}"
self.xtrans_int.at[i,'label_bw'] = f"{self.xtrans_int.at[i,'to']}{self.area_sep_str}{self.xtrans_int.at[i,'from']}"
## HVDC RAMPING ##
# hvdc ramping restrictions - maximum 600 MW/hour, also joint restriction for Konti-Skan and Skagerrak
#Konti-Skan SE3 <-> DK1 - int 10
#Skagerrak NO2 <-> DK1 - int 19
#NorNed NO2 <-> NL - ext
#Kontek GER <-> DK2 -ext
#SwePol SE4 <-> PL - ext
#Baltic Cable SE4 <-> GER - ext
#Estlink FI <-> EE - ext
#Storebelt DK1 <-> DK2 - int 18
#NordBalt LT <-> SE4 - ext
#LitPol PL <-> LT - ext
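# combined_hvdc defines joint HVDC ramping groups; the values are lists of internal connection
# indices from the list above (10: Konti-Skan, 18: Storebelt, 19: Skagerrak), presumably used
# when the joint ramp constraints are built elsewhere in the model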
self.combined_hvdc = {
1:[10,19],
2:[10],
3:[19],
4:[18],
}
def update_transfer_capacity(self,year=2035,tc_table=new_trans_cap):
""" Update transfer capacity using future expansion plans, considering
all new capacity until given year """
xtrans_int = pd.DataFrame(data=0,index=self.xtrans_int.index,columns=['from','to','c1','c2','c1_change','c2_change'])
xtrans_ext = pd.DataFrame(data=0,index=self.xtrans_ext.index,columns=['from','to','c1','c2','c1_change','c2_change'])
xtrans_int.loc[:,['from','to','c1','c2']] = self.xtrans_int.loc[:,['from','to','c1','c2']]
xtrans_ext.loc[:,['from','to','c1','c2']] = self.xtrans_ext.loc[:,['from','to','c1','c2']]
# drop ramp rate column (shouldn't exist unless function is called after setup)
if 'ramp' in xtrans_ext.columns:
xtrans_ext = xtrans_ext.drop(columns=['ramp'])
for cidx in tc_table.index:
if tc_table.at[cidx,'year'] <= year:
# add capacity
# check if it's external or internal connection
if tc_table.at[cidx,'from'] in self.areas and tc_table.at[cidx,'to'] in self.areas:
# internal connection
# find connection
new_conn = []
found_conn = False
for idx in xtrans_int.index:
if xtrans_int.at[idx,'from'] == tc_table.at[cidx,'from'] and xtrans_int.at[idx,'to'] == tc_table.at[cidx,'to']:
# add to existing connection
xtrans_int.at[idx,'c1_change'] += tc_table.at[cidx,'c1_change']
xtrans_int.at[idx,'c2_change'] += tc_table.at[cidx,'c2_change']
found_conn = True
break
elif xtrans_int.at[idx,'from'] == tc_table.at[cidx,'to'] and xtrans_int.at[idx,'to'] == tc_table.at[cidx,'from']:
# add to existing connection, reverse direction
xtrans_int.at[idx,'c1_change'] += tc_table.at[cidx,'c2_change']
xtrans_int.at[idx,'c2_change'] += tc_table.at[cidx,'c1_change']
found_conn = True
break
if not found_conn:
# add new internal connection
if tc_table.at[cidx,'from'] in self.areas:
new_conn.append([tc_table.at[cidx,'from'],tc_table.at[cidx,'to'],0,0,tc_table.at[cidx,'c1_change'],tc_table.at[cidx,'c2_change'],])
else:
new_conn.append([tc_table.at[cidx,'to'],tc_table.at[cidx,'from'],0,0,tc_table.at[cidx,'c2_change'],tc_table.at[cidx,'c1_change'],])
xtrans_int = pd.concat([xtrans_int,pd.DataFrame(new_conn,columns=['from','to','c1','c2','c1_change','c2_change'])],ignore_index=True)
else:
# external connection
# find connection
new_conn = []
found_conn = False
for idx in xtrans_ext.index:
if xtrans_ext.at[idx,'from'] == tc_table.at[cidx,'from'] and xtrans_ext.at[idx,'to'] == tc_table.at[cidx,'to']:
# add to existing connection
xtrans_ext.at[idx,'c1_change'] += tc_table.at[cidx,'c1_change']
xtrans_ext.at[idx,'c2_change'] += tc_table.at[cidx,'c2_change']
found_conn = True
break
elif xtrans_ext.at[idx,'from'] == tc_table.at[cidx,'to'] and xtrans_ext.at[idx,'to'] == tc_table.at[cidx,'from']:
# add to existing connection, reverse direction
xtrans_ext.at[idx,'c1_change'] += tc_table.at[cidx,'c2_change']
xtrans_ext.at[idx,'c2_change'] += tc_table.at[cidx,'c1_change']
found_conn = True
break
if not found_conn:
# add new external connection
if tc_table.at[cidx,'from'] in self.areas:
new_conn.append([tc_table.at[cidx,'from'],tc_table.at[cidx,'to'],0,0,tc_table.at[cidx,'c1_change'],tc_table.at[cidx,'c2_change']])
else:
new_conn.append([tc_table.at[cidx,'to'],tc_table.at[cidx,'from'],0,0,tc_table.at[cidx,'c2_change'],tc_table.at[cidx,'c1_change']])
xtrans_ext = pd.concat([xtrans_ext,pd.DataFrame(new_conn,columns=['from','to','c1','c2','c1_change','c2_change'])],ignore_index=True)
return xtrans_int,xtrans_ext
def setup_inflow(self):
prt = self.opt_print['setup']
if prt:
print('Interpolating inflow')
self.interpolate_inflow()
if self.opt_use_maf_inflow:
self.add_hourly_maf_inflow()
if self.opt_hydro_daily:
# calculate daily inflow
self.inflow_daily = self.inflow_hourly.resample('D').sum()
def setup_bounds(self,lowres=False):
""" Create time-varying bounds for variables
Bounds are in the form of dicts with set values as indices, for setup of problem
using gurobi (or pyomo)
bound = {(i1,i2,...):value for i1 in set1.. }
"""
prt = self.opt_print['setup']
if lowres:
idx_time = range(self.nPeriods_lr)
timerange = self.timerange_lr
if self.opt_use_var_exchange_cap:
exchange_capacity = self.exchange_capacity_lr
solar = self.solar_lr
wind = self.wind_lr
demand = self.demand_lr
ror_hourly = self.ror_hourly_lr
else:
idx_time = self.idx_time
timerange = self.timerange
if self.opt_use_var_exchange_cap:
exchange_capacity = self.exchange_capacity
solar = self.solar
wind = self.wind
demand = self.demand
ror_hourly = self.ror_hourly
max_lims = {}
min_lims = {}
# limit for external variable connections
max_XEXT = {}
min_XEXT = {}
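# for connections with hourly capacity data the bound is the hourly capacity plus planned
# additions (c1_change/c2_change), floored at opt_min_exchange_cap; otherwise the nominal
# capacity (c1/c2 plus additions) is used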
for c in self.fixed_price_connections:
c1 = self.xtrans_ext.at[c,'label_fw']
c2 = self.xtrans_ext.at[c,'label_bw']
a1 = c1.split(self.area_sep_str)[0]
a2 = c2.split(self.area_sep_str)[0]
if not self.opt_use_var_exchange_cap or c1 not in exchange_capacity.columns \
or (a1,a2) in self.opt_nominal_capacity_connections \
or (a2,a1) in self.opt_nominal_capacity_connections:
if self.opt_use_var_exchange_cap:
if prt:
print(f"Using fixed transfer capacity for {c1}")
for t in idx_time:
max_XEXT[(c,t)] = MWtoGW * self.xtrans_ext.loc[c,['c1','c1_change']].sum()
else:
for t in idx_time:
max_XEXT[(c,t)] = MWtoGW * max(self.opt_min_exchange_cap,
self.xtrans_ext.at[c,'c1_change']+exchange_capacity.at[timerange[t],c1])
if not self.opt_use_var_exchange_cap or c2 not in exchange_capacity.columns:
if self.opt_use_var_exchange_cap:
if prt:
print(f"Using fixed transfer capacity for {c1}")
for t in idx_time:
min_XEXT[(c,t)] = -MWtoGW * self.xtrans_ext.loc[c,['c2','c2_change']].sum()
else:
for t in idx_time:
min_XEXT[(c,t)] = -MWtoGW * max(self.opt_min_exchange_cap,
self.xtrans_ext.at[c,'c2_change']+exchange_capacity.at[timerange[t],c2])
max_lims['XEXT'] = max_XEXT
min_lims['XEXT'] = min_XEXT
# limit for internal connections
max_X1 = {}
max_X2 = {}
for c in self.idx_xint:
c1 = self.xtrans_int.at[c,'label_fw']
c2 = self.xtrans_int.at[c,'label_bw']
a1 = c1.split(self.area_sep_str)[0]
a2 = c2.split(self.area_sep_str)[0]
if not self.opt_use_var_exchange_cap or c1 not in exchange_capacity.columns \
or (a1,a2) in self.opt_nominal_capacity_connections \
or (a2,a1) in self.opt_nominal_capacity_connections:
if self.opt_use_var_exchange_cap:
if prt:
print(f"Using fixed transfer capacity for {c1}")
for t in idx_time:
max_X1[(c,t)] = MWtoGW * self.xtrans_int.loc[c,['c1','c1_change']].sum()
else: # variable capacity
for t in idx_time:
max_X1[(c,t)] = MWtoGW * max(self.opt_min_exchange_cap,
self.xtrans_int.at[c,'c1_change']+exchange_capacity.at[timerange[t],c1])
if not self.opt_use_var_exchange_cap or c2 not in exchange_capacity.columns \
or (a1,a2) in self.opt_nominal_capacity_connections \
or (a2,a1) in self.opt_nominal_capacity_connections:
if self.opt_use_var_exchange_cap:
if prt:
print(f"Using fixed transfer capacity for {c2}")
for t in idx_time:
max_X2[(c,t)] = MWtoGW * self.xtrans_int.loc[c,['c2','c2_change']].sum()
else:
for t in idx_time:
max_X2[(c,t)] = MWtoGW * max(self.opt_min_exchange_cap,
self.xtrans_int.at[c,'c2_change']+exchange_capacity.at[timerange[t],c2])
max_lims['X1'] = max_X1
max_lims['X2'] = max_X2
max_lims['SOLAR'] = {
(a,t):solar.at[timerange[t],a] for a in self.solar_areas for t in idx_time
}
max_lims['WIND'] = {
(a,t):wind.at[timerange[t],a]*self.opt_wind_scale_factor[a]
for a in self.wind_areas for t in idx_time
}
max_LS = {}
for area in self.areas:
for t in idx_time:
max_LS[(area,t)] = demand.at[timerange[t],area]*MWtoGW
max_lims['LS'] = max_LS
max_lims['HROR'] = {(a,t):ror_hourly.at[timerange[t],a]*MWtoGW for a in self.ror_areas for t in idx_time}
# generator limits
max_PG = {}
min_PG = {}
for g in self.idx_gen:
gtype = self.gen_data.at[g,'gtype']
area = self.gen_data.at[g,'area']
for t in idx_time:
time = timerange[t]
if gtype == 'Nuclear':
pmax = self.nuclear_hourly.at[time,area]
pmin = self.opt_nucl_min_lvl * pmax
elif gtype == 'Hydro':
pmax = self.gen_data.at[g,'pmax']
pmin = 0
else:
pmax = self.gen_data.at[g,'pmax']
pmin = self.gen_data.at[g,'pmin']
max_PG[(g,t)] = pmax*MWtoGW
min_PG[(g,t)] = pmin*MWtoGW
max_lims['PG'] = max_PG
min_lims['PG'] = min_PG
max_RES = {}
for a in self.hydrores:
if not self.opt_hydro_daily or lowres:
for t in idx_time:
max_RES[(a,t)] = self.reservoir_capacity[a]
else:
for t in self.idx_day:
max_RES[(a,t)] = self.reservoir_capacity[a]
max_lims['RES'] = max_RES
max_lims['PUMP'] = {
(a,t):self.opt_pump_capacity[a]*MWtoGW for a in self.pump_areas for t in idx_time
}
max_lims['PRES'] = {
(a,t):self.pump_reservoir[a] for a in self.pump_res_areas for t in idx_time
}
# round small values to zero
for name in self.opt_bound_cut:
thrs = self.opt_bound_cut[name]
round = False
count = 0
minmax = name.split('_')[0]
var = name.split('_')[1]
if minmax == 'max' and var in max_lims:
entity = max_lims[var]
round = True
elif minmax == 'min' and var in min_lims:
entity = min_lims[var]
round = True
if round:
for i,val in entity.items():
if val < thrs and val > 0:
entity[i] = 0
count += 1
if prt:
print(f'Rounded {count} values to zero in {name}')
if not lowres:
for name in max_lims:
self.__setattr__(f'max_{name}',max_lims[name])
for name in min_lims:
self.__setattr__(f'min_{name}',min_lims[name])
else:
for name in max_lims:
self.__setattr__(f'max_{name}_LR',max_lims[name])
for name in min_lims:
self.__setattr__(f'min_{name}_LR',min_lims[name])
def get_df_bounds(self):
"""
For variables in vars_df_up_bound also make data-frames with upper bounds, useful for doing calculations
e.g. up_SOLAR = pd.DataFrame(index=timerange,columns=solar_areas)
"""
prt = self.opt_print['postprocess']
for var in self.vars_df_up_bound:
if self.vars_df_up_bound[var][-1] == 'idx_time' and self.vars_df_up_bound[var].__len__() == 2:
cols = self.__getattribute__(self.vars_df_up_bound[var][0])
df = pd.DataFrame([[self.__getattribute__(f'max_{var}')[(c,t)] for c in cols] for t in self.idx_time],
columns=cols,index=self.timerange)
self.__setattr__(f'up_{var}',df)
else:
print(f'Get upper bound for variable {var} not implemented!')
def get_results_from_child(self):
""" Get result variables, duals, and bounds from optimization problem """
prt = self.opt_print['postprocess']
self.res_obj = self.cm.get_objective_value()
# read results into dataframes
if prt:
print('Reading results into pandas DataFrames')
for v in self.pp_opt.get_vars:
entity = self.cm.get_variable(f'var_{v}')
# convert to date index
if self.opt_hydro_daily and v in self.pp_opt.daily_vars:
entity.index = self.daysrange
else:
entity.index = self.timerange
setattr(self,f'res_{v}',entity)
# increment time index of instantaneous variables
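# (presumably because these are state variables, e.g. reservoir levels, which refer to the
# end of each period rather than its start)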
for var in [v for v in self.pp_opt.inst_vars if v in self.pp_opt.get_vars]:
entity = getattr(self,f'res_{var}')
if self.opt_hydro_daily and var in self.pp_opt.daily_vars:
entity.index += datetime.timedelta(days=1)
else:
entity.index += datetime.timedelta(hours=1)
# get dual variables
if prt:
print('Getting dual variables')
for v in self.pp_opt.get_duals:
constr = f'constr_{v}'
if hasattr(self.cm,constr):
entity = self.cm.get_dual(constr)
# convert to date index
if v not in ['FIX_RESERVOIR']:
if entity.index.__len__() == self.nPeriods:
entity.index = self.timerange
elif entity.index.__len__() == self.nDays:
entity.index = self.daysrange
setattr(self,f'dual_{constr}',entity)
def get_rmse_data(self):
prt = self.opt_print['postprocess']
# get data used for rmse calculations
# production data
starttime = self.starttime
endtime = (str_to_date(self.endtime)+datetime.timedelta(hours=-1)).strftime('%Y%m%d:%H')
if self.fopt_plots['transfer_internal'] or self.fopt_plots['transfer_external'] or self.fopt_calc_rmse['transfer']:
# get transfer data, for internal and external variable connections
self.df_exchange_rmse = self.exchange_db.select_flow_data(
connections=[self.xtrans_ext.at[i,'label_fw'] for i in self.fixed_price_connections] \
+ [f"{self.xtrans_int.at[i,'from']}{self.area_sep_str}{self.xtrans_int.at[i,'to']}"
for i in self.xtrans_int.index],
starttime=starttime,
endtime=endtime,
table=self.fopt_rmse_transfer_data_type,
cet_time=True)
if self.fopt_calc_rmse['price'] or self.fopt_plots['price']:
# get internal price data
self.df_price_internal = self.price_db.select_price_data(starttime=starttime,
endtime=endtime,
cet_time=True,
areas=self.areas)
self.impute_values(['df_price_internal'],limit=self.opt_impute_limit,prt=prt)
def post_process_calculations(self):
prt = self.opt_print['postprocess']
## CALCULATE CURTAILMENT ##
for v in self.pp_opt.get_cur_vars:
df = self.__getattribute__(f'up_{v}') - self.__getattribute__(f'res_{v}')
self.__setattr__(f'res_cur_{v}',df)
## CALCULATE DEMAND ##
self.res_D = self.__getattribute__(f'up_LS') - self.__getattribute__('res_LS')
## CALCULATE COSTS ##
# cost of load shedding
self.res_loadshed_cost = MWtoGW*self.res_LS.sum().sum()
# cost of thermal generation
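# (quadratic cost c2*p^2 + c1*p summed over all hours and cost generators, with MW/GW conversion)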
self.res_thermal_cost = MWtoGW*sum( sum( self.gen_data.at[g,'c2']*self.res_PG.at[t,g]**2/MWtoGW + self.gen_data.at[g,'c1']*self.res_PG.at[t,g] for t in self.timerange) for g in self.idx_cost_gen)
# net sales of power
self.res_exp_rev = MWtoGW*sum( sum(self.price_external.at[self.timerange[t],self.xtrans_ext.at[x,'to']]*self.res_XEXT.at[self.timerange[t],x] for t in self.idx_time) for x in self.fixed_price_connections)
# value of stored water
self.res_water_value = MWtoGW*sum( self.hydro_data.at[h,'waterval']*self.res_RES.at[self.res_RES.index[-1],h] for h in self.hydrores )
## CALCULATE GENERATION ##
self.res_gen = {}
for area in self.areas:
self.res_gen[area] = pd.DataFrame(index=self.timerange,columns=self.generators_def[area])
for gtype in self.generators_def[area]:
self.res_gen[area].loc[:,gtype] = self.res_PG.loc[:,
[gidx for gidx in self.idx_gen if
self.gen_data.at[gidx,'area']==area and
self.gen_data.at[gidx,'gtype'] == gtype]].sum(axis=1)
# get wind production
for area in self.res_WIND.columns:
self.res_gen[area].loc[:,'Wind'] = self.res_WIND.loc[:,area]
# get solar production
for area in self.res_SOLAR.columns:
self.res_gen[area].loc[:,'Solar'] = self.res_SOLAR.loc[:,area]
# get hydro ror
for area in self.res_HROR.columns:
self.res_gen[area].loc[:,'HROR'] = self.res_HROR.loc[:,area]
for area in self.pump_res_areas:
self.res_gen[area].loc[:,'REL'] = self.res_REL.loc[:,area]
## CALCULATE TRANSFERS ##
# get internal transmissions
self.res_xint = pd.DataFrame(dtype=float,index=self.timerange,columns=self.idx_xint)
for conn in self.idx_xint:
self.res_xint[conn] = self.res_X1.loc[:,conn] - self.res_X2.loc[:,conn]
# get external transmissions
self.res_xext = {}
for conn in self.idx_xext:
self.res_xext[conn] = pd.Series(index=self.timerange)
if conn in self.fixed_transfer_connections:
for t in self.idx_time:
self.res_xext[conn].at[self.timerange[t]] = MWtoGW * \
self.exchange.at[self.timerange[t],self.xtrans_ext.at[conn,'label_fw']]
else:
self.res_xext[conn] = self.res_XEXT.loc[:,conn]
def goto1():
pass
# get net exports for each area
self.res_exp = {}
for area in self.areas:
self.res_exp[area] = pd.DataFrame(0.0,index=self.timerange,columns=['Exports int','Exports ext'])
for area in self.areas:
for conn in self.xext_ft[area]:
self.res_exp[area].loc[:,'Exports ext'] = self.res_exp[area].loc[:,'Exports ext'] + self.res_xext[conn]
for conn in self.xext_fp[area]:
self.res_exp[area].loc[:,'Exports ext'] = self.res_exp[area].loc[:,'Exports ext'] + self.res_xext[conn]
# exports to internal regions
for conn in self.xintf[area]:
self.res_exp[area].loc[:,'Exports int'] = self.res_exp[area].loc[:,'Exports int'] \
+ self.res_X1.loc[:,conn] - (1-self.opt_loss)*self.res_X2.loc[:,conn]
# net export = export - (1-loss)*import
for conn in self.xintr[area]:
self.res_exp[area].loc[:,'Exports int'] = self.res_exp[area].loc[:,'Exports int'] \
+ self.res_X2.loc[:,conn] - (1-self.opt_loss)*self.res_X1.loc[:,conn]
## AGGREGATE GENERATION BY COUNTRIES
for c in self.multi_area_countries:
columns = []
all_columns = ['Nuclear','Thermal','Hydro','Wind','Solar','HROR','REL']
#col_idx = []
# get columns for this country
for area in self.areas:
if c in area:
for gtype in self.res_gen[area].columns:
if gtype not in columns:
columns.append(gtype)
self.res_gen[c] = pd.DataFrame(0.0,index=self.timerange,columns=[c for c in all_columns if c in columns])
for area in self.areas:
if c in area:
for gtype in self.res_gen[area].columns:
self.res_gen[c].loc[:,gtype] = self.res_gen[c].loc[:,gtype] + self.res_gen[area].loc[:,gtype]
# AGGREGATE EXPORTS
for c in self.multi_area_countries:
self.res_exp[c] = pd.DataFrame(0,index = self.timerange,columns=['Exports ext','Exports int'])
for area in self.areas:
if c in area:
self.res_exp[c].loc[:,'Exports int'] = self.res_exp[c].loc[:,'Exports int'] + self.res_exp[area].loc[:,'Exports int']
self.res_exp[c].loc[:,'Exports ext'] = self.res_exp[c].loc[:,'Exports ext'] + self.res_exp[area].loc[:,'Exports ext']
## TOTAL NET EXPORTS PER AREA (INCLUDING EXTERNAL REGIONS)
# get total transfer on each connection
self.res_xext_tot = pd.Series(index=self.xtrans_ext.index)
self.res_xint_tot = pd.Series(index=self.xtrans_int.index)
for idx in self.res_xext_tot.index:
self.res_xext_tot.at[idx] = self.res_xext[idx].sum()
for idx in self.res_xint_tot.index:
self.res_xint_tot.at[idx] = self.res_xint[idx].sum()
# for each area, internal and external, get net exports
ext_areas = list(set(self.xtrans_ext.loc[:,'to']))
self.res_net_exports = pd.Series(0,index = self.areas + ext_areas,dtype=float)
# loop over all connections
for idx in self.res_xint_tot.index:
self.res_net_exports.at[ self.xtrans_int.at[idx,'from'] ] = self.res_net_exports.at[ self.xtrans_int.at[idx,'from'] ] + self.res_xint_tot.at[idx]
self.res_net_exports.at[ self.xtrans_int.at[idx,'to'] ] = self.res_net_exports.at[ self.xtrans_int.at[idx,'to'] ] - self.res_xint_tot.at[idx]
for idx in self.res_xext_tot.index:
self.res_net_exports.at[ self.xtrans_ext.at[idx,'from'] ] = self.res_net_exports.at[ self.xtrans_ext.at[idx,'from'] ] + self.res_xext_tot.at[idx]
self.res_net_exports.at[ self.xtrans_ext.at[idx,'to'] ] = self.res_net_exports.at[ self.xtrans_ext.at[idx,'to'] ] - self.res_xext_tot.at[idx]
## RESIDUALS ##
# check production = demand + exports
for area in self.areas:
if area in self.pump_areas:
self.res_residuals[area] = max(np.abs(self.res_gen[area].sum(axis=1) - self.res_D.loc[:,area] - self.res_exp[area].sum(axis=1) \
- self.res_PUMP[area]))
else:
self.res_residuals[area] = max(np.abs(self.res_gen[area].sum(axis=1) - self.res_D.loc[:,area] - self.res_exp[area].sum(axis=1)))
## CALCULATE LOSSES AS FRACTION OF INTERNAL GENERATION ##
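# losses in percent: opt_loss times total internal transfers in both directions, relative to
# total dispatched generation plus wind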
self.res_losses = 100 * self.opt_loss * (self.res_X1.sum(axis=1) + self.res_X2.sum(axis=1)) / (self.res_PG.sum(axis=1) + self.res_WIND.sum(axis=1))
## CALCULATE KINETIC ENERGY ##
self.res_ekin = pd.DataFrame(dtype=float,data=0,index=self.timerange,columns=['model','data'])
for a in self.syncareas:
for g in self.res_gen[a]:
if g in ['Thermal','Hydro','Nuclear']: # add inertia contribution
self.res_ekin.loc[:,'model'] += self.res_gen[a].loc[:,g] * \
self.opt_inertia_constants[self.area_to_country[a]][g] / \
self.opt_inertia_pf[self.area_to_country[a]][g] / \
self.opt_inertia_cf[self.area_to_country[a]][g]
self.res_ekin.loc[:,'data'] += self.entsoe_data[a].loc[:,g] * MWtoGW * \
self.opt_inertia_constants[self.area_to_country[a]][g] / \
self.opt_inertia_pf[self.area_to_country[a]][g] / \
self.opt_inertia_cf[self.area_to_country[a]][g]
# add contribution from run of river hydro
if a in self.ror_areas:
self.res_ekin.loc[:,'model'] += self.res_gen[a].loc[:,'HROR'] * \
self.opt_inertia_constants[self.area_to_country[a]]['Hydro'] / \
self.opt_inertia_pf[self.area_to_country[a]]['Hydro'] / \
self.opt_inertia_cf[self.area_to_country[a]]['Hydro']
# add contribution from pumped hydro
if a in self.pump_res_areas:
self.res_ekin.loc[:,'model'] += self.res_gen[a].loc[:,'REL'] * \
self.opt_inertia_constants[self.area_to_country[a]]['Hydro'] / \
self.opt_inertia_pf[self.area_to_country[a]]['Hydro'] / \
self.opt_inertia_cf[self.area_to_country[a]]['Hydro']
#### CURTAILMENT STATISTICS ####
curstat_cols = ['GWh','%','#','avg len','avg GW','avg %']
## WIND ##
self.wcur_tot = curtailment_statistics(self.res_cur_WIND.sum(axis=1),self.up_WIND.sum(axis=1),curstat_cols)
self.wcur_pa = pd.DataFrame(index=self.wind_areas,columns=curstat_cols)
self.wcur_pc = pd.DataFrame(index=self.opt_countries,columns=curstat_cols)
for a in self.wcur_pa.index:
self.wcur_pa.loc[a,:] = curtailment_statistics(self.res_cur_WIND[a],self.up_WIND[a],curstat_cols)
for c in self.wcur_pc.index:
al = [a for a in self.country_to_areas[c] if a in self.wind_areas]
self.wcur_pc.loc[c,:] = curtailment_statistics( \
self.res_cur_WIND.loc[:,al].sum(axis=1), \
self.up_WIND.loc[:,al].sum(axis=1), \
curstat_cols)
# SOLAR ##
self.scur_tot = curtailment_statistics(self.res_cur_SOLAR.sum(axis=1),self.up_SOLAR.sum(axis=1),curstat_cols)
self.scur_pa = pd.DataFrame(index=self.solar_areas,columns=curstat_cols)
self.scur_pc = pd.DataFrame(index=self.opt_countries,columns=curstat_cols)
for a in self.scur_pa.index:
self.scur_pa.loc[a,:] = curtailment_statistics(self.res_cur_SOLAR[a],self.up_SOLAR[a],curstat_cols)
for c in self.scur_pc.index:
al = [a for a in self.country_to_areas[c] if a in self.solar_areas]
self.scur_pc.loc[c,:] = curtailment_statistics(
self.res_cur_SOLAR.loc[:,al].sum(axis=1),
self.up_SOLAR.loc[:,al].sum(axis=1),
curstat_cols)
## TOTAL ## # NOTE: SOLAR HAS HIGHER PRIORITY -> NO SOLAR CURTAILMENT
stat_thrs = 1e-4
ls = ['wcur_pa','wcur_pc','wcur_tot','scur_pa','scur_pc','scur_tot']
for name in ls:
entity = self.__getattribute__(name)
if type(entity) is pd.core.frame.DataFrame:
entity[entity < stat_thrs] = 0
# Compute some extra curtailment statistics
# share of curtailment per area
self.res_cur_per_area = pd.Series(index=self.wind_areas)
for a in self.res_cur_per_area.index:
self.res_cur_per_area.at[a] = 100 * self.res_cur_WIND[a].sum() / self.wcur_tot.at['GWh']
# share of curtailment per month
self.res_cur_per_month = pd.Series(index=range(1,13))
for month in self.res_cur_per_month.index:
self.res_cur_per_month.at[month] = 100 * self.res_cur_WIND.loc[[d for d in self.timerange if d.month == month],:].sum().sum() / self.wcur_tot.at['GWh']
# share of curtailment per hour
self.res_cur_per_hour = pd.Series(index=range(24))
for hour in self.res_cur_per_hour.index:
self.res_cur_per_hour.at[hour] = 100 * self.res_cur_WIND.loc[[h for h in self.timerange if h.hour == hour]].sum().sum() / self.wcur_tot.at['GWh']
## CALCULATE PRICES ##
# for each price area: calculate total demand + net exports
# check which price is required to produce corresponding amount in this area
# put watervalues into hydro cost data
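# the water value is the dual of the FIX_RESERVOIR constraint (marginal value of stored water
# at the end of the horizon), rescaled with MWtoGW so that it is (presumably) in the same
# units as the other c1 coefficients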
for g in self.gen_data.index:
if self.gen_data.at[g,'gtype'] == 'Hydro':
self.gen_data.at[g,'c2'] = 0
# get watervalues from duals
if hasattr(self,'dual_constr_FIX_RESERVOIR'):
self.gen_data.at[g,'c1'] = self.dual_constr_FIX_RESERVOIR.at[self.gen_data.at[g,'area']]/MWtoGW
# make supply curve for each region
# Currently not used. May be used for comparing model supply curves to Nordpool price curves
self.supply_curves = {}
for area in self.areas:
self.supply_curves[area] = SupplyCurve(gens=self.gen_data.loc[[g for g in self.gen_data.index if self.gen_data.at[g,'area'] == area],:])
self.supply_curves['Tot'] = SupplyCurve(gens=self.gen_data)
def add_hourly_maf_inflow(self):
prt = self.opt_print['setup']
for idx,date in enumerate(self.daysrange_weather_year):
for a in self.inflow_daily_maf.columns:
aidx = self.hydrores.index(a)
self.inflow_hourly.iloc[idx*24:(idx+1)*24,aidx] += self.inflow_daily_maf.at[date,a] / 24
def copy_data_to_model_year(self,df):
"""
Given a dataframe with data for the same timerange but a different year compared to the model, make a new
dataframe with the correct timerange. Note that it may be necessary to remove or add data for a leap day,
if the dataframe is for a leap year and the model is not, or vice versa.
Only works for data with hourly time resolution.
"""
# copy data into the model year, for those hours which exist in both the weather and model timeranges (these may differ due to leap years)
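# e.g. for a 1982 weather year and a 2019 model year (weather_year_diff = -37), the value at
# weather timestamp 1982-06-01 12:00 is copied to model timestamp 2019-06-01 12:00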
df2 = pd.DataFrame(dtype=float,index=self.timerange_p1,columns=df.columns)
self.idx_tups = []
for t in self.timerange_p1:
try: # note: trying to create a datetime object for feb 29 in year which is not leap year will give error
tt = datetime.datetime(t.year+self.weather_year_diff,t.month,t.day,t.hour)
self.idx_tups.append((tt,t))
except ValueError:
pass
#
# self.idx_tups = [(wt,mt) for wt,mt in [(datetime.datetime(t.year+self.weather_year_diff,t.month,t.day,t.hour),t) for t in self.timerange_p1]
# if wt in df.index]
self.weather_idx = [t[0] for t in self.idx_tups]
self.model_idx = [t[1] for t in self.idx_tups]
df2.loc[self.model_idx,:] = np.array(df.loc[self.weather_idx,:])
# fill possible gaps due to leap year in model data but not in weather data
dfnan = df2.isna().sum(axis=1)
leap_days = [d for d in self.daysrange if d.month == 2 and d.day == 29]
for ld in leap_days:
start_idx = list(self.timerange).index(datetime.datetime(ld.year,ld.month,ld.day))
if dfnan.iat[start_idx] == df2.shape[1]: # all missing, fill nan values
if start_idx > 23: # fill with previous day
df2.iloc[start_idx:start_idx+24,:] = np.array(df2.iloc[start_idx-24:start_idx,:])
else: # fill with next day
df2.iloc[start_idx:start_idx+24,:] = np.array(df2.iloc[start_idx+24:start_idx+48,:])
return df2
def impute_capacity_values(self):
prt = self.opt_print['setup']
#%% impute exchange capacities with fixed values
nnan = self.exchange_capacity.isna().sum()
for c in self.exchange_capacity.columns:
if nnan.at[c]:
if nnan.at[c] > self.nPeriods // 4:
if prt:
print(f'Imputing {nnan.at[c]} missing capacity values for {c} with nominal values')
a1 = c.split(self.area_sep_str)[0]
a2 = c.split(self.area_sep_str)[1]
# find row in exchange capacity table
for i,s in nordpool_capacities.iterrows():
if s['from'] == a1 and s['to'] == a2:
cap = s['c1']
break
elif s['to'] == a1 and s['from'] == a2:
cap = s['c2']
break
self.exchange_capacity.loc[self.exchange_capacity[c].isna(),c] = cap
def setup_reservoir_values(self):
prt = self.opt_print['setup']
# get starting value and end value for reservoir content
dates = [str_to_date(self.starttime),str_to_date(self.endtime)]
self.reservoir_fix = pd.DataFrame(dtype=float,index=dates,columns=self.hydrores)
reservoir_tmp = interp_time(dates,self.reservoir)
if self.opt_reservoir_data_normalized:
for a in self.reservoir_fix.columns:
if a in reservoir_tmp.columns:
self.reservoir_fix.loc[dates,a] = reservoir_tmp.loc[dates,a]*self.reservoir_capacity[a]
else:
self.reservoir_fix.loc[dates,reservoir_tmp.columns] = GWtoTW*reservoir_tmp.loc[dates,reservoir_tmp.columns]
for a in self.hydrores:
if np.isnan(self.reservoir_fix.at[dates[0],a]):
if prt:
print(f'Using default reservoir filling rates for {a}')
self.reservoir_fix.at[dates[0],a] = self.opt_reservoir_start_fill*self.reservoir_capacity[a]
if np.isnan(self.reservoir_fix.at[dates[1],a]):
self.reservoir_fix.at[dates[1],a] = self.opt_reservoir_end_fill*self.reservoir_capacity[a]
# interpolate reservoir values for initialization of variables
if self.opt_hydro_daily:
tidx = self.daysrange_p1
else:
tidx = self.timerange_p1
self.reservoir_interp = interp_time(tidx,self.reservoir_fix)
def interpolate_inflow(self):
prt = self.opt_print['setup']
## INTERPOLATE RESERVOIR INFLOW
if prt:
print('Interpolate weekly reservoir inflow')
self.inflow_hourly_tmp = interpolate_weekly_values(self.inflow,method=self.opt_inflow_interp)
# copy data to simulated year
self.inflow_hourly = pd.DataFrame(dtype=float,columns=self.hydrores,index=self.timerange)
icols = [i for i,c in enumerate(self.hydrores) if c in self.inflow_hourly_tmp.columns]
self.inflow_hourly.loc[:,self.inflow_hourly_tmp.columns] = \
np.array(self.inflow_hourly_tmp.iloc[self.inflow_offset:self.inflow_offset+self.nPeriods,icols])
self.inflow_hourly[self.inflow_hourly < 0] = 0 # replace negative values
# adding constant inflow, can be done after interpolation
for a in self.hydrores:
if a not in self.inflow_hourly_tmp.columns:
if a in self.opt_default_inflow_area:
self.inflow_hourly[a] = self.opt_default_inflow_area[a] / 168
else:
self.inflow_hourly[a] = self.opt_default_inflow / 168
def impute_values(self,data_series=['demand'],limit=20,prt=True):
# prt = self.opt_print['setup']
""" Replace missing values in data by linear interpolation """
for name in data_series:
if name in self.opt_impute_constant:
constant = self.opt_impute_constant[name]
else:
constant = None
entity = self.__getattribute__(name)
entity.interpolate(method='linear',inplace=True,limit=limit)
entity.fillna(method='bfill',limit=2,inplace=True)
entity.fillna(method='ffill',limit=2,inplace=True)
if constant is not None:
# fill all remaining nans with constant
nnan = entity.isna().sum().sum()
if nnan > 0:
if prt:
print(f'Imputing {nnan} constant values in {name}')
entity.fillna(constant,inplace=True)
else: # count missing values
nnan = entity.isna().sum().sum()
if nnan:
if prt:
print(f'Impute incomplete: {nnan} remaining missing values in {name}')
def print_rmse(self):
rmse_area = self.res_rmse_area.copy()
rmse_area.loc['Avg',:] = rmse_area.mean()
rmse_area_rel = self.res_rmse_area.copy()
norm_cols = ['Prod','Hydro','Thermal','Nuclear']
rmse_area_rel.loc[:,norm_cols] = rmse_area_rel.loc[:,norm_cols] / self.res_rmse_area_norm.loc[:,norm_cols]
rmse_area_rel.loc['Avg',:] = rmse_area_rel.mean()
rmse_conn = self.res_rmse_intcon.copy()
rmse_conn_rel = self.res_rmse_intcon.copy()
rmse_conn_ext = self.res_rmse_extcon.copy()
rmse_conn_ext_rel = self.res_rmse_extcon.copy()
rmse_conn_rel['RMSE'] = rmse_conn_rel['RMSE'] / self.res_rmse_intcon_norm
rmse_conn_ext_rel['RMSE'] = rmse_conn_ext_rel['RMSE'] / self.res_rmse_extcon_norm
rmse_conn.at['Avg', 'RMSE'] = np.mean(rmse_conn['RMSE'])
rmse_conn_ext.at['Avg', 'RMSE'] = np.mean(rmse_conn_ext['RMSE'])
rmse_conn_rel.at['Avg', 'RMSE'] = np.mean(rmse_conn_rel['RMSE'])
rmse_conn_ext_rel.at['Avg', 'RMSE'] = np.mean(rmse_conn_ext_rel['RMSE'])
#%%
na_rep = '-'
area_formatters = {
'Hydro':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Thermal':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Nuclear':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Prod':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Price':lambda x:f'{x:0.1f}' if not np.isnan(x) else na_rep,
}
area_formatters_rel = {
'Hydro':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Thermal':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Nuclear':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Prod':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
'Price':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
}
conn_formatters = {
'From':lambda x:x if type(x) is str else na_rep,
'To':lambda x:x if type(x) is str else na_rep,
'RMSE':lambda x:f'{x:0.3f}' if not np.isnan(x) else na_rep,
}
#%% print tables to text file
with open(self.res_path/f'errors.txt','wt') as f:
with redirect_stdout(f):
print(self.name + '\n')
print(f'-- AREA {self.opt_err_labl} --')
print(rmse_area)
print('\n')
print(f'-- AREA RELATIVE {self.opt_err_labl} --')
print(rmse_area_rel)
print('\n')
print(f'-- INTERNAL CONNECTION {self.opt_err_labl} --')
print(rmse_conn)
print('\n')
print(f'-- INTERNAL RELATIVE CONNECTION {self.opt_err_labl} --')
print(rmse_conn_rel)
print('\n')
print(f'-- EXTERNAL CONNECTION {self.opt_err_labl} --')
print(rmse_conn_ext)
print('\n')
print(f'-- EXTERNAL RELATIVE CONNECTION {self.opt_err_labl} --')
print(rmse_conn_ext_rel)
#%% print tables to latex file
with open(self.res_path/f'errors.tex','wt') as f:
with redirect_stdout(f):
print(self.name+'\n')
print(f'-- AREA {self.opt_err_labl} --')
rmse_area.to_latex(f,formatters=area_formatters,
header=['Prod. [GWh]','Hydro [GWh]','Thermal [GWh]','Nuclear [GWh]','Price [EUR/MWh]'])
print('\n')
print(f'-- AREA RELATIVE {self.opt_err_labl} --')
rmse_area_rel.to_latex(f,formatters=area_formatters_rel,
header=['Prod.','Hydro','Thermal','Nuclear','Price [EUR]'])
print('\n')
print(f'-- INTERNAL CONNECTION {self.opt_err_labl} --')
rmse_conn.to_latex(f,header=['From','To','RMSE [GWh]'],formatters=conn_formatters)
print('\n')
print(f'-- INTERNAL RELATIVE CONNECTION {self.opt_err_labl} --')
rmse_conn_rel.to_latex(f,header=['From','To','RMSE'],formatters=conn_formatters)
print('\n')
print(f'-- EXTERNAL CONNECTION {self.opt_err_labl} --')
rmse_conn_ext.to_latex(f,header=['From','To','RMSE [GWh]'],formatters=conn_formatters)
print('\n')
print(f'-- EXTERNAL RELATIVE CONNECTION {self.opt_err_labl} --')
rmse_conn_ext_rel.to_latex(f,header=['From','To','RMSE'],formatters=conn_formatters)
def print_hydro_table(self):
# path = 'D:/NordicModel/InputData/'
filename = 'hydro_table.txt'
df = pd.DataFrame(dtype=float,columns=['hydro','res','respump','pump','ror'],index=self.hydrores)
for a in self.hydrores:
gidx = next(g for g in self.idx_gen if self.gen_data.at[g,'area'] == a and self.gen_data.at[g,'gtype'] == 'Hydro')
df.at[a,'hydro'] = self.gen_data.at[gidx,'pmax']
if a in self.opt_pump_reservoir:
df.at[a,'respump'] = self.opt_pump_reservoir[a]
if a in self.opt_reservoir_capacity:
df.at[a,'res'] = self.opt_reservoir_capacity[a]
if a in self.ror_areas:
df.at[a,'ror'] = self.opt_ror_fraction[a]
if a in self.opt_pump_capacity:
df.at[a,'pump'] = self.opt_pump_capacity[a]
na_rep = '-'
headers = ['Hydro max (MW)', 'Reservoir (GWh)','Pump reservoir (GWh)','Pump max (MW)','ROR share']
with open(Path(self.res_path) / filename,'wt') as f:
df.to_latex(f,header=headers,
formatters={
'hydro':lambda x:str(np.round(x,1)) if not np.isnan(x) else na_rep,
'res':lambda x:str(np.round(x,1)) if not np.isnan(x) else na_rep,
'respump':lambda x:str(np.round(x,1)) if not np.isnan(x) else na_rep,
'pump':lambda x:str(np.round(x,1)) if not np.isnan(x) else na_rep,
'ror':lambda x:str(np.round(x,2)) if not np.isnan(x) else na_rep,
})
def print_renewable_table(self,countries=['SE','DK','NO','FI']):
#%% print wind and solar capacity tables, by area or country
areas = [a for a in all_areas if area_to_country[a] in countries]
if areas is None:
pass
wind_cap_area = pd.DataFrame(dtype=int,index=areas,columns=['onshore','offshore','solar'])
wind_cap_country = pd.DataFrame(0,dtype=int,
index=countries,
columns=['onshore','offshore','solar'])
# pv_cap_area = pd.DataFrame(dtype=int,index=self.solar_areas,columns=['mw'])
# pv_cap_country = pd.DataFrame(0,dtype=int,index=[c for c in self.opt_countries if \
# sum([1 for a in self.solar_areas if a in self.country_to_areas[c]])],columns=['mw'])
for a in areas:
val = self.opt_wind_capacity_onsh[a]
wind_cap_area.at[a,'onshore'] = val
wind_cap_country.at[self.area_to_country[a],'onshore'] += val
val = self.opt_wind_capacity_offsh[a]
wind_cap_area.at[a,'offshore'] = val
wind_cap_country.at[self.area_to_country[a],'offshore'] += val
sval = int(np.round(self.solar_capacity[a]*GWtoMW,-1))
wind_cap_area.at[a,'solar'] = sval
wind_cap_country.at[self.area_to_country[a],'solar'] += sval
#%
wind_cap_area.loc['Tot',:] = wind_cap_area.sum()
wind_cap_country.loc['Tot',:] = wind_cap_country.sum()
# for a in self.solar_areas:
# val = int(self.solar_capacity[a]*GWtoMW)
# pv_cap_area.at[a,'mw'] = val
# pv_cap_country.at[self.area_to_country[a],'mw'] += val
wind_cap_area = wind_cap_area.astype(int)
wind_cap_country = wind_cap_country.astype(int)
with open(self.res_path/f'renewable_capacity.tex','wt') as f:
with redirect_stdout(f):
print('--- RENEWABLE CAPACITY BY AREA ---')
wind_cap_area.to_latex(f)
print('--- RENEWABLE CAPACITY BY COUNTRY ---')
wind_cap_country.to_latex(f)
# print('--- SOLAR CAPACITY BY AREA ---')
# pv_cap_area.to_latex(f)
# print('--- SOLAR CAPACITY BY COUNTRY ---')
# pv_cap_country.to_latex(f)
def get_fig(self):
f = self.f
# f = plt.gcf()
f.clf()
f.set_size_inches(6.4,4.8)
# f.set_size_inches(6.8,4.8)
# f.set_tight_layout(False)
# plt.figure(f)
# plt.tight_layout()
return f
def plot_figures(self):
prt = self.opt_print['postprocess']
self.plot_vals = EmptyObject()
plt.ioff() # don't create new figures
plt.rc('text', usetex=False)
self.f = plt.figure()
# for which categories to use inset
self.plot_vals.plot_inset = {
'Hydro':True,
'Thermal':True,
'Nuclear':False,
}
# settings without inset
self.plot_vals.legend_pos = 'best'
self.plot_vals.simple_plot_size = (6,4)
# settings for inset
self.plot_vals.myFmt = "%m-%d"
if not hasattr(self,'fopt_inset_date'): # default options for old models
self.fopt_inset_date = None
self.fopt_inset_days = 5
if self.fopt_inset_date is None:
self.fopt_inset_date = self.starttime
self.plot_vals.inset_idx = pd.date_range(start=str_to_date(self.fopt_inset_date),
end=min((str_to_date(self.fopt_inset_date) \
+ datetime.timedelta(days=self.fopt_inset_days)),
str_to_date(self.endtime)+datetime.timedelta(hours=-1)),
freq='H')
self.plot_vals.ax2_width = 0.55
# legend position with inset
self.plot_vals.bbox = (0.77,1.02) # horizontal, vertical
self.plot_vals.inset_height_ratio = [1,1.6]
self.plot_vals.leg_loc = (0.77,1.15) # legend position
self.plot_vals.xpad = 0.04 # more space between left edge and start of x-axes
self.plot_vals.colors = {'Hydro':'skyblue',
'Slow':'#ff7f0e',
'Fast':'#d62728',
'Nuclear':'#8c564b',
'Wind':'#2ca02c',
'Thermal':'#ff7f0e',
'Solar':'khaki',
'HROR':'darkorchid',
}
if self.fopt_show_rmse:
self.plot_vals.annstr = f'{self.opt_err_labl}: ' + '{1:.3}\n' + f'N{self.opt_err_labl}: ' + '{0:.3}'
else:
self.plot_vals.annstr = f'N{self.opt_err_labl}: ' + '{0:.3}'
# list of all possible plots, make sure all items are present in fopt_plots, set to False for missing values
all_plots = ['gentype','gentot','gentot_bar','renewables','transfer_internal','transfer_external',
'reservoir','price','losses','load_curtailment','inertia','hydro_duration','wind_curtailment'
]
for f in all_plots:
if f not in self.fopt_plots:
self.fopt_plots[f] = False
self.plot_gentype()
self.plot_gentot()
if self.fopt_plots['renewables'] and not self.fopt_no_plots:
self.plot_renewables()
self.plot_transfer()
self.plot_reservoir()
self.plot_price()
if self.fopt_plots['wind_curtailment'] and not self.fopt_no_plots:
self.plot_wind_curtailment()
self.plot_miscellaneous()
plt.ion()
def plot_gentype(self):
prt = self.opt_print['postprocess']
dummy = True
for area in self.areas:
for gtype in self.generators_def[area]:
if gtype == 'Hydro':
hydro_prod = pd.Series(0.0,index=self.timerange)
for var in ['Hydro','HROR','REL']:
if var in self.res_gen[area]:
hydro_prod += self.res_gen[area][var]
irmse,irmse_rel,norm=err_func(self.entsoe_data[area][gtype]*MWtoGW,hydro_prod)
else:
pass
irmse,irmse_rel,norm=err_func(self.entsoe_data[area][gtype]*MWtoGW,self.res_gen[area][gtype])
self.res_rmse_area.at[area,gtype] = irmse
self.res_rmse_area_norm.at[area,gtype] = norm
if self.fopt_plots['gentype'] and not self.fopt_no_plots:
if prt and dummy:
dummy = False
print('Plot gentype')
############ collect plot data ###########
plot_data = pd.DataFrame(index=self.timerange,columns=['res','ror','model','data','pump','rel','up','lo'])
if gtype == 'Hydro':
plot_data['res'] = self.res_gen[area]['Hydro']
if area in self.ror_areas:
plot_data['ror'] = self.res_gen[area]['HROR']
if area in self.pump_areas:
plot_data['pump'] = -self.res_PUMP[area]
if area in self.pump_res_areas:
plot_data['rel'] = self.res_gen[area]['REL']
plot_data['model'] = hydro_prod
else:
plot_data['model'] = self.res_gen[area][gtype]
plot_data['data'] = self.entsoe_data[area][gtype]*MWtoGW
if gtype == 'Hydro' and area in self.ror_areas:
hgens = [g for g in self.gen_data.index if self.gen_data.at[g,'area'] == area \
and self.gen_data.at[g,'gtype']=='Hydro']
plot_data['up'] = self.gen_data.loc[hgens,'pmax'].sum() * MWtoGW
plot_data['lo'] = self.gen_data.loc[hgens,'pmin'].sum() * MWtoGW
else:
plot_data['up'] = [sum([self.max_PG[(i,t)] for i in self.idx_gen
if self.gen_data.at[i,'area'] == area and self.gen_data.at[i,'gtype'] == gtype])
for t in self.idx_time]
plot_data['lo'] = [sum([self.min_PG[(i,t)] for i in self.idx_gen
if self.gen_data.at[i,'area'] == area and self.gen_data.at[i,'gtype'] == gtype])
for t in self.idx_time]
########### plot figure ############
if self.plot_vals.plot_inset[gtype]: # figure with inset
# get axes
# f = plt.gcf()
# f.clf()
f = self.get_fig()
ax2,ax1 = f.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
# f,(ax2,ax1) = plt.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
pos = ax2.get_position()
ax2.set_position([pos.x0+self.plot_vals.xpad,pos.y0,self.plot_vals.ax2_width,pos.height])
pos = ax1.get_position()
ax1.set_position([pos.x0+self.plot_vals.xpad,pos.y0,pos.width-self.plot_vals.xpad,pos.height])
# main plot
if gtype == 'Hydro':
if area in self.pump_areas and area in self.ror_areas:
if area in self.pump_res_areas:
ptypes = ['res','ror','rel','model','data','pump','up','lo']
pcolors = [colors['Hydro'],colors['HROR'],colors['REL'],'C0','C1',colors['PUMP'],'k','k']
pstyles = ['--',':','--','-','-','-','--','--']
plegends = ['reservoir','run of river','pump release','model','data','pumping']
else:
ptypes = ['res','ror','model','data','pump','up','lo']
pcolors = [colors['Hydro'],colors['HROR'],'C0','C1',colors['PUMP'],'k','k']
pstyles = ['--',':','-','-','-','--','--']
plegends = ['reservoir','run of river','model','data','pumping']
elif area in self.pump_areas: # pumping but no run of river
if area in self.pump_res_areas:
ptypes = ['res','rel','model','data','pump','up','lo']
pcolors = [colors['Hydro'],colors['REL'],'C0','C1',colors['PUMP'],'k','k']
pstyles = ['--','--','-','-','-','--','--']
plegends = ['reservoir','pump release','model','data','pumping']
else: # pumping, no run of river
ptypes = ['model','data','pump','up','lo']
pcolors = ['C0','C1',colors['PUMP'],'k','k']
pstyles = ['-','-','-','--','--']
plegends = ['model','data','pumping']
elif area in self.ror_areas: # only ror
ptypes = ['res','ror','model','data','up','lo']
pcolors = [colors['Hydro'],colors['HROR'],'C0','C1','k','k']
pstyles = ['--',':','-','-','--','--']
plegends = ['reservoir','run of river','model','data']
else: # standard plot, neither pumping or run of river
ptypes = ['model','data','up','lo']
pcolors = ['C0','C1','k','k']
pstyles = ['-','-','--','--']
plegends = ['model','data']
plot_data.loc[:,ptypes].plot(ax=ax1,color=pcolors,style=pstyles)
ax1.legend(plegends,title=self.plot_vals.annstr.format(irmse_rel,irmse),
bbox_to_anchor=self.plot_vals.bbox)
else:
plot_data.loc[:,['model','data','up','lo']].plot(ax=ax1,color=['C0','C1','k','k'],
style=['-','-','--','--'])
ax1.legend(['model','data',],title=self.plot_vals.annstr.format(irmse_rel,irmse),
bbox_to_anchor=self.plot_vals.bbox)
ax1.set_ylabel('GWh')
if self.fopt_use_titles:
f.suptitle('{2}: {1} production {0}'.format(area,gtype,self.name))
ax1.set_zorder(1)
ax1.grid()
# remove line breaks from ticks
compact_xaxis_ticks(f,ax1)
# inset
ax2.plot(plot_data.loc[self.plot_vals.inset_idx,['model','data']])
ax2.grid()
ax2.xaxis.set_major_formatter(mdates.DateFormatter(self.plot_vals.myFmt))
ax2.xaxis.set_major_locator(mdates.DayLocator())
else: # only one figure
f = self.get_fig()
ax = f.subplots()
if gtype == 'Hydro':
plot_data.plot(ax=ax,color=[colors['Hydro'],colors['HROR'],'C0','C1','k','k'],style=['--',':','-','-','--','--'])
plt.legend(['reservoir','run of river','model','data',],title=self.plot_vals.annstr.format(irmse_rel,irmse),loc=self.plot_vals.legend_pos)
else:
plot_data.loc[:,['model','data','up','lo']].plot(ax=ax,color=['C0','C1','k','k'],style=['-','-','--','--'])
plt.legend(['model','data',],title=self.plot_vals.annstr.format(irmse_rel,irmse),loc=self.plot_vals.legend_pos)
plt.grid()
if self.fopt_use_titles:
plt.title('{2}: {1} production {0}'.format(area,gtype,self.name))
plt.ylabel('GWh')
# plt.tight_layout()
# plt.gcf().set_size_inches(self.plot_vals.simple_plot_size[0],self.plot_vals.simple_plot_size[1])
compact_xaxis_ticks(f,ax)
############ save figure ##########
plt.savefig(self.fig_path / 'gen_by_type_{0}_{1}.png'.format(area,gtype))
if self.fopt_eps:
plt.savefig(self.fig_path / 'gen_by_type_{0}_{1}.eps'.format(area,gtype),
dpi=self.fopt_dpi_qual)
if not self.plot_vals.plot_inset[gtype]:
for w in self.fopt_plot_weeks:
t1,t2 = week_to_range(w,int(self.starttime[:4]))
plt.xlim([t1,t2])
plt.savefig(self.fig_path / 'gen_by_type_{0}_{1}_w{2}.png'.format(area,gtype,w))
if self.fopt_eps:
plt.savefig(self.fig_path / 'gen_by_type_{0}_{1}_w{2}.eps'.format(area,gtype,w),
dpi=self.fopt_dpi_qual)
# plt.clf()
def plot_gentot(self):
prt = self.opt_print['postprocess']
dummy = True
## PLOT GENERATION PER AREA ##
for area in self.areas:
irmse,irmse_rel,norm = err_func(self.entsoe_data[area]['Tot']*MWtoGW,self.res_gen[area].sum(axis=1))
self.res_rmse_area.at[area,'Prod'] = irmse
self.res_rmse_area_norm.at[area,'Prod'] = norm
if self.fopt_plots['gentot_bar'] and not self.fopt_no_plots:
if prt and dummy:
dummy = False
print('Plot gentot stacked')
# print(f'Bar {area}')
############ STACKED GENERATION OF DIFFERENT TYPES ############
# ax1 = f.add_axes()
f = self.get_fig()
ax1 = f.subplots()
self.res_gen[area].plot.area(ax=ax1,color = [colors[f] for f in self.res_gen[area].columns])
self.res_D[area].plot(ax=ax1,color='black',label='Demand')
self.res_exp[area].sum(axis=1).plot(ax=ax1,color='black',linestyle='--',label='Exports')
if area in self.pump_areas:
self.res_PUMP[area].plot(ax=ax1,color=colors['PUMP'],label='Pumping')
plt.legend(ax1.legendlabels)
if self.fopt_use_titles:
plt.title('{0}: Production {1}'.format(self.name,area))
plt.grid()
plt.ylabel('GWh')
ylim_padding = 0.5
ax1.set_ylim([min([min(l.get_ydata()) for l in ax1.lines])-ylim_padding,max([max(l.get_ydata()) for l in ax1.lines])+ylim_padding])
plt.savefig(self.fig_path/'gen_area_plot_{0}.png'.format(area))
if self.fopt_eps:
plt.savefig(self.fig_path/'gen_area_plot_{0}.eps'.format(area),dpi=self.fopt_dpi_qual)
for w in self.fopt_plot_weeks:
# find daterange for this week
t1,t2 = week_to_range(w,int(self.starttime[:4]))
plt.xlim([t1,t2])
plt.savefig(self.fig_path/'gen_area_plot_{0}_w{1}.png'.format(area,w))
if self.fopt_eps:
plt.savefig(self.fig_path/'gen_area_plot_{0}_w{1}.eps'.format(area,w))
dummy = True
for area in self.areas:
# f = self.get_fig()
######## TOTAL GENERATION, COMPARE NORDPOOL ##############
if self.fopt_plots['gentot'] and not self.fopt_no_plots:
if prt and dummy:
dummy = False
print('Plot total generation')
plot_data = pd.DataFrame(index=self.timerange,columns=['model','data'])
plot_data['model'] = self.res_gen[area].sum(axis=1)
plot_data['data'] = self.entsoe_data[area]['Tot']*MWtoGW
# get axes
f = self.get_fig()
# f = plt.gcf()
# f.clf()
# print(f'Gentot {area}')
ax2,ax1 = f.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
# f,(ax2,ax1) = plt.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
pos = ax2.get_position()
ax2.set_position([pos.x0+self.plot_vals.xpad,pos.y0,self.plot_vals.ax2_width,pos.height])
pos = ax1.get_position()
ax1.set_position([pos.x0+self.plot_vals.xpad,pos.y0,pos.width-self.plot_vals.xpad,pos.height])
irmse = self.res_rmse_area.at[area,'Prod']
norm = self.res_rmse_area_norm.at[area,'Prod']
irmse_rel = irmse/norm
# main plot
plot_data.plot(ax=ax1)
ax1.legend(['model','data'],title=self.plot_vals.annstr.format(irmse_rel,irmse),
loc=self.plot_vals.leg_loc)
ax1.set_ylabel('GWh')
if self.fopt_use_titles:
f.suptitle('{0}: Total production {1}'.format(self.name,area))
ax1.set_zorder(1)
ax1.grid()
# remove line breaks from ticks
compact_xaxis_ticks(f,ax1)
# inset
ax2.plot(plot_data.loc[self.plot_vals.inset_idx,:])
ax2.grid()
ax2.xaxis.set_major_formatter(mdates.DateFormatter(self.plot_vals.myFmt))
ax2.xaxis.set_major_locator(mdates.DayLocator())
plt.savefig(self.fig_path/f'gentot_{area}.png')
if self.fopt_eps:
plt.savefig(self.fig_path/f'gentot_{area}.eps',dpi=self.fopt_dpi_qual)
## PLOT GENERATION FOR COUNTRIES ##
if self.fopt_plots['gentot_bar'] and not self.fopt_no_plots:
if prt:
print('Plot gentot for countries')
for c in self.multi_area_countries:
# print(f'Bar {c}')
                f = self.get_fig()
ax = f.subplots()
self.res_gen[c].plot.area(ax=ax,color = [colors[f] for f in self.res_gen[c].columns])
plt.plot(ax.lines[0].get_xdata(),self.res_D.loc[:,[col for col in self.demand.columns if c in col]].sum(axis=1),'k')
plt.plot(ax.lines[0].get_xdata(),self.res_exp[c].sum(axis=1),'--k')
plt.legend(ax.legendlabels + ['Demand','Export'])
ylim_padding = 0.5
ax.set_ylim([min([min(l.get_ydata()) for l in ax.lines])-ylim_padding,max([max(l.get_ydata()) for l in ax.lines])+ylim_padding])
plt.grid()
plt.ylabel('GWh/h')
if self.fopt_use_titles:
plt.title('{0}: Production {1}'.format(self.name,c))
plt.savefig(self.fig_path/'gen_area_plot_{0}.png'.format(c))
if self.fopt_eps:
plt.savefig(self.fig_path/'gen_area_plot_{0}.eps'.format(c),dpi=self.fopt_dpi_qual)
for w in self.fopt_plot_weeks:
t1,t2 = week_to_range(w,int(self.starttime[:4]))
plt.xlim([t1,t2])
plt.savefig(self.fig_path/'gen_area_plot_{0}_w{1}.png'.format(c,w))
if self.fopt_eps:
plt.savefig(self.fig_path/'gen_area_plot_{0}_w{1}.eps'.format(c,w),dpi=self.fopt_dpi_qual)
# using the bar plot creates a new figure
# plt.cla()
def plot_renewables(self):
prt = self.opt_print['postprocess']
if prt:
print('Plot renewables')
# plot wind and solar production
wcolor = 'royalblue'
scolor = 'gold'
for area in [a for a in self.areas if a in self.wind_areas or a in self.solar_areas]:
# get axes
f = self.get_fig()
ax2,ax1 = f.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
pos = ax2.get_position()
ax2.set_position([pos.x0,pos.y0,self.plot_vals.ax2_width,pos.height])
if area in self.wind_areas:
self.res_gen[area]['Wind'].plot(ax=ax1,label='Wind',color=wcolor)
if area in self.solar_areas:
self.res_gen[area]['Solar'].plot(ax=ax1,label='Solar',color=scolor)
ax1.legend(bbox_to_anchor=self.plot_vals.bbox)
ax1.set_ylabel('GWh')
if self.fopt_use_titles:
f.suptitle(f'{self.name}: Renewable generation {area}')
ax1.set_zorder(1)
ax1.grid()
# remove line breaks from ticks
compact_xaxis_ticks(f,ax1)
# inset
if area in self.wind_areas:
ax2.plot(self.res_gen[area]['Wind'].loc[self.plot_vals.inset_idx],color=wcolor)
if area in self.solar_areas:
ax2.plot(self.res_gen[area]['Solar'].loc[self.plot_vals.inset_idx],color=scolor)
ax2.grid()
ax2.xaxis.set_major_formatter(mdates.DateFormatter(self.plot_vals.myFmt))
ax2.xaxis.set_major_locator(mdates.DayLocator())
plt.savefig(self.fig_path/f'renewables_{area}.png')
if self.fopt_eps:
plt.savefig(self.fig_path/f'renewables_{area}.eps',dpi=self.fopt_dpi_qual)
def plot_transfer(self):
prt = self.opt_print['postprocess']
dummy = True
# get data
for conn in self.xtrans_int.index:
if self.fopt_plots['transfer_internal'] or self.fopt_calc_rmse['transfer']:
cname = self.xtrans_int.at[conn,'label_fw']
if cname in self.df_exchange_rmse.columns:
irmse,irmse_rel,norm = err_func(self.df_exchange_rmse[cname]*MWtoGW,self.res_xint[conn])
else:
irmse = np.nan
irmse_rel = np.nan
norm = np.nan
self.res_rmse_intcon.at[conn,'RMSE'] = irmse
self.res_rmse_intcon_norm.at[conn] = norm
if self.fopt_plots['transfer_internal'] and not self.fopt_no_plots:
if prt and dummy:
dummy = False
print('Plot transfer')
plot_data = pd.DataFrame(index=self.timerange,columns=['model','data','up','lo'])
plot_data['model'] = self.res_X1[conn]-self.res_X2[conn]
# if not self.xint_to_exchange[conn] is None:
if cname in self.df_exchange_rmse.columns:
plot_data['data'] = self.df_exchange_rmse.loc[:,cname]*MWtoGW
# plot_data['up'] = self.up_X1.loc[:,conn]
plot_data['up'] = [self.max_X1[(conn,t)] for t in self.idx_time]
# plot_data['lo'] = -self.up_X2.loc[:,conn]
plot_data['lo'] = [-self.max_X2[(conn,t)] for t in self.idx_time]
# get axes
# f = plt.gcf()
# f.clf()
f = self.get_fig()
ax2,ax1 = f.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
# f,(ax2,ax1) = plt.subplots(2,1,gridspec_kw={'height_ratios':self.plot_vals.inset_height_ratio})
pos = ax2.get_position()
ax2.set_position([pos.x0+self.plot_vals.xpad,pos.y0,self.plot_vals.ax2_width,pos.height])
pos = ax1.get_position()
ax1.set_position([pos.x0+self.plot_vals.xpad,pos.y0,pos.width-self.plot_vals.xpad,pos.height])
# main plot
plot_data.plot(ax=ax1,color=['C0','C1','k','k'],style=['-','-','--','--'])
if cname in self.df_exchange_rmse.columns:
labels = ['model','data']
else:
labels = ['model']
ax1.legend(labels,title=self.plot_vals.annstr.format(irmse_rel,irmse),bbox_to_anchor=self.plot_vals.bbox)
ax1.set_ylabel('GWh')
if self.fopt_use_titles:
f.suptitle('{2}: Transfer {0} -> {1}'.format(self.xtrans_int.at[conn,'from'],self.xtrans_int.at[conn,'to'],self.name))
ax1.set_zorder(1)
ax1.grid()
# remove line breaks from ticks
compact_xaxis_ticks(f,ax1)
# inset
ax2.plot(plot_data.loc[self.plot_vals.inset_idx,['model','data']])
ax2.grid()
ax2.xaxis.set_major_formatter(mdates.DateFormatter(self.plot_vals.myFmt))
ax2.xaxis.set_major_locator(mdates.DayLocator())
plt.savefig(self.fig_path/'xtrans_{0}-{1}.png'.format(self.xtrans_int.at[conn,'from'],self.xtrans_int.at[conn,'to']))
if self.fopt_eps:
plt.savefig(self.fig_path/'xtrans_{0}-{1}.eps'.format(self.xtrans_int.at[conn,'from'],self.xtrans_int.at[conn,'to']),dpi=self.fopt_dpi_qual)
# external variable connections
for conn in self.fixed_price_connections:
if self.fopt_plots['transfer_external'] or self.fopt_calc_rmse['transfer']:
cname = self.xtrans_ext.at[conn,'label_fw']
irmse,irmse_rel,norm=err_func(self.df_exchange_rmse[cname]*MWtoGW,self.res_XEXT[conn])
self.res_rmse_extcon.at[conn,'RMSE'] = irmse
self.res_rmse_extcon_norm.at[conn] = norm
if self.fopt_plots['transfer_external'] and not self.fopt_no_plots:
f = self.get_fig()
ax = self.res_XEXT[conn].plot()
pos = ax.get_position()
ax.set_position([pos.x0+self.plot_vals.xpad,pos.y0,pos.width-self.plot_vals.xpad,pos.height])
plt.grid()
plt.ylabel('GWh')
# plot nordpool values
# if not self.xext_to_exchange[conn] is None:
plt.plot(ax.lines[0].get_xdata(),self.df_exchange_rmse.loc[:,self.xtrans_ext.at[conn,'label_fw']]*MWtoGW)
# plot limits
# plt.plot([ax.lines[0].get_xdata()[0], ax.lines[0].get_xdata()[-1]],[self.xtrans_ext.at[conn,'c1']*MWtoGW,self.xtrans_ext.at[conn,'c1']*MWtoGW],color='black',linestyle='--')
# plt.plot([ax.lines[0].get_xdata()[0], ax.lines[0].get_xdata()[-1]],[-self.xtrans_ext.at[conn,'c2']*MWtoGW,-self.xtrans_ext.at[conn,'c2']*MWtoGW],color='black',linestyle='--')
# plt.plot(ax.lines[0].get_xdata(),self.lo_XEXT[conn],color='black',linestyle='--')
# plt.plot(ax.lines[0].get_xdata(),self.up_XEXT[conn],color='black',linestyle='--')
plt.plot(ax.lines[0].get_xdata(),[self.min_XEXT[conn,t] for t in self.idx_time],color='black',linestyle='--')
plt.plot(ax.lines[0].get_xdata(),[self.max_XEXT[conn,t] for t in self.idx_time],color='black',linestyle='--')
# if not self.xext_to_exchange[conn] is None:
plt.legend(['Model','Nordpool data','Max','Min'],title=self.plot_vals.annstr.format(irmse_rel,irmse),loc=self.plot_vals.legend_pos)
# else:
# plt.legend(['Model','Max','Min'],loc=self.plot_vals.legend_pos)
if self.fopt_use_titles:
plt.title('{2}: {0} -> {1}'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to'],self.name))
plt.savefig(self.fig_path/'xtrans_{0}-{1}.png'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to']))
if self.fopt_eps:
plt.savefig(self.fig_path/'xtrans_{0}-{1}.eps'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to']),dpi=self.fopt_dpi_qual)
for w in self.fopt_plot_weeks:
t1,t2 = week_to_range(w,int(self.starttime[:4]))
plt.xlim([t1,t2])
plt.savefig(self.fig_path/'xtrans_{0}-{1}_w{2}.png'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to'],w))
if self.fopt_eps:
plt.savefig(self.fig_path/'xtrans_{0}-{1}_w{2}.eps'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to'],w),dpi=self.fopt_dpi_qual)
# external fixed connections
for conn in self.fixed_transfer_connections:
if self.fopt_plots['transfer_external'] and not self.fopt_no_plots:
f = self.get_fig()
ax = self.res_xext[conn].plot()
pos = ax.get_position()
ax.set_position([pos.x0+self.plot_vals.xpad,pos.y0,pos.width-self.plot_vals.xpad,pos.height])
if self.fopt_use_titles:
plt.title('{2}: {0} -> {1} (fixed)'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to'],self.name))
plt.grid()
plt.ylabel('GWh')
compact_xaxis_ticks(plt.gcf(),ax)
plt.savefig(self.fig_path/'xtrans_{0}_{1}_fixed.png'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to']))
if self.fopt_eps:
plt.savefig(self.fig_path/'xtrans_{0}_{1}_fixed.eps'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to']),dpi=self.fopt_dpi_qual)
for w in self.fopt_plot_weeks:
t1,t2 = week_to_range(w,int(self.starttime[:4]))
plt.xlim([t1,t2])
plt.savefig(self.fig_path/'xtrans_{0}_{1}_fixed_w{2}.png'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to'],w))
if self.fopt_eps:
plt.savefig(self.fig_path/'xtrans_{0}_{1}_fixed_w{2}.eps'.format(self.xtrans_ext.at[conn,'from'],self.xtrans_ext.at[conn,'to'],w),dpi=self.fopt_dpi_qual)
# plt.clf()
def plot_reservoir(self):
prt = self.opt_print['postprocess']
from help_functions import interp_time
if self.fopt_plots['reservoir'] and not self.fopt_no_plots:
if prt:
print('Plot reservoirs')
if self.opt_reservoir_data_normalized:
tmp = interp_time(dates=self.timerange_p1,df=self.reservoir)
                self.reservoir_hourly = pd.DataFrame(dtype=float,index=self.timerange_p1,columns=self.reservoir.columns)
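# Hedged sketch (not part of the model class above): the hourly interpolation of
# weekly reservoir levels done via interp_time can also be illustrated with plain
# pandas resampling; the dates and levels below are made up.
def _reservoir_interp_sketch():
    import pandas as pd
    weekly = pd.Series([70.0, 68.5, 67.0],
                       index=pd.date_range('2019-01-07', periods=3, freq='W-MON'))
    # upsample to hourly points and fill linearly between the weekly values
    return weekly.resample('H').interpolate('linear')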
import pandas as pd
import toffee
class SpectralLibrary():
"""
SpectralLibrary data type.
    This is essentially just a wrapper around a pandas dataframe, `data`. It provides convenient inits from one
    file type, common operations, and a standard format with which to pass to procantoolbox figure factories.
"""
# This is the mass difference between the two isotopes of Carbon (C12 and C13)
C13C12_MASS_DIFF = 1.0033548
MINIMUM_HEADERS = [
'PrecursorMz',
'ProductMz',
'PrecursorCharge',
'ProductCharge',
'LibraryIntensity',
'NormalizedRetentionTime',
'ProteinId',
'PeptideSequence',
'ModifiedPeptideSequence',
'TransitionGroupId',
'TransitionId',
]
def __init__(self, df):
"""
If headers need to be renamed, they will be. Furthermore, a ProductCharge will also be added with
        a default of 1 if it doesn't exist. This matches assumptions made by OpenSwath.
"""
consistent_headers = {
'transition_group_id': 'TransitionGroupId',
'transition_name': 'TransitionId',
'FullUniModPeptideName': 'ModifiedPeptideSequence',
'ProteinName': 'ProteinId',
'Tr_recalibrated': 'NormalizedRetentionTime',
}
df = df.rename(columns=consistent_headers)
if 'ProductCharge' not in df.columns:
df['ProductCharge'] = 1
missing_headers = set(self.MINIMUM_HEADERS).difference(set(df.columns))
if len(missing_headers) > 0:
raise RuntimeError('Missing important headers: {}'.format(missing_headers))
self.data = df
@classmethod
def init_from_df(cls, df):
return cls(df)
@classmethod
def init_from_file(cls, srl_fname):
        df = pd.read_table(srl_fname)
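# Illustrative sketch (not from the original module): building a one-row library that
# satisfies MINIMUM_HEADERS and wrapping it; all values below are made up.
def _example_minimal_library():
    row = {
        'PrecursorMz': 450.1, 'ProductMz': 300.2, 'PrecursorCharge': 2,
        'ProductCharge': 1, 'LibraryIntensity': 1000.0,
        'NormalizedRetentionTime': 25.3, 'ProteinId': 'P12345',
        'PeptideSequence': 'PEPTIDEK', 'ModifiedPeptideSequence': 'PEPTIDEK',
        'TransitionGroupId': 'tg_1', 'TransitionId': 'tr_1',
    }
    # no renaming needed and no missing headers, so this passes the __init__ checks
    return SpectralLibrary.init_from_df(pd.DataFrame([row]))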
import argparse
import math
import sys
import pandas as pd
from scipy import stats
def calc_interval(df: pd.DataFrame) -> pd.DataFrame:
means = []
deltas = []
for _, items in df.items():
n = len(items)
mean = items.mean()
var = items.var()
if var == 0:
means.append(items.mean())
deltas.append(0)
continue
"""
t分布で信頼区間を計算
alpha: 何パーセント信頼区間か
df: t分布の自由度
loc: 平均 X bar
scale: 標準偏差 s
"""
lower, upper = stats.t.interval(alpha=0.95,
df=n - 1,
loc=mean,
scale=math.sqrt(var / n))
means.append(mean)
deltas.append(upper - mean)
return pd.DataFrame.from_dict({'mean': means,
'delta': deltas},
orient='index',
columns=df.columns)
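# Minimal sketch (not part of the original script) of the interval computed above,
# for one hand-made sample; the numbers are arbitrary.
def _ci_example():
    sample = pd.Series([10.0, 12.0, 11.0, 13.0, 9.0])
    n, mean, var = len(sample), sample.mean(), sample.var()
    lower, upper = stats.t.interval(alpha=0.95,
                                    df=n - 1,
                                    loc=mean,
                                    scale=math.sqrt(var / n))
    # calc_interval stores the pair (mean, upper - mean) for each column
    return mean, upper - mean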
def calc_pval(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
t_vals = []
p_vals = []
cols = []
for col in [col for col in df1.columns if col in df2.columns]:
"""
Welchのt検定 (サンプル間に対応なし & 等分散性なし)
"""
t_val, p_val = stats.ttest_ind(df2[col], df1[col], equal_var=False)
t_vals.append(t_val)
p_vals.append(p_val)
cols.append(col)
return pd.DataFrame.from_dict({'t-value': t_vals, 'p-value': p_vals},
orient='index',
columns=cols)
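# Minimal sketch (not part of the original script) of the Welch test used above,
# on two made-up samples, without pairing or an equal-variance assumption.
def _welch_example():
    a = pd.Series([10.1, 9.8, 10.3, 10.0])
    b = pd.Series([11.0, 10.7, 11.2, 10.9])
    t_val, p_val = stats.ttest_ind(b, a, equal_var=False)
    return t_val, p_val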
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data', type=str, nargs='+',
help='1 or 2 csv files to test')
parser.add_argument('--precision', '--prec', default=4, type=int,
help='Number of digits after the decimal point')
parser.add_argument('--percent', action='store_true', default=False,
                        help='display values as percent (%%)')
args = parser.parse_args()
pd.options.display.precision = args.precision
if len(args.data) == 1:
        df_interval = calc_interval(pd.read_csv(args.data[0]))
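# Hypothetical command lines for this script (script and file names are made up):
#   python ttest_report.py run_a.csv              # one file: mean and 95 % CI per column
#   python ttest_report.py run_a.csv run_b.csv    # two files: Welch t-test per shared column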
# This program loads the HILT data and parses it into a nice format
import argparse
import pathlib
import zipfile
import re
from datetime import datetime, date
import pandas as pd
import numpy as np
from sampex_microburst_widths import config
class Load_SAMPEX_HILT:
def __init__(self, load_date, extract=False,
time_index=True, verbose=False):
"""
        Load the HILT data given a date. This class will look for
a file with the "hhrrYYYYDOY*" filename pattern and open the
found csv file. If the file is zipped, it will first be unzipped.
If you want to extract the file as well, set extract=True.
time_index=True sets the time index of self.hilt to datetime objects
otherwise the index is just an enumerated list.
"""
self.load_date = load_date
self.verbose = verbose
# If date is in string format, convert to a pd.Timestamp object
if isinstance(self.load_date, str):
self.load_date = pd.to_datetime(self.load_date)
# Figure out how to calculate the day of year (DOY)
if isinstance(self.load_date, pd.Timestamp):
doy = str(self.load_date.dayofyear).zfill(3)
elif isinstance(self.load_date, (datetime, date) ):
doy = str(self.load_date.timetuple().tm_yday).zfill(3)
# Get the filename and search for it. If multiple or no
# unique files are found this will raise an assertion error.
file_name_glob = f'hhrr{self.load_date.year}{doy}*'
matched_files = list(
pathlib.Path(config.SAMPEX_DIR, 'hilt').rglob(file_name_glob)
)
# 1 if there is just one file, and 2 if there is a file.txt and
# file.txt.zip files.
assert len(matched_files) in [1, 2], (f'0 or >2 matched HILT files found.'
f'\n{file_name_glob}'
f'\nmatched_files={matched_files}')
self.file_path = matched_files[0]
# Load the zipped data and extract if a zip file was found.
        if self.file_path.suffix == '.zip':
self.read_zip(self.file_path, extract=extract)
else:
self.read_csv(self.file_path)
# Parse the seconds of day time column to datetime objects
self.parse_time(time_index=time_index)
return
def read_zip(self, zip_path, extract=False):
"""
        Open the zip file and load in the csv file. If extract=False, then the file
will only be opened and not extracted to a text file in the
sampex/data/hilt directory.
"""
txt_name = zip_path.stem # Remove the .zip from the zip_path
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
if extract:
zip_ref.extractall(zip_path.parent)
#self.hilt = pd.read_csv(zip_path.parent / txt_name)
self.read_csv(zip_path.parent / txt_name)
else:
with zip_ref.open(txt_name) as f:
# self.hilt = pd.read_csv(f, sep=' ')
self.read_csv(f)
return
def read_csv(self, path):
"""
Reads in the CSV file given either the filename or the
zip file reference
"""
if self.verbose:
print(f'Loading SAMPEX HILT data from {self.load_date.date()} from {path.name}')
self.hilt = pd.read_csv(path, sep=' ')
return
def parse_time(self, time_index=True):
"""
Parse the seconds of day column to a datetime column.
If time_index=True, the time column will become the index.
"""
# Check if the seconds are monitonically increasing.
np_time = self.hilt['Time'].to_numpy()
if np.any(np_time[1:] < np_time[:-1]):
            raise RuntimeError('The SAMPEX HILT data is not in order.')
# Convert seconds of day to a datetime object.
day_seconds_obj = pd.to_timedelta(self.hilt['Time'], unit='s')
self.hilt['Time'] = pd.Timestamp(self.load_date.date()) + day_seconds_obj
if time_index:
self.hilt.index = self.hilt['Time']
del(self.hilt['Time'])
return
def resolve_counts_state4(self):
"""
This function resolves the HILT counts to 20 ms resolution assuming
the data is in state4. The counts represent the sum from the 4 SSDs.
Data saved in self.hilt_resolved
"""
resolution_ms = 20E-3
# Resolve the counts using numpy (most efficient way with
# static memory allocation)
self.counts = np.nan*np.zeros(5*self.hilt.shape[0], dtype=int)
for i in [0, 1, 2, 3]:
self.counts[i::5] = self.hilt[f'Rate{i+1}']
# This line is different because rate5 is 100 ms SSD4 data.
self.counts[4::5] = self.hilt['Rate6']
# Resolve the time array.
self.times = np.nan*np.zeros(5*self.hilt.shape[0], dtype=object)
for i in [0, 1, 2, 3, 4]:
self.times[i::5] = self.hilt.index + pd.to_timedelta(resolution_ms*i, unit='s')
        self.hilt_resolved = pd.DataFrame(data={'counts':self.counts}, index=self.times)
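# Hedged usage sketch (not part of the original module); the date is made up and
# assumes the matching hhrrYYYYDOY file exists under config.SAMPEX_DIR.
def _example_resolve_day():
    hilt = Load_SAMPEX_HILT('2000-08-12', verbose=True)
    hilt.resolve_counts_state4()
    # 20 ms resolution counts indexed by timestamp
    return hilt.hilt_resolved.head()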
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) as input. empty - factory that
        # makes sized objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
        # preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
        # overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not | is_platform_little_endian() | pandas.compat.is_platform_little_endian |
import unittest
from .. import simulate_endToEnd
from Bio.Seq import MutableSeq
from Bio import SeqIO
from Bio.Alphabet import generic_dna
import pandas as pd
import numpy as np
import mock
import os
class TestSimulateNormal(unittest.TestCase):
def setUp(self):
self.genome = {"chr1": MutableSeq("NNNNAGAGCTACGATGCTACGATGNNNNN", generic_dna),
"chr2": MutableSeq("NNNNNNAGAGCTACNNNGATGCGATGNN", generic_dna)}
def test_remove_Ns(self):
genome_out = {}
(genome_out['chr1'], offset) = simulate_endToEnd.remove_trailing_N_characters(self.genome['chr1'])
(genome_out['chr2'], offset) = simulate_endToEnd.remove_trailing_N_characters(self.genome['chr2'])
self.assertEqual(genome_out, {"chr1": MutableSeq("AGAGCTACGATGCTACGATG", generic_dna),
"chr2": MutableSeq("AGAGCTACNNNGATGCGATG", generic_dna)})
def test_subtract_beds(self):
lists = [['chr2', 6, 7, 'insertion', 'AAA', 2],['chr1', 6, 15, 'inversion', '-', 0]]
first_bed = | pd.DataFrame(lists) | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.backends.backend_pdf import PdfPages
import math
from config import site, dates, folders
from os import listdir
from os.path import isfile, join
dir = "/home/surya/Programs/PycharmProjects/air_model/data/raw/guttannen/"
def lum2temp(y0):
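    """
    Invert the luminosity calibration stored in lum_values.csv: two straight
    lines are fitted to the (X, Y) calibration points, split around the point
    where Y reaches 200, and the X value corresponding to the luminosity y0 is
    returned (presumably a temperature, given the function name).
    """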
df_in = pd.read_csv(dir + "lum_values.csv", sep=",")
    # Keep only calibration points with X < 2000
    mask = df_in["X"] < 2000
    df_in = df_in[mask]
    k = df_in.loc[df_in["Y"] == df_in["Y"].max(), "X"].values
    # Keep only the points before the location of the maximum Y value
    mask = df_in["X"] < k[0]
    df_in = df_in[mask]
x = df_in.X
y = df_in.Y
h = df_in.loc[df_in["Y"] == 200, "X"].values
x1 = x[:h[0]]
y1 = y[:h[0]]
A1 = np.vstack([x1, np.ones(len(x1))]).T
m1, c1 = np.linalg.lstsq(A1, y1, rcond=None)[0]
x2 = x[h[0]:]
y2 = y[h[0]:]
A2 = np.vstack([x2, np.ones(len(x2))]).T
m2, c2 = np.linalg.lstsq(A2, y2, rcond=None)[0]
if y0 >= 200:
x0 = (y0-c2)/m2
else:
x0 = (y0-c1)/m1
return x0
df_in_section_1 = pd.read_csv(dir + "Results_1.csv", sep=",")
df_in_section_2 = pd.read_csv(dir + "Results_2.csv", sep=",")
df_rad0 = pd.read_csv(dir + "Results_radiuslines0.csv", sep=",")
df_rad1 = pd.read_csv(dir + "Results_radiuslines1.csv", sep=",")
df_rad2 = pd.read_csv(dir + "Results_radiuslines2.csv", sep=",")
df_in5 = pd.read_csv(dir + "Results_dots.csv", sep=",")
df_in6 = pd.read_csv(dir + "Results_dots_2.csv", sep=",")
df_in7 = pd.read_csv(dir + "Results_rest.csv", sep=",")
# Thermal
df_th = pd.read_csv(dir + "Results_full_thermal.csv", sep=",")
df_lum = pd.read_csv(dir + "Results_lum_full.csv", sep=",")
df_err = pd.read_csv(dir + "Results_errors.csv", sep=",")
mypath = "/home/surya/Pictures/Guttannen_Jan"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
df_names = pd.DataFrame({"col": onlyfiles})
mypath2 = "/home/surya/Pictures/Guttannen_Feb"
onlyfiles2 = [f for f in listdir(mypath2) if isfile(join(mypath2, f))]
df_names2 = pd.DataFrame({"col": onlyfiles2})
mypath3 = "/home/surya/Pictures/Rest"
onlyfiles3 = [f for f in listdir(mypath3) if isfile(join(mypath3, f))]
df_names3 = pd.DataFrame({"col": onlyfiles3})
df_in_section_1["Label"] = df_in_section_1["Label"].str.split("m").str[-1]
df_in_section_2["Label"] = df_in_section_2["Label"].str.split("m").str[-1]
df_rad0["Label"] = df_rad0["Label"].str.split("m").str[-1]
df_rad1["Label"] = df_rad1["Label"].str.split("m").str[-1]
df_rad2["Label"] = df_rad2["Label"].str.split("m").str[-1]
df_th["Label"] = df_th["Label"].str.split("m").str[-1]
df_in5["Label"] = df_in5["Label"].str.split("m").str[-1]
df_in6["Label"] = df_in6["Label"].str.split("m").str[-1]
df_in7["Label"] = df_in7["Label"].str.split("m").str[-1]
df_names["Label"] = df_names["col"].str.split("m").str[-1]
df_names2["Label"] = df_names2["col"].str.split("m").str[-1]
df_names3["Label"] = df_names3["col"].str.split("m").str[-1]
df_in_section_1["Label"] = (
"2020-"
+ df_in_section_1["Label"].str[2:4]
+ "-"
+ df_in_section_1["Label"].str[4:6]
+ " "
+ df_in_section_1["Label"].str[6:8]
)
df_in_section_2["Label"] = (
"2020-"
+ df_in_section_2["Label"].str[2:4]
+ "-"
+ df_in_section_2["Label"].str[4:6]
+ " "
+ df_in_section_2["Label"].str[6:8]
)
df_rad0["Label"] = (
"2020-"
+ df_rad0["Label"].str[2:4]
+ "-"
+ df_rad0["Label"].str[4:6]
+ " "
+ df_rad0["Label"].str[6:8]
)
df_rad1["Label"] = (
"2020-"
+ df_rad1["Label"].str[2:4]
+ "-"
+ df_rad1["Label"].str[4:6]
+ " "
+ df_rad1["Label"].str[6:8]
)
df_rad2["Label"] = (
"2020-"
+ df_rad2["Label"].str[2:4]
+ "-"
+ df_rad2["Label"].str[4:6]
+ " "
+ df_rad2["Label"].str[6:8]
)
df_in5["Label"] = (
"2020-"
+ df_in5["Label"].str[2:4]
+ "-"
+ df_in5["Label"].str[4:6]
+ " "
+ df_in5["Label"].str[6:8]
)
df_in6["Label"] = (
"2020-"
+ df_in6["Label"].str[2:4]
+ "-"
+ df_in6["Label"].str[4:6]
+ " "
+ df_in6["Label"].str[6:8]
)
df_in7["Label"] = (
"2020-"
+ df_in7["Label"].str[2:4]
+ "-"
+ df_in7["Label"].str[4:6]
+ " "
+ df_in7["Label"].str[6:8]
)
df_names["Label"] = (
"2020-"
+ df_names["Label"].str[2:4]
+ "-"
+ df_names["Label"].str[4:6]
+ " "
+ df_names["Label"].str[6:8]
)
df_names2["Label"] = (
"2020-"
+ df_names2["Label"].str[2:4]
+ "-"
+ df_names2["Label"].str[4:6]
+ " "
+ df_names2["Label"].str[6:8]
)
df_names3["Label"] = (
"2020-"
+ df_names3["Label"].str[2:4]
+ "-"
+ df_names3["Label"].str[4:6]
+ " "
+ df_names3["Label"].str[6:8]
)
df_th["Label"] = (
"2020-"
+ df_th["Label"].str[2:4]
+ "-"
+ df_th["Label"].str[4:6]
+ " "
+ df_th["Label"].str[6:8]
)
df_in_section_1["When"] = pd.to_datetime(df_in_section_1["Label"], format="%Y-%m-%d %H")
df_in_section_2["When"] = pd.to_datetime(df_in_section_2["Label"], format="%Y-%m-%d %H")
df_rad0["When"] = pd.to_datetime(df_rad0["Label"], format="%Y-%m-%d %H")
df_rad1["When"] = pd.to_datetime(df_rad1["Label"], format="%Y-%m-%d %H")
df_rad2["When"] = pd.to_datetime(df_rad2["Label"], format="%Y-%m-%d %H")
df_th["When"] = | pd.to_datetime(df_th["Label"], format="%Y-%m-%d %H") | pandas.to_datetime |
import re
from decimal import Decimal
import math
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
from scipy.special import lambertw
from lmfit import Model, Parameters
from uncertainties import ufloat
def subsetDf(df, start, end):
result = df[(df[df.columns[0]] >= float(start)) & (df[df.columns[0]] <= float(end))].dropna(axis=0)
return result
def logWithZeros(x):
'''
return log10 of array that may contain zeros
'''
out = []
if len(x) > 0:
for xi in x:
if xi == 0.:
out.append(0.)
else:
out.append(np.log10(xi))
return np.array(out)
def johnson(x, ksp, kcat):
'''
implementation of the modified form of the Michaelis-Menten equation presented in <NAME>, <NAME> Org Chem 2019.
'''
return (ksp*x) / (1 + (ksp*x)/kcat)
def SM(x, km, vmax):
'''
implementation of the Schnell-Mendoza equation using the scipy lambertw function
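    The closed-form solution of the Michaelis-Menten rate equation for the
    remaining substrate concentration is
        S(t) = Km * W( (S0/Km) * exp((S0 - Vmax*t)/Km) )
    where W is the Lambert W function. scipy's lambertw returns a complex
    value, so callers take the real part of the result.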
'''
t = x[0]
so = x[1]
z = so / km * np.exp(so / km - vmax / km * t)
return km * lambertw(z)
def linear(x, m, b):
'''
straight line
'''
return m*x + b
def logarithmic(x, yo, b, to):
'''
logarithmic equation from Lu & Fei et. al, 2003
'''
return yo + b*np.log(1 + x*to)
def mmfit(x, km, vmax):
'''
Michaelis Menten equation
'''
return vmax * x / (km + x)
def icfit(x, bottom, top, slope, p50):
'''
IC50 equation
'''
return bottom + (top-bottom)/(1+10**((-p50-x)*slope))
def spline_fit(x, y):
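    '''
    estimate an initial rate from a progress curve: smooth the data with a
    univariate spline, keep the points where |dy/dx| exceeds roughly 70% of its
    range (relaxing the threshold until at least 4 points qualify), then fit a
    straight line through those points and report the absolute slope as the rate
    '''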
x, y = x.values, y.values
spline = UnivariateSpline(x, y)(x)
derivative = np.abs(np.diff(spline)/np.diff(x))
threshold = 0.7*(np.max(derivative) - np.min(derivative)) + np.min(derivative)
try:
indices = np.where(derivative > threshold)[0]
except:
indices = []
while len(indices) < 4:
threshold = threshold*0.9
try:
indices = np.where(derivative > threshold)[0]
except:
indices = []
xi, yi = x[indices], y[indices]
df = pd.DataFrame(data={'x' : xi, 'y' : yi}).sort_values('x').dropna()
xi, yi = df.x, df.y
popt, pcov = curve_fit(linear, xi, yi)
perr = np.sqrt(np.diag(pcov))
xfit = np.linspace(np.min(x), np.max(x), len(spline))
yfit = linear(xfit, *popt)
fit_dict = { 'x' : x, 'y' : y,
'rate' : np.abs(popt[0]), 'error' : perr[0],
'xfit' : xfit,
'yfit' : yfit, 'resi' : np.array(yfit) - y }
return fit_dict
def linear_fit(x, y):
popt, pcov = curve_fit(linear, x, y)
perr = np.sqrt(np.diag(pcov))
xfit = np.linspace(np.min(x), np.max(x), len(x))
yfit = linear(xfit, *popt)
fit_dict = { 'x' : x, 'y' : y,
'rate' : np.abs(popt[0]), 'error' : perr[0],
'xfit' : xfit,
'yfit' : yfit, 'resi' : np.array(yfit) - y }
return fit_dict
def logarithmic_fit(x, y):
popt, pcov = curve_fit(logarithmic, x, y, maxfev=10000)
perr = np.sqrt(np.diag(pcov))
xfit = np.linspace(np.min(x), np.max(x), len(x))
yfit = logarithmic(xfit, *popt)
yerr = logarithmic(xfit, *perr)
fit_dict = { 'x' : x, 'y' : y,
'rate' : np.abs(np.array(np.diff(yfit)/np.diff(xfit))[0]),
'error' : np.abs(np.array(np.diff(yerr)/np.diff(xfit))[0]),
'xfit' : xfit,
'yfit' : yfit, 'resi' : np.array(yfit) - y }
return fit_dict
class sm_fit(object):
def __init__(self, database):
self.data = database
def fit(self, sample, transform, subtract):
ytmp = []
so, s, t = [], [], []
rix = (0, 0)
if subtract in list(self.data):
concentration = float(re.findall(r"[-+]?\d*\.\d+|\d+", subtract)[0])
df = self.data[subtract].data
ts = df[df.columns[0]]
xs = df[df.columns[1]]
x = np.array([(concentration - (max(xs) - xi)) for xi in xs])
if 'x' in transform:
xs = eval(transform)
lmodel = Model(linear)
params = Parameters()
params.add(name="m", value=1)
params.add(name="b", value=0)
sub_line = lmodel.fit(xs, params, x=ts)
m = sub_line.params['m'].value
b = sub_line.params['b'].value
for e in self.data:
if type(self.data[e]) == progress_curve:
concentration = float(re.findall(r"[-+]?\d*\.\d+|\d+", e)[0])
df = self.data[e].data
if e == sample:
rix = (len(t) + 1, len(t) + len(df))
x = df[df.columns[1]]
if 'x' in transform:
x = eval(transform)
ytmpi = [0]*len(x)
if subtract in list(self.data):
ytmpi = [linear(xi, m, b) for xi in df[df.columns[0]]]
x = [xi - linear(xi, m, b) for xi in x]
sx = np.array([(concentration - (max(x) - xi) - yti) for xi, yti in zip(x, ytmpi)])
tx = np.array(df[df.columns[0]])
sox = np.array([concentration]*len(df))
for si, ti, soi, yti in zip(sx, tx, sox, ytmpi):
so.append(soi)
t.append(ti)
s.append(si)
ytmp.append(yti)
x = np.array([t, so])
params = Parameters()
params.add(name="km", value=1)
params.add(name="vmax", value=1)
smodel = Model(SM)
result = smodel.fit(s, params, x=x,
nan_policy='propagate', method='least_squares')
km_val = result.params['km'].value
vmax_val = result.params['vmax'].value
km_err = result.params['km'].stderr
vmax_err = result.params['vmax'].stderr
fit_data = pd.DataFrame(data={
'Km' : ['%.2E' % Decimal(str(km_val)), '%.2E' % Decimal(str(km_err))],
'Vmax' : ['%.2E' % Decimal(str(vmax_val)), '%.2E' % Decimal(str(vmax_err))]
}, index=['value', 'error'])
raw_data = pd.DataFrame(data={
'x' : t[rix[0]:rix[1]],
'y' : s[rix[0]:rix[1]],
'yfit' : result.best_fit[rix[0]:rix[1]].real,
'resi' : s[rix[0]:rix[1]] - result.best_fit[rix[0]:rix[1]].real,
})
xfit = np.linspace(min(so), max(so), 1000)
model_result = pd.DataFrame(data={
'xfit' : xfit,
'yfit' : [mmfit(xf, km_val, vmax_val) for xf in xfit]
})
varea_data = pd.DataFrame(data={
'x' : xfit,
'r1' : [mmfit(xf, km_val + km_err, vmax_val - vmax_err) for xf in xfit],
'r2' : [mmfit(xf, km_val - km_err, vmax_val + vmax_err) for xf in xfit]})
return raw_data, model_result, fit_data, varea_data
class progress_curve(object):
def __init__(self, dataframe, start, end):
df = subsetDf(dataframe, start, end)
self.data = df
def spline(self):
df = self.data
x, y = df[df.columns[0]], df[df.columns[1]]
spline = spline_fit(x, y)
self.spline = spline
return self.spline
def linear(self):
df = self.data
x, y = df[df.columns[0]], df[df.columns[1]]
linear = linear_fit(x, y)
self.linear = linear
return self.linear
def logarithmic(self, offset):
df = self.data
x, y = df[df.columns[0]], df[df.columns[1]]
try:
x = x + offset
except:
pass
logarithmic = logarithmic_fit(x, y)
self.logarithmic = logarithmic
return self.logarithmic
class kinetic_model(object):
def __init__(self, dictionary):
self.dict = dictionary
def model(self, subtract, transform, threshold, bottom, top, slope, scalex, offset):
result = {}
df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
from pandas import Series, _testing as tm
def test_title():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
mixed = mixed.str.title()
exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_lower_upper():
values = Series(["om", np.nan, "nom", "nom"])
result = values.str.upper()
exp = Series(["OM", np.nan, "NOM", "NOM"])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
def test_capitalize():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
mixed = mixed.str.capitalize()
exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_swapcase():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", np.nan, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0])
mixed = mixed.str.swapcase()
exp = Series(["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_casemethods():
values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"]
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_pad():
values = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"])
result = values.str.pad(5, side="left")
exp = Series([" a", " b", np.nan, " c", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side="right")
exp = Series(["a ", "b ", np.nan, "c ", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side="both")
exp = Series([" a ", " b ", np.nan, " c ", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0])
rs = | Series(mixed) | pandas.Series |
## usage
# at a level above emmer/
# python3 -m emmer.test.test_bifurication
from ..bake import BakeCommonArgs
from ..posthoc.stats.bifurication import BifuricationArgs, linearRegressionPVal, DifferentiatingFeatures
from ..posthoc.visual.viewer import Projection
from ..troubleshoot.err.error import ErrorCode12, ErrorCode21
import numpy.testing
import unittest
import pandas
import numpy
import sys
import os
class TestBifuricationArgs(unittest.TestCase):
def test_BifuricationArgs(self):
        print('\ntest_BifuricationArgs:')
print(' case 1: error handling')
print(' 1.1: missing both args.p and args.i setting. raise error for missing args.p')
sys.argv[1:] = ['-m', 'Bifurication']
current_wd = os.getcwd()
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
with self.assertRaises(ErrorCode12):
bifurication_args = BifuricationArgs(args = processed_args.args, current_wd = current_wd,
suppress = True)
print(' ---------------------------------------------------')
print(' 1.2: missing args.i setting')
#sys.argv[1:] = ['-m', 'Bifurication', '-p', 'emmer/data/bake_data_dir_4/information_rich_features_summary.csv']
sys.argv[1:] = ['-m', 'Bifurication', '-p', 'piemmer/data/bake_data_dir_4/information_rich_features_summary.csv']
current_wd = os.getcwd()
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
with self.assertRaises(ErrorCode21):
bifurication_args = BifuricationArgs(args = processed_args.args, current_wd = current_wd,
suppress = True)
print(' ---------------------------------------------------')
print(' 1.3: args.i only contains one csv file')
#sys.argv[1:] = ['-m', 'Bifurication', '-p', 'emmer/data/bake_data_dir_4/information_rich_features_summary.csv',
sys.argv[1:] = ['-m', 'Bifurication', '-p', 'piemmer/data/bake_data_dir_4/information_rich_features_summary.csv',
# '-i', 'emmer/data/data_dir_1']
'-i', 'piemmer/data/data_dir_1']
current_wd = os.getcwd()
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
with self.assertRaises(ErrorCode21):
bifurication_args = BifuricationArgs(args = processed_args.args, current_wd = current_wd,
suppress = True)
print(' ---------------------------------------------------')
print(' case 2: expect to work')
print(' 2.1: get correct list of information-rich features')
#sys.argv[1:] = ['-m', 'Bifurication', '-p', 'emmer/data/bake_data_dir_4/information_rich_features_summary.csv',
sys.argv[1:] = ['-m', 'Bifurication', '-p', 'piemmer/data/bake_data_dir_4/information_rich_features_summary.csv',
# '-i', 'emmer/data/bake_data_dir_4/filtered_data']
'-i', 'piemmer/data/bake_data_dir_4/filtered_data/']
current_wd = os.getcwd()
processed_args = BakeCommonArgs(suppress = True, test = False, neglect = True, silence = False)
processed_args.getHomeKeepingArgs()
bifurication_args = BifuricationArgs(args = processed_args.args, current_wd = current_wd,
suppress = True)
my_result = bifurication_args.list_of_info_rich
expected_result = ['ASV_1', 'ASV_15', 'ASV_2', 'ASV_3', 'ASV_32', 'ASV_4', 'ASV_7']
self.assertListEqual(my_result, expected_result)
print(' ---------------------------------------------------')
print(' 2.2: get correct number of input files')
my_result = len(bifurication_args.input_files)
expected_result = 2
self.assertEqual(my_result, expected_result)
print('===========================================================')
class TestLinearRegressionPVal(unittest.TestCase):
def test_linearRegressionPVal(self):
print('\ntest_linearRegressionPVal:')
A = [[1,15,2], [1,11,1], [1,13,1], [0,2,1], [0,1,2], [0,3,1]]
A_df = pandas.DataFrame(A, columns=['y','x1','x2'], index=['A__s1','A__s2','A__s3','B__s4','B__s5','B__s6'])
target = A_df['y']
data = A_df[['x1','x2']]
my_result = numpy.array(linearRegressionPVal(target = target, data = data, silence_intersect = False))
expected_result = numpy.array([0.7805, 0.0046, 0.6640]).reshape(3, 1)
# result generated in R use lm() function
numpy.testing.assert_almost_equal(my_result, expected_result, decimal = 4)
print('===========================================================')
class TestDifferentiatingFeatures(unittest.TestCase):
def test_differentiatingFeatures(self):
print('\ntest_differentiatingFeatures:')
print(' Case 1: dataset has a differentiating feature')
A = [[15,2], [11,1], [13,1], [2,1], [1,2], [3,1]]
A_df = pandas.DataFrame(A, columns=['feature_1','feature_2'], index=['A__s1','A__s2','A__s3','B__s4','B__s5','B__s6'])
Projection_class_object = Projection(merged_dataframe = A_df, normalize = False)
Projection_class_object.cleanSpec()
DifferentiatingFeatures_class_object = DifferentiatingFeatures(Projection_class_object)
DifferentiatingFeatures_class_object.atGroup()
my_result = DifferentiatingFeatures_class_object.differentiating_feature
expected_result = ['feature_1']
self.assertListEqual(my_result, expected_result)
print(' ---------------------------------------------------')
print(' Case 2: dataset do not have differentiating feature')
C = [[1,2], [3,1], [2,1], [2,1], [1,2], [3,1]]
C_df = | pandas.DataFrame(C, columns=['x1','x2'], index=['A__s1','A__s2','A__s3','B__s4','B__s5','B__s6']) | pandas.DataFrame |
# Written by i3s
import os
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import time
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
def proj_l1ball(y, eta):
"""
Note that the y should be better 1D or after some element-wise operation, the results will turn to be un predictable.
This function will automatically reshape the y as (m,), where m is the y.size, or the y.shape[0]*y.shape[1].
"""
if type(y) is not np.ndarray:
y = np.array(y)
if y.ndim > 1:
y = np.reshape(y, (-1,))
return np.maximum(
np.absolute(y)
- np.amax(
[
np.amax(
(np.cumsum(np.sort(np.absolute(y), axis=0)[::-1], axis=0) - eta)
/ (np.arange(y.shape[0]) + 1)
),
0,
]
),
0,
) * np.sign(y)
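# A minimal usage sketch (illustrative only, not called anywhere in this
# module): project a vector onto the l1-ball of radius eta and check that the
# l1-norm of the result does not exceed eta. The vector, radius, and helper
# name below are made-up for illustration.
def _proj_l1ball_example():
    y = np.array([0.5, -1.0, 2.0])
    eta = 1.0
    w = proj_l1ball(y, eta)
    # the projection soft-thresholds the entries, so the l1-norm is at most eta
    assert np.sum(np.abs(w)) <= eta + 1e-9
    return w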
def centroids(XW, Y, k):
Y = np.reshape(Y, -1)
d = XW.shape[1]
mu = np.zeros((k, d))
"""
since in python the index starts from 0 not from 1,
here the Y==i will be change to Y==(i+1)
Or the values in Y need to be changed
"""
for i in range(k):
C = XW[Y == (i + 1), :]
mu[i, :] = np.mean(C, axis=0)
return mu
def class2indicator(y, k):
if len(y.shape) > 1:
# Either throw exception or transform y, here the latter is chosen.
# Note that a list object has no attribute 'flatten()' as np.array do,
# We use x = np.reshape(y,-1) instead of x = y.flatten() in case of
# the type of 'list' of argument y
y = np.reshape(y, -1)
n = len(y)
Y = np.zeros((n, k)) # dtype=float by default
"""
since in python the index starts from 0 not from 1,
here the y==i in matlab will be change to y==(i+1)
"""
for i in range(k):
Y[:, i] = y == (i + 1)
return Y
def nb_Genes(w):
# Return the number of selected genes from the matrix (numpy.ndarray) w
d = w.shape[0]
ind_genes = np.zeros((d, 1))
for i in range(d):
if np.linalg.norm(w[i, :]) > 0:
ind_genes[i] = 1
indGene_w = np.where(ind_genes == 1)[0]
nbG = int(np.sum(ind_genes))
return nbG, indGene_w
def select_feature_w(w, featurenames):
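    """
    For each of the k columns of w, rank the features by |w| in descending
    order, keep the entries down to the last non-zero weight, and return two
    arrays of shape (max_subset_length, k): the selected feature names and the
    corresponding signed weights, padded so every column has the same length.
    """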
k = w.shape[1]
d = w.shape[0]
lst_features = []
lst_norm = []
for i in range(k):
s_tmp = w[:, i] # the i-th column
f_tmp = np.abs(s_tmp) # the absolute values of this column
ind = np.argsort(f_tmp)[
::-1
] # the indices of the sorted abs column (descending order)
f_tmp = np.sort(f_tmp)[::-1] # the sorted abs column (descending order)
nonzero_inds = np.nonzero(f_tmp)[0] # the nonzero indices
lst_f = []
lst_n = []
if len(nonzero_inds) > 0:
nozero_ind = nonzero_inds[-1] # choose the last nonzero index
if nozero_ind == 0:
lst_f.append(featurenames[ind[0]])
lst_n.append(s_tmp[ind[0]])
else:
for j in range(nozero_ind + 1):
lst_f.append(featurenames[ind[j]])
lst_n = s_tmp[ind[0 : (nozero_ind + 1)]]
lst_features.append(lst_f)
lst_norm.append(lst_n)
n_cols_f = len(lst_features)
    n_rows_f = max(map(len, lst_features))  # maximum subset length
n_cols_n = len(lst_norm)
n_rows_n = max(map(len, lst_norm))
for i in range(n_cols_f):
ft = np.array(lst_features[i])
ft.resize(n_rows_f, refcheck=False)
nt = np.array(lst_norm[i])
nt.resize(n_rows_n, refcheck=False)
if i == 0:
features = ft
normW = nt
continue
features = np.vstack((features, ft))
normW = np.vstack((normW, nt))
features = features.T
normW = normW.T
return features, normW
def compute_accuracy(idxR, idx, k):
"""
# ===============================
#----- INPUT
# idxR : real labels
# idx : estimated labels
# k : number of class
#----- OUTPUT
# ACC_glob : global accuracy
# tab_acc : accuracy per class
# ===============================
"""
    # Python's built-in sum works better on lists, while numpy.sum works better
    # on numpy arrays, so numpy arrays are used as the default type for idxR
    # and idx.
    if type(idxR) is not np.ndarray:
        idxR = np.array(idxR)
    if type(idx) is not np.ndarray:
        idx = np.array(idx)
if idxR.ndim == 2 and 1 not in idxR.shape:
idxR = np.reshape(idxR, (-1, 1))
if idx.ndim == 1:
idx = np.reshape(idx, idxR.shape)
# Global accuracy
y = np.sum(idxR == idx)
ACC_glob = y / len(idxR)
# Accuracy per class
tab_acc = np.zeros((1, k))
"""
since in python the index starts from 0 not from 1,
here the idx(ind)==j in matlab will be change to idx[ind]==(j+1)
"""
for j in range(k):
ind = np.where(idxR == (j + 1))[0]
if len(ind) == 0:
tab_acc[0, j] = 0.0
else:
tab_acc[0, j] = int(np.sum(idx[ind] == (j + 1))) / len(ind)
return ACC_glob, tab_acc
def predict_L1(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
# print(distmu)
# sns.kdeplot(np.array(distmu), shade=True, bw=0.1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
# function to compute the \rho value
def predict_L1_molecule(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
confidence = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 1)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
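        # Signed margin rho = (d_class2 - d_class1) / (d_class2 + d_class1),
        # which lies in [-1, 1]: positive when the sample is closer to the
        # class-1 centroid, negative when closer to class 2, and near 0 when
        # the call is ambiguous (this formula assumes a two-class problem).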
confidence[i] = (distmu[0, 1] - distmu[0, 0]) / (distmu[0, 1] + distmu[0, 0])
return Ytest, confidence
# =============================Plot functions=================================================
# function to plot the distribution of \rho
def rhoHist(rho, n_equal_bins):
"""
# ===============================
#----- INPUT
# rho : df_confidence
# n_equal_bins : the number of histogram bins
#
#----- OUTPUT
# plt.show()
# ===============================
"""
# The leftmost and rightmost bin edges
first_edge, last_edge = rho.min(), rho.max()
bin_edges = np.linspace(
start=first_edge, stop=last_edge, num=n_equal_bins + 1, endpoint=True
)
_ = plt.hist(rho, bins=bin_edges)
plt.title("Histogram of confidence score")
plt.show()
def pd_plot(X, Yr, W, flag=None):
plt.figure()
X_transform = np.dot(X, W)
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_transform[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_transform[index2[0], :]
c2 = np.mean(X_2, axis=0)
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("Primal_Dual")
plt.legend()
plt.show()
def pca_plot(X, Yr, W, flag=None):
plt.figure()
# if flag==True:
# X=np.dot(X,W)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_norm = X_pca
# cluster 1
index1 = np.where(Yr == 1)
X_1 = X_norm[index1[0], :]
c1 = np.mean(X_1, axis=0)
# plt.scatter(X_1[:,0],X_1[:,8],c='b', label='cluster1')
# cluster 2
index2 = np.where(Yr == 2)
X_2 = X_norm[index2[0], :]
c2 = np.mean(X_2, axis=0)
# plt.scatter(X_2[:,0],X_2[:,8],c='g',label='cluster2')
if flag == True:
plt.scatter(c1[0], c1[1], c="y", s=100, marker="*", label="center1")
plt.scatter(c2[0], c2[1], c="c", s=100, marker="*", label="center2")
plt.plot(X_1[:, 0], X_1[:, 1], "ob", label="cluster1")
plt.plot(X_2[:, 0], X_2[:, 1], "^r", label="cluster2")
plt.title("PCA")
plt.legend()
plt.show()
def Predrejection(df_confidence, eps, num_eps):
"""
# =====================================================================
# It calculates the false rate according to the value of epsilon
#
#----- INPUT
# df_confidence : dataframe which contains predicted label,
# original label and rho
# eps : the threshold
# num_eps : the number of epsilon that can be tested
#----- OUTPUT
# FalseRate : An array that contains the falserate according to epsilon
# =====================================================================
"""
Yr = np.array(df_confidence["Yoriginal"])
Yr[np.where(Yr == 2)] = -1
Ypre = np.array(df_confidence["Ypred"])
Ypre[np.where(Ypre == 2)] = -1
rho = df_confidence["rho"]
epsList = np.arange(0, eps, eps / num_eps)
falseRate = []
rejectSample = []
for epsilon in epsList:
index = np.where((-epsilon < rho) & (rho < epsilon))
Yr[index] = 0
Ypre[index] = 0
Ydiff = Yr - Ypre
rejectRate = len(index[0]) / len(Yr)
error = len(np.where(Ydiff != 0)[0]) / len(Yr)
falseRate.append(error)
rejectSample.append(rejectRate)
plt.figure()
plt.plot(epsList, falseRate)
plt.xlabel("Confidence score prediction")
plt.ylabel("FN+FP (ratio)")
# plot the number of rejected samples
plt.figure()
plt.plot(epsList, rejectSample)
plt.xlabel("Confidence score prediction")
plt.ylabel(" Reject samples (ratio) ")
return np.array(falseRate)
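# Usage sketch (comment only, not executed), assuming df_confidence carries the
# columns 'Yoriginal', 'Ypred' and 'rho' produced by predict_L1_molecule:
#   falseRate = Predrejection(df_confidence, eps=0.2, num_eps=20)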
# ==============================================================================
def predict_FISTA(Xtest, W, mu):
# Chambolle_Predict
k = mu.shape[0]
m = Xtest.shape[0]
Ytest = np.zeros((m, 1))
for i in range(m):
distmu = np.zeros((1, k))
XWi = np.matmul(Xtest[i, :], W)
for j in range(k):
distmu[0, j] = np.linalg.norm(XWi - mu[j, :], 2)
Ytest[i] = np.argmin(distmu) + 1 # Since in Python the index starts from 0
return Ytest
def normest(X, tol=1.0e-6, maxiter=100):
# import necessary modules
import scipy.sparse
import numpy as np
import warnings
if scipy.sparse.issparse(X):
x = np.array(np.sum(np.abs(X), axis=0))
x = np.reshape(x, max(x.shape))
elif type(X) == np.matrix:
x = np.sum(np.abs(np.asarray(X)), axis=0)
x = np.reshape(x, max(x.shape))
else:
x = np.sum(np.abs(X), axis=0)
norm_e = np.linalg.norm(x)
if norm_e == 0:
return norm_e
x = x / norm_e
norm_e0 = 0
count = 0
while np.abs(norm_e - norm_e0) > tol * norm_e:
norm_e0 = norm_e
Xx = np.matmul(X, x)
if np.count_nonzero(Xx) == 0:
Xx = np.random.rand(Xx.shape[0])
x = np.matmul(X.T, Xx)
normx = np.linalg.norm(x)
norm_e = normx / np.linalg.norm(Xx)
x = x / normx
count += 1
if count > maxiter:
warnings.warn(
"Normest::NotConverge:the number of iterations exceeds {} times.\nThe error is {}, the tolerance is {}".format(
maxiter, np.abs(norm_e - norm_e0), tol
),
RuntimeWarning,
)
break
return norm_e
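# Usage sketch (comment only, not executed): normest estimates the spectral
# norm (largest singular value) by power iteration, so on a small dense matrix
# it should agree with NumPy's exact 2-norm:
#   A = np.random.rand(50, 20)
#   np.allclose(normest(A), np.linalg.norm(A, 2), atol=1e-4)  # expected True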
def merge_topGene_norm(topGenes, normW, clusternames):
"""
# =====================================================================
    # It merges the two outputs of select_features_w into a new
    # pandas.DataFrame whose columns will be the elements in clusternames
# and each of the column will have two subcolumns: topGenes and weight
#
#----- INPUT
# topGenes : ndarray of top Genes chosen by select_features_w
# normW : normWeight of each genes given by select_features_w
# clusternames : A list of the names of each class.
#----- OUTPUT
    # df_res : A DataFrame with one column per class; the first subcolumn holds
    #          the genes and the second subcolumn their weight norms
# =====================================================================
"""
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
lst_col.append((clusternames[i], "Weights"))
df_res = pd.DataFrame(res, columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
return df_res
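# Usage sketch (comment only, not executed), with hypothetical outputs of
# select_features_w (topGenes and normW have one column per class):
#   df = merge_topGene_norm(topGenes, normW, ["ClassA", "ClassB"])
#   df["ClassA"]["topGenes"]  # ranked genes for the first class
#   df["ClassA"]["Weights"]   # corresponding weight norms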
def merge_topGene_norm_acc(
topGenes,
normW,
clusternames,
acctest,
nbr_features=30,
saveres=False,
file_tag=None,
outputPath="../results/",
):
"""
# =============================================================================================== \n
# Based on the function merge_topGebe_norm, replace the column name for \n
# normW by the accuracy \n
#----- INPUT \n
# topGenes (ndarray or DataFrame) : Top Genes chosen by select_features_w \n
# normW (ndarray or DataFrame) : The normWeight of each genes given by select_features_w \n
# clusternames (list or array) : A list of the names of each class \n
# acctest (list or array) : The list of the test accuracy \n
# saveres (optional, boolean) : True if we want to save the result to local \n
# file_tag (optional, string) : A file tag which will be the prefix of the file name \n
# outputPath (optional, string) : The output Path of the file \n
# ----- OUTPUT \n
# df_res : A DataFrame with each colum the first subcolumn the genes \n
# and second subcolumn their norm of weight \n
# =============================================================================================== \n
"""
if type(topGenes) is pd.DataFrame:
topGenes = topGenes.values
if type(normW) is pd.DataFrame:
normW = normW.values
if topGenes.shape != normW.shape:
raise ValueError("The dimension of the two input should be the same")
m, n = topGenes.shape
nbC = len(clusternames)
res = np.dstack((topGenes, normW))
res = res.reshape(m, 2 * n)
lst_col = []
acctest_mean = acctest.values.tolist()[4]
for i in range(nbC):
lst_col.append((clusternames[i], "topGenes"))
astr = str(acctest_mean[i])
lst_col.append((astr, "Weights"))
df_res = pd.DataFrame(res[0:nbr_features, :], columns=lst_col)
df_res.columns = pd.MultiIndex.from_tuples(
df_res.columns, names=["CluserNames", "Attributes"]
)
if saveres:
df_res.to_csv(
"{}{}_Heatmap of Acc_normW_Topgenes.csv".format(outputPath, file_tag),
sep=";",
)
return df_res
def compare_2topGenes(
topGenes1,
topGenes2,
normW1=None,
normW2=None,
lst_col=None,
nbr_limit=30,
printOut=False,
):
"""
#=======================================================================================
    # Compare, column by column, the elements of two topGenes tables, checking
    # the first "nbr_limit" elements of each column.
    # The two topGenes should have the same number of columns.
# ----- INPUT
# topGenes1, topGenes2 (DataFrame) : Two topGenes to be compared
# normW1, normW2 (DataFrame,optional): Two matrix of weights correspondent. Default: None
# lst_col (list, optional) : If given, only the chosen column will be compared. Default: None
# nbr_limit (scalar, optional) : Number of the lines to be compared. Default: 30
# printOut (boolean, optional) : If True, the comparison result will be shown on screen. Default: False
# ----- OUTPUT
# out (string) : It returns a string of the comparing result as output.
#=======================================================================================
"""
import pandas as pd
import numpy as np
if type(topGenes1) != type(topGenes2):
raise ValueError("The two topGenes to be compared should be of the same type.")
if type(topGenes1) is not pd.DataFrame:
col = ["C" + str(i) for i in topGenes1.shape[1]]
topGenes1 = pd.DataFrame(topGenes1, columns=col)
topGenes2 = | pd.DataFrame(topGenes2, columns=col) | pandas.DataFrame |
import utils as dutil
import numpy as np
import pandas as pd
import astropy.units as u
from astropy.time import Time
import astropy.constants as const
import astropy.coordinates as coords
from astropy.coordinates import SkyCoord
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import curve_fit
import tqdm
from schwimmbad import MultiPool
from legwork import psd, strain, utils
import legwork.source as source
import paths
pd.options.mode.chained_assignment = None
# Specific to Thiele et al. (2021), here are the used metallicity
# array, the associated binary fractions for each Z value, and the ratios
# of mass in singles to mass in binaries of the Lband with each specific
# binary fraction as found using COSMIC's independent samplers
# (See Binary_Fraction_Modeling.ipynb for Tutorials). All values were
# rounded to 4 significant digits except metallicity which used 8:
met_arr = np.logspace(np.log10(1e-4), np.log10(0.03), 15)
met_arr = np.round(met_arr, 8)
met_arr = np.append(0.0, met_arr)
binfracs = np.array(
[
0.4847,
0.4732,
0.4618,
0.4503,
0.4388,
0.4274,
0.4159,
0.4044,
0.3776,
0.3426,
0.3076,
0.2726,
0.2376,
0.2027,
0.1677,
]
)
ratios = np.array(
[
0.68,
0.71,
0.74,
0.78,
0.82,
0.86,
0.9,
0.94,
1.05,
1.22,
1.44,
1.7,
2.05,
2.51,
3.17,
]
)
ratio_05 = 0.64
# LEGWORK uses astropy units so we do also for consistency
G = const.G.value # gravitational constant
c = const.c.value # speed of light in m s^-1
M_sol = const.M_sun.value # sun's mass in kg
R_sol = const.R_sun.value # sun's radius in metres
sec_Myr = u.Myr.to("s") # seconds in a million years
m_kpc = u.kpc.to("m") # metres in a kiloparsec
L_sol = const.L_sun.value # solar luminosity in Watts
Z_sun = 0.02 # solar metallicity
sun = coords.get_sun(Time("2021-04-23T00:00:00", scale="utc")) # sun coordinates
sun_g = sun.transform_to(coords.Galactocentric)
sun_yGx = sun_g.galcen_distance.to("kpc").value
sun_zGx = sun_g.z.to("kpc").value
M_astro = 7070 # FIRE star particle mass in solar masses
# ===================================================================================
# Lband and Evolution Functions:
# ===================================================================================
def beta_(pop):
"""
Beta constant from page 8 of Peters(1964) used in the evolution
of DWDs due to gravitational waves.
INPUTS
----------------------
pop [pandas dataframe]: DF of population which includes component
masses in solar masses
RETURNS
----------------------
beta [array]: array of beta values
"""
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
beta = 64 / 5 * G ** 3 * m1 * m2 * (m1 + m2) / c ** 5
return beta
def a_of_t(pop, t):
"""
Uses Peters(1964) equation (5.9) for circular binaries to find separation.
as a function of time.
INPUTS
----------------------
pop [pandas dataframe]: population subset from COSMIC.
t [array]: time at which to find separation. Must be in Myr.
RETURNS
----------------------
array of separation at time t in solar radii.
"""
t = t * sec_Myr
beta = beta_(pop)
a_i = pop.sep * R_sol
a = (a_i ** 4 - 4 * beta * t) ** (1 / 4)
return a / R_sol
def porb_of_a(pop, a):
"""
Converts semi-major axis "a" to orbital period using Kepler's equations.
INPUTS
----------------------
pop [pandas dataframe]: population from COSMIC.
a [array]: semi-major axis of systems. Must be in solar radii and an array of
the same length as the dateframe pop.
RETURNS
t [array]: orbital period in days.
"""
a = a * R_sol
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
P_sqrd = 4 * np.pi ** 2 * a ** 3 / G / (m1 + m2)
P = np.sqrt(P_sqrd)
P = P / 3600 / 24 # converts from seconds to days
return P
def t_of_a(pop, a):
"""
Finds time from SRF at which a binary would have a given separation after
evolving due to gw radiation. (Re-arrangement of a_of_t(pop, t)).
INPUTS
----------------------
pop [pandas dataframe]: population subset from COSMIC.
a [array]: separation to find time for. Must be in solar radii.
RETURNS
----------------------
t [array]: time in Myr where DWD reaches separation "a"
"""
beta = beta_(pop)
a_i = pop.sep * R_sol
a = a * R_sol
t = (a_i ** 4 - a ** 4) / 4 / beta
t = t / sec_Myr
return t
def t_merge(pop):
"""
Uses Peters(1964) equation (5.10) to determine the merger time of a circular
DWD binary from time of SRF.
INPUTS
----------------------
pop [pandas dataframe]: population subset from COSMIC
RETURNS
----------------------
t [array]: time in Myr.
"""
a_0 = pop.sep * R_sol
beta = beta_(pop)
T = a_0 ** 4 / 4 / beta
    T = T / sec_Myr  # convert from seconds to Myr
return T
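# Worked sketch (comment only, not executed), with a hypothetical DWD frame
# holding the columns used above (mass_1, mass_2 in M_sun, sep in R_sun):
#   pop = pd.DataFrame({"mass_1": [0.6], "mass_2": [0.6], "sep": [1.0]})
#   T = t_merge(pop)  # Peters (1964) GW merger time from SRF, in Myr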
def a_of_RLOF(pop):
"""
Finds separation when lower mass WD overflows its
Roche Lobe. Taken from Eq. 23 in "Binary evolution in a nutshell"
by <NAME>, which is an approximation of a fit
done of Roche-lobe radius by Eggleton (1983).
INPUTS
----------------------
pop [pandas dataframe]: population subset from COSMIC
RETURNS
----------------------
a [array]: RLO separations of pop
"""
m1 = pop.mass_1
m2 = pop.mass_2
primary_mass = np.where(m1 > m2, m1, m2)
secondary_mass = np.where(m1 > m2, m2, m1)
secondary_radius = np.where(m1 > m2, pop.rad_2, pop.rad_1)
R2 = secondary_radius
q = secondary_mass / primary_mass
num = 0.49 * q ** (2 / 3)
denom = 0.6 * q ** (2 / 3) + np.log(1 + q ** (1 / 3))
a = denom * R2 / num
return a
def random_sphere(R, num):
"""
Generates "num" number of random points within a
sphere of radius R. It picks random x, y, z values
within a cube and discards it if it's outside the
sphere.
INPUTS
----------------------
R [array]: Radius in kpc
num [int]: number of points to generate
RETURNS
----------------------
X, Y, Z arrays of length num
"""
X = []
Y = []
Z = []
while len(X) < num:
x = np.random.uniform(-R, R)
y = np.random.uniform(-R, R)
z = np.random.uniform(-R, R)
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if r > R:
continue
if r <= R:
X.append(x)
Y.append(y)
Z.append(z)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
return X, Y, Z
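# Usage sketch (comment only, not executed): draw 1000 points uniformly inside
# a 1 kpc sphere and confirm they all fall within the radius:
#   X, Y, Z = random_sphere(1.0, 1000)
#   assert np.all(np.sqrt(X ** 2 + Y ** 2 + Z ** 2) <= 1.0)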
def rad_WD(M):
"""
Calculates the radius of a WD as a function of mass M in solar masses.
Taken from Eq. 91 in Hurley et al. (2000), from Eq. 17 in Tout et al. (1997)
INPUTS
----------------------
M [array]: masses of the WDs in solar masses
RETURNS
----------------------
rad[array]: radii of the WDs in solar radii
"""
M_ch = 1.44
R_NS = 1.4e-5 * np.ones(len(M))
A = 0.0115 * np.sqrt((M_ch / M) ** (2 / 3) - (M / M_ch) ** (2 / 3))
rad = np.max(np.array([R_NS, A]), axis=0)
return rad
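# Usage sketch (comment only, not executed): the fit gives roughly Earth-sized
# radii for typical WD masses, e.g. about 0.013 R_sun for a 0.6 M_sun WD:
#   rad_WD(np.array([0.6]))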
def evolve(pop_init):
"""
Evolve an initial population of binary WD's using
GW radiation.
INPUTS
----------------------
pop_init [pandas dataframe]: initial population from COSMIC.
Must include assigned FIRE star
particle age columns.
RETURNS
----------------------
pop_init [pandas dataframe]: input pop with present-day parameter
columns added with evolution time and
present day separation, orbital period
and GW frequency.
"""
t_evol = pop_init.age * 1000 - pop_init.tphys
sep_f = a_of_t(pop_init, t_evol)
porb_f = porb_of_a(pop_init, sep_f)
f_gw = 2 / (porb_f * 24 * 3600)
pop_init["t_evol"] = t_evol
pop_init["sep_f"] = sep_f
pop_init["porb_f"] = porb_f
pop_init["f_gw"] = f_gw
return pop_init
def position(pop_init):
"""
Assigning random microchanges to positions to
give each system a unique position for identical
FIRE star particles
INPUTS
----------------------
pop_init [pandas dataframe]: initial population from COSMIC.
Must include assigned FIRE star
particle columns.
RETURNS
----------------------
pop_init [pandas dataframe]: input pop with columns added for
galactocentric coordinates, and
Sun-to-DWD distance.
"""
R_list = pop_init.kern_len.values
xGx = pop_init.xGx.values.copy()
yGx = pop_init.yGx.values.copy()
zGx = pop_init.zGx.values.copy()
x, y, z = random_sphere(1.0, len(R_list))
X = xGx + (x * R_list)
Y = yGx + (y * R_list)
Z = zGx + (z * R_list)
pop_init["X"] = X
pop_init["Y"] = Y
pop_init["Z"] = Z
pop_init["dist_sun"] = (X ** 2 + (Y - sun_yGx) ** 2 + (Z - sun_zGx) ** 2) ** (1 / 2)
return pop_init
def merging_pop(pop_init):
"""
Identifies DWD systems which will merge before present day,
defined as those in which their delay time is less than their
assigned FIRE star particle age.
INPUTS
----------------------
pop_init [pandas dataframe]: initial population from COSMIC.
Must include assigned FIRE star
particle age columns.
RETURNS
----------------------
pop_init [pandas dataframe]: input pop with merged systems
discarded
pop_merge [pandas dataframe]: merged population which can be
saved separately
"""
t_m = t_merge(pop_init)
pop_init["t_delay"] = t_m + pop_init.tphys.values
pop_merge = pop_init.loc[pop_init.t_delay <= pop_init.age * 1000]
pop_init = pop_init.loc[pop_init.t_delay >= pop_init.age * 1000]
return pop_init, pop_merge
def RLOF_pop(pop_init):
"""
Identifies DWD systems in which the lower mass WD will overflow
its Roche Lobe before present day, i.e when the system's RLO time
is less than its assigned FIRE star particle age.
INPUTS
----------------------
pop_init [pandas dataframe]: initial population from COSMIC.
Must include assigned FIRE star
particle age columns.
RETURNS
----------------------
pop_init [pandas dataframe]: input pop with merged systems
discarded
pop_RLOF [pandas dataframe]: RLO population which can be
saved separately
"""
a_RLOF = a_of_RLOF(pop_init)
t_RLOF = t_of_a(pop_init, a_RLOF)
pop_init["t_RLOF"] = t_RLOF
pop_RLOF = pop_init.loc[t_RLOF + pop_init.tphys <= pop_init.age * 1000]
pop_init = pop_init.loc[t_RLOF + pop_init.tphys >= pop_init.age * 1000]
return pop_init, pop_RLOF
def filter_population(dat):
"""
discards systems which have any of [formation times, delay times, RLOF times]
less than their FIRE age. Evolves the remaining systems to present day. Selects
systems orbiting in the LISA band.
INPUTS
----------------------
dat [list] containing (in order)...
- pop_init [pandas dataframe]: initial population from COSMIC.
Must include assigned FIRE star
particle columns.
- i [int]: bin number for metallicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
metallicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- pathtosave [str]: path to folder for the created files
- interfile [bool]: if True, intermediate files like merging and FLO populations
are saved on top of LISA band files.
OUTPUTS:
----------------------
LISA_band [pandas dataframe]: evolved DWDs orbiting in the LISA freq. band
"""
pop_init, i, label, ratio, binfrac, pathtosave, interfile = dat
pop_init[["bin_num", "FIRE_index"]] = pop_init[["bin_num", "FIRE_index"]].astype(
"int64"
)
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_init",
format="t",
append=True,
)
    # Now that we've obtained an initial population, we make data cuts
    # of systems that wouldn't form in time for their FIRE age, or would
    # merge or overflow their Roche Lobe before present day.
pop_init = pop_init.loc[pop_init.tphys <= pop_init.age * 1000]
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_age",
format="t",
append=True,
)
pop_init, pop_merge = merging_pop(pop_init)
if interfile == True:
pop_merge[["bin_num", "FIRE_index"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_merge",
format="t",
append=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_nm",
format="t",
append=True,
)
pop_merge = pd.DataFrame()
pop_init, pop_RLOF = RLOF_pop(pop_init)
if interfile == True:
pop_RLOF[["bin_num", "FIRE_index"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_RLOF",
format="t",
append=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_nRLOF",
format="t",
append=True,
)
pop_RLOF = pd.DataFrame()
# We now have a final population which we can evolve
# using GW radiation
pop_init = evolve(pop_init)
# Assigning random microchanges to positions to
# give each system a unique position for identical
# FIRE star particles
pop_init = position(pop_init)
if interfile == True:
pop_init[["bin_num", "FIRE_index", "X", "Y", "Z"]].to_hdf(
pathtosave
+ "Lband_{}_{}_{}_inter.hdf".format(label, met_arr[i + 1], binfrac),
key="pop_f",
format="t",
append=True,
)
if binfrac == 0.5:
binfrac_write = 0.5
else:
binfrac_write = "variable"
# Assigning weights to population to be used for histograms.
# This creates an extra columns which states how many times
# a given system was sampled from the cosmic-pop conv df.
pop_init = pop_init.join(
pop_init.groupby("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_pw"
)
# Systems detectable by LISA will be in the frequency band
# between f_gw's 0.01mHz and 1Hz.
LISA_band = pop_init.loc[(pop_init.f_gw >= 1e-4)]
if len(LISA_band) == 0:
print(
"No LISA sources for source {} and met {} and binfrac {}".format(
label, met_arr[i + 1], binfrac
)
)
return []
else:
pop_init = pd.DataFrame()
LISA_band = LISA_band.join(
LISA_band.groupby("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_Lw"
)
return LISA_band
def make_galaxy(dat, verbose=False):
"""
Creates populations of DWDs orbiting in the LISA band for a given
DWD type and metallicity.
INPUTS:
dat [list] containing (in order)...
- pathtodat [str]: path to COSMIC dat files with BPS DWD populations
- fire_path [str]: path to FIRE file with metallicity-dependent SFH data
- pathtosave [str]: path to folder for the created galaxy files
- filename [str]: name of dat file for given DWD type and metallicity bin
- i [int]: bin number for metallicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
metallicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- interfile [bool]: if True, intermediate files like merging and FLO populations
are saved on top of LISA band files.
- nproc: number of processes to allow if using on compute cluster
OUTPUTS:
No direct function outputs, but saves the following:
- HDF file with LISA band systems
- If interfile is True, HDF file with intermediate populations
"""
(
pathtodat,
fire_path,
pathtosave,
filename,
i,
label,
ratio,
binfrac,
interfile,
model,
nproc,
) = dat
if binfrac < 0.5:
var_label = "FZ"
else:
var_label = "F50"
Lkey = "Lband_{}_{}".format(var_label, model)
Rkey = "rand_seed_{}_{}".format(var_label, model)
Lsavefile = "Lband_{}_{}_{}_{}.hdf".format(label, var_label, model, i)
try:
pd.read_hdf(pathtosave + Lsavefile, key=Lkey)
return [], [], []
except:
FIRE = pd.read_hdf(fire_path + "FIRE.h5").sort_values("met")
rand_seed = np.random.randint(0, 100, 1)
np.random.seed(rand_seed)
rand_seed = pd.DataFrame(rand_seed)
rand_seed.to_hdf(pathtosave + Lsavefile, key=Rkey)
# Choose metallicity bin
met_start = met_arr[i] / Z_sun
met_end = met_arr[i + 1] / Z_sun
# Load DWD data at formation of the second DWD component
conv = pd.read_hdf(pathtodat + filename, key="conv")
if "bin_num" not in conv.columns:
conv.index = conv.index.rename("index")
conv["bin_num"] = conv.index.values
# overwrite COSMIC radii
conv["rad_1"] = rad_WD(conv.mass_1.values)
conv["rad_2"] = rad_WD(conv.mass_2.values)
# Use ratio to scale to astrophysical pop w/ specific binary frac.
try:
mass_binaries = pd.read_hdf(pathtodat + filename, key="mass_stars").iloc[-1]
except:
print("m_binaries key")
mass_binaries = pd.read_hdf(pathtodat + filename, key="mass_binaries").iloc[
-1
]
mass_total = (1 + ratio) * mass_binaries # total ZAMS mass of galaxy
# Set up LISAband key to append to:
final_params = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"sep",
"met",
"tphys",
"rad_1",
"rad_2",
"xGx",
"yGx",
"zGx",
"FIRE_index",
"f_gw",
"dist_sun",
]
d0 = pd.DataFrame(columns=final_params)
d0.to_hdf(pathtosave + Lsavefile, key=Lkey, format="t", append=True)
        # Get DWD formation efficiency and number of binaries per star particle
DWD_per_mass = len(conv) / mass_total
N_astro = DWD_per_mass * M_astro
# Choose FIRE bin based on metallicity:
FIRE["FIRE_index"] = FIRE.index
if met_end * Z_sun == met_arr[-1]:
FIRE_bin = FIRE.loc[FIRE.met >= met_start]
else:
FIRE_bin = FIRE.loc[(FIRE.met >= met_start) & (FIRE.met <= met_end)]
FIRE = []
# We sample by the integer number of systems per star particle,
# as well as a probabilistic approach for the fractional component
# of N_astro:
N_astro_dec = N_astro % 1
p_DWD = np.random.rand(len(FIRE_bin))
N_sample_dec = np.zeros(len(FIRE_bin))
N_sample_dec[
p_DWD <= N_astro_dec.values
] = 1.0 # assign extra DWD to star particles
num_sample_dec = int(N_sample_dec.sum())
if verbose:
print(
"we will sample {} stars from the decimal portion".format(
num_sample_dec
)
)
sample_dec = pd.DataFrame.sample(conv, num_sample_dec, replace=True)
FIRE_bin_dec = FIRE_bin.loc[N_sample_dec == 1.0]
params_list = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"porb",
"sep",
"met",
"age",
"tphys",
"rad_1",
"rad_2",
"kern_len",
"xGx",
"yGx",
"zGx",
"FIRE_index",
]
pop_init_dec = pd.concat(
[sample_dec.reset_index(), FIRE_bin_dec.reset_index()], axis=1
)
sample_dec = pd.DataFrame()
FIRE_bin_dec = pd.DataFrame()
# get dat list and the population of DWDs orbiting in the LISA band for
# systems added from the decimal component of N_astro
dat = [
pop_init_dec[params_list],
i,
label,
ratio,
binfrac,
pathtosave,
interfile,
]
LISA_band = filter_population(dat)
if len(LISA_band) > 0:
LISA_band = LISA_band[final_params]
LISA_band.to_hdf(pathtosave + Lsavefile, key=Lkey, format="t", append=True)
        # now sampling by the integer number of systems per star particle:
N_sample_int = int(N_astro) * len(FIRE_bin)
if verbose:
print(
"we will sample {} stars from the integer portion".format(N_sample_int)
)
print("getting FIRE values")
FIRE_int = pd.DataFrame(np.repeat(FIRE_bin.values, int(N_astro), axis=0))
FIRE_int.columns = FIRE_bin.columns
FIRE_bin = pd.DataFrame()
# if the number of populations to be sampled is large, we create galaxies iteratively
# by looping through.
Nsamp_split = 5e6
if N_sample_int < Nsamp_split:
sample_int = pd.DataFrame.sample(conv, N_sample_int, replace=True)
pop_init_int = pd.concat(
[sample_int.reset_index(), FIRE_int.reset_index()], axis=1
)
N = len(sample_int)
sample_int = pd.DataFrame()
FIRE_int = pd.DataFrame()
dat = [
pop_init_int[params_list],
i,
label,
ratio,
binfrac,
pathtosave,
interfile,
]
LISA_band = filter_population(dat)
if len(LISA_band) > 0:
LISA_band = LISA_band[final_params]
LISA_band.to_hdf(
pathtosave + Lsavefile, key=Lkey, format="t", append=True
)
elif N_sample_int > Nsamp_split:
if verbose:
print("looping the integer population")
N = 0
j = 0
jlast = int(Nsamp_split)
dat_filter = []
while j < N_sample_int:
if verbose:
print("j: ", j)
print("jlast: ", jlast)
print("sampling {} systems".format(int(jlast - j)))
sample_int = pd.DataFrame.sample(conv, int(jlast - j), replace=True)
N += len(sample_int)
pop_init_int = pd.concat(
[sample_int.reset_index(), FIRE_int.iloc[j:jlast].reset_index()],
axis=1,
)
dat_filter.append(
[
pop_init_int[params_list],
i,
label,
ratio,
binfrac,
pathtosave,
interfile,
]
)
j += Nsamp_split
j = int(j)
jlast += Nsamp_split
jlast = int(jlast)
if jlast > N_sample_int:
jlast = N_sample_int
with MultiPool(processes=nproc) as pool:
LISA_band_list = list(pool.map(filter_population, dat_filter))
for LISA_band in LISA_band_list:
LISA_band = LISA_band[final_params]
LISA_band.to_hdf(
pathtosave + Lsavefile, key=Lkey, format="t", append=True
)
if N != N_sample_int:
print("loop is incorrect")
FIRE_repeat = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import os
import pickle
import pandas as pd
from pandapower.toolbox import convert_format
from pandapower.create import create_empty_network
from pandapower.auxiliary import PandapowerNet
def to_hdf5(net, filename, complevel=1, complib="zlib", save_res=False):
raise Exception('to_hdf5 is deprecated. Use to_pickle instead')
def from_hdf5(filename):
# Load HDF5 File
raise Exception('from_hdf5 is deprecated. If you need to open a hdf5 File you may go back in GIT. However, to save and load files, to_pickle and from_pickle should be used.')
def to_pickle(net, filename):
"""
Saves a Pandapower Network with the pickle library.
INPUT:
**net** (dict) - The Pandapower format network
**filename** (string) - The absolute or relative path to the input file.
EXAMPLE:
>>> pp.to_pickle(net, os.path.join("C:", "example_folder", "example1.p")) # absolute path
>>> pp.to_pickle(net, "example2.p") # relative path
"""
if not filename.endswith(".p"):
raise Exception("Please use .p to save pandapower networks!")
with open(filename, "wb") as f:
pickle.dump(dict(net), f, protocol=2)
def to_excel(net, filename, include_empty_tables=False, include_results=True):
"""
Saves a Pandapower Network to an excel file.
INPUT:
**net** (dict) - The Pandapower format network
**filename** (string) - The absolute or relative path to the input file.
OPTIONAL:
**include_empty_tables** (bool, False) - empty element tables are saved as excel sheet
**include_results** (bool, True) - results are included in the excel sheet
EXAMPLE:
>>> pp.to_excel(net, os.path.join("C:", "example_folder", "example1.xlsx")) # absolute path
>>> pp.to_excel(net, "example2.xlsx") # relative path
"""
writer = pd.ExcelWriter(filename, engine='xlsxwriter')
for item, table in net.items():
if type(table) != pd.DataFrame or item.startswith("_"):
continue
elif item.startswith("res"):
if include_results and len(table) > 0:
table.to_excel(writer, sheet_name=item)
elif item == "line_geodata":
geo = pd.DataFrame(index=table.index)
for i, coord in table.iterrows():
for nr, (x, y) in enumerate(coord.coords):
geo.loc[i, "x%u" % nr] = x
geo.loc[i, "y%u" % nr] = y
geo.to_excel(writer, sheet_name=item)
elif len(table) > 0 or include_empty_tables:
table.to_excel(writer, sheet_name=item)
parameters = pd.DataFrame(index=["name", "f_hz", "version"], columns=["parameters"],
data=[net.name, net.f_hz, net.version])
    # write the general parameters and the line standard types, then save the
    # workbook (sheet names here follow the element-table naming used above)
    parameters.to_excel(writer, sheet_name="parameters")
    pd.DataFrame(net.std_types["line"]).to_excel(writer, sheet_name="line_std_types")
    writer.save()
# -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# @file multi_md_analysis.py
# @brief multi_md_analysis object
# @author <NAME>
#
# <!--------------------------------------------------------------------------
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------ -->
import itertools
import hdbscan
import matplotlib
import matplotlib.cm as cm
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.metrics import silhouette_samples, silhouette_score, calinski_harabaz_score
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
import os
import sys
import pickle
import time
import pylab as plt
from scipy import linalg
from pandas import HDFStore, DataFrame
import matplotlib as mpl
import mdtraj as md
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
from sklearn.decomposition import PCA
from sklearn import mixture
from multiprocessing import Pool
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import converters
from molmolpy.utils import plot_tools
from molmolpy.utils import pdb_tools
from molmolpy.utils import folder_utils
from molmolpy.utils import protein_analysis
from molmolpy.utils import nucleic_analysis
from molmolpy.utils import helper as hlp
from itertools import combinations
import seaborn as sns
import numba
matplotlib.rcParams.update({'font.size': 12})
# matplotlib.style.use('ggplot')
sns.set(style="white", context='paper')
# font = {'family' : 'normal',
# 'weight' : 'bold',
# 'size' : 18}
#
# matplotlib.rc('font', **font)
class MultiMDAnalysisObject(object):
"""
Molecule object loading of pdb and pbdqt file formats.
Then converts to pandas dataframe.
Create MoleculeObject by parsing pdb or pdbqt file.
2 types of parsers can be used: 1.molmolpy 2. pybel
Stores molecule information in pandas dataframe as well as numpy list.
Read more in the :ref:`User Guide <MoleculeObject>`.
Parameters
----------
filename : str, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Convert gro to PDB so mdtraj recognises topology
YEAH
gmx editconf -f npt.gro -o npt.pdb
"""
def __init__(self, file_list=None):
self.simulation_data = {}
self.sim_indexes = []
if file_list is not None:
if len(file_list) > 0:
for i in range(len(file_list)):
self.add_simulation_pickle_data(i + 1, file_list[i])
self.sim_indexes.append(i + 1)
colors = sns.cubehelix_palette(n_colors=len(file_list), rot=.7, dark=0, light=0.85)
self.colors_ = colors
test = 1
def add_simulation_pickle_data(self, index, filename):
temp_data = pickle.load(open(filename, "rb"))
self.simulation_data.update({str(index): temp_data})
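    # Usage sketch (comment only, not executed), with hypothetical pickle files
    # produced by the per-simulation analysis step; each pickle holds a dict
    # with keys such as 'time', 'rmsd', 'Rg', 'rmsf' and 'hbondFrames':
    #   files = ['cluster1_analysis.pickle', 'cluster2_analysis.pickle']
    #   multi = MultiMDAnalysisObject(file_list=files)
    #   multi.plot_rmsd_multi(selection='backbone')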
@hlp.timeit
def plot_rmsd_multi(self, selection,
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=1200,
custom_labels=None,
position='best',
noTitle=True,
size_x=8.4,
size_y=7):
import pylab as plt
sns.set(style="ticks", context='paper')
sns.set(font_scale=0.8)
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
# fig.suptitle(title, fontsize=16)
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# fig = plt.figure(figsize=(10, 7))
fig = plt.figure(figsize=plot_tools.cm2inch(size_x, size_y))
# fig.suptitle(title, fontsize=16)
if noTitle is False:
fig.suptitle(title)
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
traj_rmsd = self.simulation_data[str(i)]['rmsd'][selection]
if custom_labels is None:
curr_label = 'Simulation {0}'.format(i)
else:
curr_label = '{0}'.format(custom_labels[i-1])
curr_color = self.colors_[i - 1]
plt.plot(self.sim_time, traj_rmsd, color=curr_color,
linewidth=0.52, label=curr_label)
# plt.legend(loc="best", prop={'size': 8})
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
leg = plt.legend(loc=position, shadow=True, ncol=2)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(6.0)
# remove part of ticks
sns.despine()
fig.savefig('Multi_Plot_RMSD_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def plot_rg_multi(self,
selection,
title='LasR Rg',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ Rg from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
custom_dpi=600):
import pylab as plt
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
# In[27]:
fig = plt.figure(figsize=(10, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
traj_rmsd = self.simulation_data[str(i)]['Rg'][selection]
curr_label = 'Simulation {0}'.format(i)
curr_color = self.colors_[i - 1]
plt.plot(self.sim_time, traj_rmsd, color=curr_color,
linewidth=0.6, label=curr_label)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# remove part of ticks
sns.despine()
# In[28]:
fig.savefig('Multi_Plot_Rg_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Rg plot created')
print('-----------------------------------\n')
# TODO calculate confidence intervals
@hlp.timeit
def plot_rmsf_plus_confidence_multi(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=600):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
fig = plt.figure(figsize=(14, 7))
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
curr_label = 'Simulation {0}'.format(i)
traj_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['rmsf']
atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
curr_color = self.colors_[i - 1]
conv_data = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf, x_axis_name='Residue',
y_axis_name='RMSF')
conv_data['Residue'] += 1
confidence = hlp.mean_confidence_interval(conv_data['RMSF'])
# plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# linewidth=0.6, label=curr_label)
# Plot the response with standard error
sns.tsplot(data=conv_data, ci=[95], color="m")
# plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
# linewidth=0.6, label=curr_label)
# plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_RMSF_confidence_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF +confidence plot created')
@hlp.timeit
def prep_mdtraj_object(self, filename):
'''
Prepare receptor mdtraj object
get mdtraj topology and save as pandas dataframe
Calculate pdb receptor center of mass
:return:
'''
self.receptor_file = filename
self.receptor_mdtraj = md.load_pdb(self.receptor_file)
self.receptor_mdtraj_topology = self.receptor_mdtraj.topology
self.receptor_mdtraj_topology_dataframe = self.receptor_mdtraj.topology.to_dataframe()
topology = self.receptor_mdtraj.topology
atom_indices = topology.select('backbone')
test = 1
# self.center_of_mass_receptor = md.compute_center_of_mass(self.receptor_mdtraj)[0]
#
# self.x_center = math.ceil(self.center_of_mass_receptor[0] * 10)
# self.y_center = math.ceil(self.center_of_mass_receptor[1] * 10)
# self.z_center = math.ceil(self.center_of_mass_receptor[2] * 10)
#
# self.receptor_pybel = pybel.readfile("pdb", self.receptor_file).__next__()
# self.ligand_pybel = pybel.readfile("pdb", self.ligand_file).__next__()
test = 1
@hlp.timeit
def plot_rmsf_multi(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=1200):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
sns.set(font_scale=0.8)
# fig = plt.figure(figsize=(14, 7))
title = 'Cluster Simulation {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig = plt.figure(figsize=plot_tools.cm2inch(8.4, 8.4))
# fig.suptitle(title, fontsize=16)
fig.suptitle(title)
# self.receptor_mdtraj_topology.atom(3000).residue.resSeq
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
curr_label = 'Simulation {0}'.format(i)
traj_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['rmsf']
atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
curr_color = self.colors_[i - 1]
converted_resseq,converted_index = converters.convert_mdtraj_atom_nums_to_resseq(self.receptor_mdtraj_topology,
atom_indices_rmsf)
conv_data_temp = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf)
            conv_data = conv_data_temp.loc[converted_index]  # .loc replaces the removed .ix indexer
conv_data['x'] = converted_resseq
test = 1
# plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# linewidth=0.6, label=curr_label)
plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
linewidth=0.52, label=curr_label)
#plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
plt.xlabel(xlabel)
plt.ylabel(ylabel) #
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
leg = plt.legend(loc='best', shadow=True)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(6.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_RMSF_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF plot created')
def count_lig_hbond(self, t, hbonds, ligand):
label = lambda hbond: '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
hbond_atoms = []
hbond_indexes_sel = []
hbond_count = 0
for hbond in hbonds:
res = label(hbond)
# print('res ', res)
if ligand in res:
# print("res is ", res)
hbond_atoms.append(res)
hbond_indexes_sel.append(hbond)
hbond_count += 1
test=1
# print('------------------------------------------------')
test = 1
return hbond_atoms, hbond_count, hbond_indexes_sel
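    # Usage sketch (comment only, not executed): count ligand hydrogen bonds in
    # a single frame, assuming 'HSL' is the ligand residue name and t is an
    # mdtraj Trajectory frame:
    #   hbonds = md.baker_hubbard(t, periodic=False)
    #   atoms, n_hbonds, triplets = self.count_lig_hbond(t, hbonds, 'HSL')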
@hlp.timeit
def hbond_lig_count_analysis(self,
ligand_name='HSL',
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=600):
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
fig = plt.figure(figsize=(14, 7))
title = 'Simulations of Clusters {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
traj_frame = self.simulation_data[str(self.sim_indexes[0])]['clustersCentroid']
self.sim_time = self.simulation_data[str(self.sim_indexes[0])]['time']
t = traj_frame[0]
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
hbonds_frames = self.simulation_data[str(i)]['hbondFrames']
sim_hbond_atoms = []
sim_hbond_count = []
for hbonds in hbonds_frames:
hbond_atoms, hbond_count, hbond_indexes_sel = self.count_lig_hbond(t, hbonds, ligand_name)
sim_hbond_atoms.append(hbond_atoms)
sim_hbond_count.append(hbond_count)
sim_hbound_np = np.array(sim_hbond_count)
self.simulation_data[str(i)].update({'hbond_atoms':sim_hbond_atoms})
self.simulation_data[str(i)].update({'hbond_count':sim_hbond_count})
curr_color = self.colors_[i - 1]
# curr_label = 'Simulation {0}'.format(i)
curr_label = "Simulation of Cluster {0} mean: {1}±{2}".format(i, round(np.mean(sim_hbound_np),3),
round(np.std(sim_hbond_count),3))
# Version 1
plt.plot(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
linewidth=0.2, label=curr_label)
# Version 2
# plt.scatter(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
# linewidth=0.3, label=curr_label)
# data_frame = converters.convert_data_to_pandas(self.sim_time, self.hbond_count)
#
# y_average_mean = data_frame['y'].rolling(center=False, window=20).mean()
# atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
# curr_color = self.colors_[i - 1]
#
# conv_data = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf)
#
# # plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# # linewidth=0.6, label=curr_label)
#
# plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
# linewidth=0.6, label=curr_label)
# plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
test = 1
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_HBOND_count_Lig_' + '_' + title + '_' + ligand_name + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Multi HBond lig count plot created')
@hlp.timeit
def hbond_freq_plot_analysis(self,
ligand_name='HSL',
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=600):
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
traj_frame = self.simulation_data[str(self.sim_indexes[0])]['clustersCentroid']
self.sim_time = self.simulation_data[str(self.sim_indexes[0])]['time']
t = traj_frame[0]
for i in self.sim_indexes:
plt.clf()
fig = plt.figure(figsize=(14, 7))
title = 'Simulations of Clusters {0}-{1}'.format(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
self.sim_time = self.simulation_data[str(i)]['time']
hbonds_frames = self.simulation_data[str(i)]['hbondFrames']
sim_hbond_atoms = []
sim_hbond_count = []
sim_hbond_sel = []
for hbonds in hbonds_frames:
hbond_atoms, hbond_count, hbond_indexes_sel = self.count_lig_hbond(t, hbonds, ligand_name)
sim_hbond_atoms.append(hbond_atoms)
sim_hbond_count.append(hbond_count)
if len( hbond_indexes_sel) > 0:
sim_hbond_sel+= hbond_indexes_sel
sim_hbound_np = np.array(sim_hbond_count)
sim_hbound_sel_np = np.array(sim_hbond_sel)
# self.simulation_data[str(i)].update({'hbond_atoms':sim_hbond_atoms})
# self.simulation_data[str(i)].update({'hbond_count':sim_hbond_count})
# curr_color = self.colors_[i - 1]
# curr_label = 'Simulation {0}'.format(i)
curr_label = "Simulation of Cluster {0} mean: {1}±{2}".format(i, round(np.mean(sim_hbound_np),3),
round(np.std(sim_hbond_count),3))
# This won't work here
da_distances = md.compute_distances(t, sim_hbound_sel_np[:, [0, 2]], periodic=False)
# Version 1
# plt.plot(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
# linewidth=0.2, label=curr_label)
# color = itertools.cycle(['r', 'b', 'gold'])
colors = sns.cubehelix_palette(n_colors=len(da_distances), rot=-.4)
# self.colors_ = colors
label = lambda hbond: '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
color = itertools.cycle(['r', 'b', 'gold'])
            # use a separate loop variable so the simulation index `i` used in
            # the output file name below is not shadowed
            for k in [0]:
                plt.hist(da_distances[:, k], color=colors[k], label=label(sim_hbound_sel_np[k]), alpha=0.5)
plt.legend()
plt.ylabel('Freq');
plt.xlabel('Donor-acceptor distance [nm]')
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
#
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
#
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(9.0)
sns.despine()
fig.savefig('Multi_Plot_HBOND_frequency_' + '_' + title + '_' + str(i)+ '_'+ ligand_name + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Multi HBond frequency lig plot created')
@hlp.timeit
def plot_solvent_area_multi(self, show=False):
fig = plt.figure(figsize=(10, 10))
plt.plot(self.sasa_traj.time, self.total_sasa)
plt.xlabel('Time [ps]', size=16)
plt.ylabel('Total SASA (nm)^2', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_plot.png', dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_solvent_area_frame_multi(self, frame, show=False):
fig = plt.figure(figsize=(10, 10))
plt.plot(self.sasa_traj.time, self.sasa[frame])
plt.xlabel('Time [ps]', size=16)
plt.ylabel('Total SASA (nm)^2', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_plot_{0}.png'.format(frame), dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_solvent_area_autocorr_multi(self, show=False):
self.sasa_autocorr = protein_analysis.autocorr(self.total_sasa)
fig = plt.figure(figsize=(10, 10))
plt.semilogx(self.sasa_traj.time, self.sasa_autocorr)
plt.xlabel('Time [ps]', size=16)
plt.ylabel('SASA autocorrelation', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_autocorrelation.png', dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_rmsd_cluster_color_multi(self, selection,
title='LasR RMSD',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=300,
lang='rus'):
import pylab as plt
sns.set(style="ticks", context='paper')
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
fig = plt.figure(figsize=(14, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# plt.plot(self.sim_time, self.sim_rmsd, color=self.cluster_colors,
# linewidth=0.6, label='LasR')
if lang == 'rus':
title = 'Симуляция'
xlabel = r"Время $t$ (нс)"
ylabel = r"RMSD(нм)"
else:
title = 'Simulation'
xlabel = r"Time $t$ (ns)"
ylabel = r"RMSD(nm)"
sns.set(font_scale=2)
plt.plot(self.sim_time, self.sim_rmsd, zorder=1)
traj_rmsd = self.rmsd_analysis_data[selection]
plt.scatter(self.sim_time, traj_rmsd, marker='o', s=30, facecolor='0.5', lw=0,
c=self.cluster_colors, zorder=2)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.xlim(self.sim_time[0], self.sim_time[-1])
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
fig.tight_layout()
# remove part of ticks
sns.despine()
# plt.show()
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '_cluster_color' + '_' + lang + '.png',
dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def find_best_fit_regressor(self):
# from sklearn.tree import DecisionTreeRegressor
self.best = 100
self.index = 100
self.best_rg = 100
self.index_rg = 100
self.regr_index = []
self.regr_scores = {}
self.regr_index_rg = []
self.regr_scores_rg = {}
self.reshaped_time = self.sim_time.reshape(-1, 1)
for i in list(range(1, self.regression_fit_range + 1)):
self.create_fit(i)
print('best score is ', self.best)
print('best index is', self.index)
print('-=-' * 10)
print('best score Rg is ', self.best_rg)
print('best index Rg is', self.index_rg)
@hlp.timeit
def create_fit(self, i):
from sklearn import tree
from sklearn.model_selection import cross_val_score
self.reshaped_time = self.sim_time.reshape(-1, 1)
regressor = tree.DecisionTreeRegressor(max_depth=i) # interesting absolutely
fitVal = regressor.fit(self.reshaped_time, self.sim_rmsd)
print('fitVal ', fitVal)
rmsd_pred = regressor.predict(self.reshaped_time)
# cv how is it determined?
# A good compromise is ten-fold cross-validation. 10ns
# Maybe mse better?
cross_val = cross_val_score(regressor,
self.reshaped_time,
self.sim_rmsd,
scoring="neg_mean_squared_error",
cv=10)
regressor_rg = tree.DecisionTreeRegressor(max_depth=i) # interesting absolutely
        fitVal_rg = regressor_rg.fit(self.reshaped_time, self.rg_res)
        print('fitVal_rg ', fitVal_rg)
rmsd_pred_rg = regressor_rg.predict(self.reshaped_time)
# cv how is it determined?
# A good compromise is ten-fold cross-validation. 10ns
cross_val_rg = cross_val_score(regressor,
self.reshaped_time,
self.rg_res,
scoring="neg_mean_squared_error",
cv=10)
self.regr_scores.update({i: cross_val})
self.regr_index.append(i)
self.regr_scores_rg.update({i: cross_val_rg})
self.regr_index_rg.append(i)
cross_val_score = -cross_val.mean()
cross_val_std = cross_val.std()
cross_val_score_rg = -cross_val_rg.mean()
cross_val_std_rg = cross_val_rg.std()
print('Cross validation score is ', cross_val)
print("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(i, -cross_val.mean(), cross_val.std()))
print('-=-' * 10)
print('Cross validation Rg score is ', cross_val_rg)
print("Rg Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(i, -cross_val_rg.mean(), cross_val_rg.std()))
# r2_score = regressor.score(self.sim_time.reshape(-1, 1), self.sim_rmsd)
# if r2_score > self.r2_best:
# self.r2_best = r2_score
# self.r2_index = i
if cross_val_score < self.best:
self.best = cross_val_score
self.index = i
if cross_val_score_rg < self.best_rg:
self.best_rg = cross_val_score_rg
self.index_rg = i
del regressor
del fitVal
del rmsd_pred
time.sleep(2)
# print('R2 score is ', r2_score)
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_rmsd_fit(self):
import matplotlib.pyplot as plt
x = self.regr_index
y = []
yerr_list = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
cross_val_score = -self.regr_scores[i].mean()
cross_val_std = self.regr_scores[i].std()
y.append(cross_val_score)
yerr_list.append(cross_val_std)
fig = plt.figure(figsize=(10, 10))
plt.errorbar(x, y, yerr=yerr_list)
plt.scatter(x, y, s=160, c='b', marker='h',
label="Best score at Max Depth={}\nMSE = {:.2e}(+/- {:.2e})".format(self.index,
-self.regr_scores[
self.index].mean(),
self.regr_scores[
self.index].std()))
plt.legend(loc="best", prop={'size': 20})
plt.title("Mean squared error (MSE) averages for RMSD")
fig.savefig(self.simulation_name + '_errorBar_rmsd.png', dpi=300, bbox_inches='tight')
# plt.show()
print('Errorbar created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_Rg_fit(self):
import matplotlib.pyplot as plt
x = self.regr_index
y = []
yerr_list = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
cross_val_score = -self.regr_scores_rg[i].mean()
cross_val_std = self.regr_scores_rg[i].std()
y.append(cross_val_score)
yerr_list.append(cross_val_std)
fig = plt.figure(figsize=(10, 10))
plt.errorbar(x, y, yerr=yerr_list)
plt.scatter(x, y, s=160, c='b', marker='h',
label="Best score at Max Depth={}\nMSE = {:.2e}(+/- {:.2e})".format(self.index_rg,
-self.regr_scores_rg[
self.index_rg].mean(),
self.regr_scores_rg[
self.index_rg].std()))
plt.legend(loc="best", prop={'size': 20})
plt.title("Mean squared error (MSE) averages for Rg")
fig.savefig(self.simulation_name + '_errorBar_Rg.png', dpi=300, bbox_inches='tight')
# plt.show()
print('Errorbar created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_fit_test(self):
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
# example variable error bar values
yerr = 0.1 + 0.2 * np.sqrt(x)
xerr = 0.1 + yerr
# First illustrate basic pyplot interface, using defaults where possible.
plt.figure()
plt.errorbar(x, y, xerr=0.2, yerr=0.4)
plt.title("Simplest errorbars, 0.2 in x, 0.4 in y")
# Now switch to a more OO interface to exercise more features.
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
ax = axs[0, 0]
ax.errorbar(x, y, yerr=yerr, fmt='o')
ax.set_title('Vert. symmetric')
# With 4 subplots, reduce the number of axis ticks to avoid crowding.
ax.locator_params(nbins=4)
ax = axs[0, 1]
ax.errorbar(x, y, xerr=xerr, fmt='o')
ax.set_title('Hor. symmetric')
ax = axs[1, 0]
ax.errorbar(x, y, yerr=[yerr, 2 * yerr], xerr=[xerr, 2 * xerr], fmt='--o')
ax.set_title('H, V asymmetric')
ax = axs[1, 1]
ax.set_yscale('log')
# Here we have to be careful to keep all y values positive:
ylower = np.maximum(1e-2, y - yerr)
yerr_lower = y - ylower
ax.errorbar(x, y, yerr=[yerr_lower, 2 * yerr], xerr=xerr,
fmt='o', ecolor='g', capthick=2)
ax.set_title('Mixed sym., log y')
fig.suptitle('Variable errorbars')
plt.show()
@hlp.timeit
def plot_boxplot_fit_regr(self):
data_to_plot = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
data_to_plot.append(self.regr_scores[i])
# Create a figure instance
fig = plt.figure(figsize=(10, 10))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
# change outlier to hexagon
# bp = ax.boxplot(data_to_plot, 0, 'gD')
# dont show outlier
bp = ax.boxplot(data_to_plot, 0, '')
# Save the figure
fig.savefig(self.simulation_name + '_boxplot.png', dpi=600, bbox_inches='tight')
# plt.show()
print('Box plot created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def example_test(self):
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
degrees = [1, 4, 8, 15, 20]
# true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = self.sim_time
y = self.sim_rmsd
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
# scikit-learn needs a 2-D feature matrix, hence the [:, np.newaxis]
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = self.sim_time
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, self.sim_rmsd, label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
@hlp.timeit
def plot_rmsd_with_regressor(self, title='LasR Simulation RMSD',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)"):
import pylab as plt
from sklearn import tree
rfc = tree.DecisionTreeRegressor(max_depth=self.index) # interesting absolutely
fitVal = rfc.fit(self.sim_time.reshape(-1, 1), self.sim_rmsd)
print('fitVal ', fitVal)
self.rmsd_pred = rfc.predict(self.sim_time.reshape(-1, 1))
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(self.sim_time, self.sim_rmsd, color='b',
linewidth=0.6, label='Original Data')
plt.plot(self.sim_time, self.rmsd_pred, color='r',
linewidth=4, label='Fitted Data')
plt.legend(loc="best", prop={'size': 30})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_tree' + '.png', dpi=300, bbox_inches='tight')
print('RMSD plot created with regressor')
print('-----------------------------------\n')
@hlp.timeit
def plot_Rg_with_regressor(self, title='LasR Radius of Gyration',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)"):
import pylab as plt
from sklearn import tree
rfc = tree.DecisionTreeRegressor(max_depth=self.index_rg) # interesting absolutely
fitVal = rfc.fit(self.sim_time.reshape(-1, 1), self.rg_res)
print('fitVal ', fitVal)
self.rmsd_pred_rg = rfc.predict(self.sim_time.reshape(-1, 1))
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(self.sim_time, self.rg_res, color='b',
linewidth=0.6, label='Original Data')
plt.plot(self.sim_time, self.rmsd_pred_rg, color='r',
linewidth=4, label='Fitted Data')
plt.legend(loc="best", prop={'size': 30})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_tree' + '.png', dpi=300, bbox_inches='tight')
print('Rg plot created with regressor')
print('-----------------------------------\n')
@hlp.timeit
def md_full_load(self, custom_stride=10):
print('MD Load has been called\n')
print('-------------------------------\n')
self.full_traj = md.load(self.md_trajectory_file, top=self.md_topology_file,
stride=custom_stride)
self.sim_time = self.full_traj.time / 1000
print("Full trajectory loaded successfully")
print('-----------------------------------\n')
@hlp.timeit
def rg_analysis(self, selection='protein'):
self.called_rg_analysis = True
# self.rg_traj = self.full_traj[:]
#
# self.topology = self.rmsd_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.rg_traj.restrict_atoms(self.selection)
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
self.rg_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.rg_res = md.compute_rg(self.rg_traj)
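# md.compute_rg returns one radius-of-gyration value per frame (in nm),
# so self.rg_res has shape (n_frames,).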
self.rg_analysis_data.update({selection: self.rg_res})
print("Rg has been calculated")
print('-----------------------------------\n')
@hlp.timeit
def hbond_analysis_count(self, selection='protein',
title='LasR H-Bonds',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=300):
sns.set(style="ticks", context='paper')
self.called_hbond_analysis_count = True
print('HBonds analysis has been called\n')
print('-------------------------------\n')
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
print('selection is ', self.selection)
# this is for keeping selection from trajectory
# self.full_traj.restrict_atoms(self.selection)
self.hbond_count = []
self.sim_time = self.full_traj.time / 1000
# paral = Pool(processes=16)
# data_count = list(map(self.hbond_frame_calc, self.full_traj))
#
# print('data count ',data_count)
# hbonds = md.baker_hubbard(self.full_traj, exclude_water=True, periodic=False)
# print('count of hbonds is ', len(hbonds))
# self.hbond_count.append(len(hbonds))
hbonds_frames = md.wernet_nilsson(self.full_traj, exclude_water=True, periodic=False)
self.hbonds_frames = hbonds_frames
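# md.wernet_nilsson returns one (n_hbonds, 3) array of (donor, hydrogen,
# acceptor) atom indices per frame, so len(hbonds) per frame gives the
# time series of hydrogen-bond counts; md.baker_hubbard, by contrast,
# aggregates bonds over the whole trajectory. Illustration only:
# donor, hydrogen, acceptor = self.hbonds_frames[0][0]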
for hbonds in hbonds_frames:
self.hbond_count.append(len(hbonds))
data_frame = converters.convert_data_to_pandas(self.sim_time, self.hbond_count)
y_average_mean = data_frame['y'].rolling(center=False, window=20).mean()
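# The 20-point rolling mean only smooths the plotted curve; the first
# window-1 values of y_average_mean are NaN and are simply not drawn.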
fig = plt.figure(figsize=(7, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(data_frame['x'], data_frame['y'], color='b',
linewidth=0.6, label='LasR')
# Dont plot rolling mean
plt.plot(data_frame['x'], y_average_mean, color='r',
linewidth=0.9, label='LasR rolling mean')
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# remove part of ticks
sns.despine()
fig.savefig(self.simulation_name + '_' + title + '.png', dpi=custom_dpi, bbox_inches='tight')
print('HBond count plot created')
print('-----------------------------------\n')
# for hbond in hbonds:
# print(hbond)
# print(label(hbond))
# atom1 = self.full_traj.topology.atom(hbond[0])
# atom2 = self.full_traj.topology.atom(hbond[2])
# # atom3 = traj_sim1_hbonds.topology.atom(hbond[2])
# if atom1.residue.resSeq != atom2.residue.resSeq:
# if atom1.residue.resSeq + 1 != atom2.residue.resSeq:
# # for domain reside analysis
# if atom1.residue.resSeq < 171 and atom2.residue.resSeq > 172:
# diff_hbonds.append(hbond)
@hlp.timeit
def hbond_analysis(self, selection='protein'):
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
print('selection is ', self.selection)
# this is for keeping selection from trajectory
self.full_traj.restrict_atoms(self.selection)
if self.save_pdb_hbond is True:
traj_sim1_hbonds = md.load_pdb(self.pdb_file_name)
hbonds = md.baker_hubbard(traj_sim1_hbonds, periodic=False)
# hbonds = md.wernet_nilsson(traj_sim1_hbonds, periodic=True)[0]
label = lambda hbond: '%s -- %s' % (traj_sim1_hbonds.topology.atom(hbond[0]),
traj_sim1_hbonds.topology.atom(hbond[2]))
diff_hbonds = []
for hbond in hbonds:
# print(hbond)
# print(label(hbond))
atom1 = traj_sim1_hbonds.topology.atom(hbond[0])
atom2 = traj_sim1_hbonds.topology.atom(hbond[2])
# atom3 = traj_sim1_hbonds.topology.atom(hbond[2])
if atom1.residue.resSeq != atom2.residue.resSeq:
if atom1.residue.resSeq + 1 != atom2.residue.resSeq:
# domain residue analysis
if atom1.residue.resSeq < 171 and atom2.residue.resSeq > 172:
diff_hbonds.append(hbond)
for hbond in diff_hbonds:
print(hbond)
print(label(hbond))
print('Diff hbonds printed\n')
diff_hbonds = np.asarray(diff_hbonds)
self.da_distances = md.compute_distances(traj_sim1_hbonds, diff_hbonds[:, [0, 2]], periodic=False)
import itertools
# color = itertools.cycle(['r', 'b', 'gold'])
# fig = plt.figure(figsize=(7, 7))
# color = np.linspace(0, len(diff_hbonds),len(diff_hbonds))
#
# # color = itertools.cycle(['r', 'b','g','gold'])
# for i in list(range(0,len(diff_hbonds))):
# plt.hist(self.da_distances[:, i], color=next(color), label=label(diff_hbonds[i]), alpha=0.5)
# plt.legend()
# plt.ylabel('Freq');
# plt.xlabel('Donor-acceptor distance [nm]')
# plt.show()
# this works well, but needs to be modified
fig = plt.figure(figsize=(7, 7))
color = np.linspace(0, len(diff_hbonds), len(diff_hbonds))
color = itertools.cycle(['r', 'b', 'g', 'tan', 'black', 'grey', 'yellow', 'gold'])
for i in list(range(0, len(diff_hbonds))):
plt.hist(self.da_distances[:, i], color=next(color), label=label(diff_hbonds[i]), alpha=0.5)
plt.legend()
plt.ylabel('Freq');
plt.xlabel('Donor-acceptor distance [nm]')
plt.show()
fig.savefig(self.simulation_name + '_hbonds.png', dpi=600, bbox_inches='tight')
print("Hbonds have been calculated")
print('-----------------------------------\n')
@hlp.timeit
def rmsd_analysis(self, selection):
'''
:param selection: has to be mdtraj compatible
:return:
'''
self.called_rmsd_analysis = True
# self.rmsd_traj = self.full_traj[:]
#
# self.topology = self.rmsd_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.rmsd_traj.restrict_atoms(self.selection)
# self.full_traj.save(selection +'.pdb')
# this is for keeping selection from trajectory
# self.rmsd_traj.restrict_atoms(self.selection)
# self.rmsd_traj = self.full_traj[:]
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
# self.selection = self.topology.select(selection)
# print('selection is ', self.selection)
self.rmsd_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.sim_rmsd = md.rmsd(self.rmsd_traj, self.rmsd_traj, 0)
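# md.rmsd superposes every frame onto frame 0 of the sliced trajectory
# and returns the optimal-alignment RMSD per frame in nm.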
self.sim_time = self.rmsd_traj.time / 1000
self.rmsd_analysis_data.update({selection: self.sim_rmsd})
self.regression_fit_range = 10
print('RMSD analysis has been called on selection {0}\n'.format(selection))
print('-----------------------------\n')
@hlp.timeit
def plot_rmsd_cluster_color(self, selection,
title='LasR RMSD',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=300,
lang='rus'):
import pylab as plt
sns.set(style="ticks", context='paper')
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
fig = plt.figure(figsize=(14, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# plt.plot(self.sim_time, self.sim_rmsd, color=self.cluster_colors,
# linewidth=0.6, label='LasR')
if lang == 'rus':
title = 'Симуляция'
xlabel = r"Время $t$ (нс)"
ylabel = r"RMSD(нм)"
else:
title = 'Simulation'
xlabel = r"Time $t$ (ns)"
ylabel = r"RMSD(nm)"
sns.set(font_scale=2)
plt.plot(self.sim_time, self.sim_rmsd, zorder=1)
traj_rmsd = self.rmsd_analysis_data[selection]
plt.scatter(self.sim_time, traj_rmsd, marker='o', s=30, facecolor='0.5', lw=0,
c=self.cluster_colors, zorder=2)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.xlim(self.sim_time[0], self.sim_time[-1])
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
fig.tight_layout()
# remove part of ticks
sns.despine()
# plt.show()
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '_cluster_color' + '_' + lang + '.png',
dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def plot_rmsf(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=300):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
sns.set(font_scale=2)
traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
conv_data = converters.convert_data_to_pandas(atom_indices_rmsf, traj_rmsf)
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
fig = plt.figure(figsize=(14, 7))
plt.plot(conv_data['x'], conv_data['y'], color='b',
linewidth=0.6, label=title)
plt.xlabel(xlabel)
plt.xlim(min(conv_data['x']) - 100, max(conv_data['x']) + 100)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# remove part of ticks
sns.despine()
fig.savefig(self.simulation_name + '_' + title + '_rmsf.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF plot created')
@hlp.timeit
def plot_rg(self,
selection,
title='LasR Rg',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ Rg from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
custom_dpi=600):
import pylab as plt
sns.set(style="ticks", context='paper')
sns.set(font_scale=2)
# In[27]:
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
traj_rg = self.rg_analysis_data[selection]
plt.plot((self.sim_time), traj_rg, color='b',
linewidth=0.6, label='LasR')
plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Rg plot created')
print('-----------------------------------\n')
# need to select only protein for analysis
@hlp.timeit
def find_centroid(self):
atom_indices = [a.index for a in self.full_traj.topology.atoms if a.element.symbol != 'H']
distances = np.empty((self.full_traj.n_frames, self.full_traj.n_frames))
for i in range(self.full_traj.n_frames):
distances[i] = md.rmsd(self.full_traj, self.full_traj, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.std()).sum(axis=1).argmax()
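# The centroid is the frame maximising the summed Gaussian similarity
# sum_j exp(-beta * d_ij / std(d)) to all other frames, i.e. the structure
# that is, on average, closest to the rest of the trajectory.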
print(index)
centroid = self.full_traj[index]
print(centroid)
centroid.save('centroid.pdb')
####################################################################################################################
# TODO do PCA transformation of MD simulation
@hlp.timeit
def md_pca_analysis(self, selection='protein'):
self.called_md_pca_analysis = True
print('PCA analysis has been called\n')
print('-------------------------------\n')
pca1 = PCA(n_components=2)
# this is for keeping selection from trajectory
# self.pca_traj = self.full_traj[:]
#
# self.topology = self.pca_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.pca_traj.restrict_atoms(self.selection)
# self.full_traj.save(selection +'.pdb')
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
self.pca_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.pca_traj.superpose(self.pca_traj, 0)
self.reduced_cartesian = pca1.fit_transform(
self.pca_traj.xyz.reshape(self.pca_traj.n_frames, self.pca_traj.n_atoms * 3))
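# Each frame's Cartesian coordinates are flattened into a vector of length
# 3 * n_atoms, so PCA sees a (n_frames, 3 * n_atoms) matrix and projects
# every frame onto the two leading principal components.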
print(self.reduced_cartesian.shape)
print("PCA transformation finished successfully")
print('-----------------------------------\n')
####################################################################################################################
@hlp.timeit
def extract_info_cluster_data(self, cluster_data, key):
temp_data = []
for clust_num in self.range_n_clusters:
temp_data.append(cluster_data[clust_num][key])
return temp_data
@hlp.timeit
def silhouette_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.sil_pca
criteria_name = 'Mean Silhouette Coefficient for all samples'
score_text = 'Objects with a high silhouette value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
def calinski_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.calinski_pca
criteria_name = 'Calinski-Harabasz score'
score_text = 'Objects with a high Calinski-Harabasz score value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dunn_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dunn_pca
criteria_name = "Dunn's Index"
score_text = "Maximum value of the index represents the right partitioning given the index"
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dbi_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dbi_pca
criteria_name = 'Davies-Bouldin Index'
score_text = 'The optimal clustering solution has the smallest Davies-Bouldin index value.'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def select_number_of_clusters(self):
# ["foo", "bar", "baz"].index("bar")
max_silhouette = max(self.sil_pca)
max_dunn = max(self.dunn_pca)
min_dbi = min(self.dbi_pca)
sil_index = self.sil_pca.index(max_silhouette)
dunn_index = self.dunn_pca.index(max_dunn)
dbi_index = self.dbi_pca.index(min_dbi)
cluster_quantity = []
cluster_quantity.append(self.range_n_clusters[sil_index])
cluster_quantity.append(self.range_n_clusters[dunn_index])
cluster_quantity.append(self.range_n_clusters[dbi_index])
print('------------------------------------------------')
print('verify yolo', cluster_quantity)
cluster_set = set(cluster_quantity)
cluster_dict = {}
for n_set in cluster_set:
count = cluster_quantity.count(n_set)
cluster_dict.update({n_set: count})
print('verify yolo ', cluster_dict)
import operator
clust_num = max(cluster_dict.items(), key=operator.itemgetter(1))[0]
print("number of clusters is ", clust_num)
return clust_num
# def write_model_to_file(self, model, resnum=None, filename_pdb=None):
# curr_df = model['molDetail']['dataframe']
# pdb_tools.write_lig(curr_df, resnum, filename_pdb)
# need to select only protein for analysis
@hlp.timeit
def find_max_cluster(self):
length = 0
clust_temp_data = []
for k in self.clusterized_data:
data = self.clusterized_data[k]
if len(data) > length:
length = len(data)
clust_temp_data = data
self.max_clust_temp_data = clust_temp_data
return self.max_clust_temp_data
@hlp.timeit
def find_clusters_centroid(self):
print('Find Clusters centroids is called\n')
print('-----------------------------------\n')
self.called_find_clusters_centroid = True
self.clusters_centroids = []
for k in self.clusterized_data:
print('Finding centroid for cluster {0}'.format(k))
clust_temp_data = self.clusterized_data[k]
atom_indices = [a.index for a in clust_temp_data.topology.atoms if a.element.symbol != 'H']
distances = np.empty((clust_temp_data.n_frames, clust_temp_data.n_frames))
for i in range(clust_temp_data.n_frames):
distances[i] = md.rmsd(clust_temp_data, clust_temp_data, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.std()).sum(axis=1).argmax()
print(index)
centroid = clust_temp_data[index]
# self.centroid_conf = centroid
# print(centroid)
# self.centroid_conf = centroid
self.clusters_centroids.append(centroid)
centroid.save(self.simulation_name + '_' + '{0}_cluster_centroid.pdb'.format(k))
print('-----------------------------------\n')
@hlp.timeit
def find_max_cluster_centroid(self):
print('Find Max Cluster centroid is called\n')
print('-----------------------------------\n')
self.called_find_max_cluster_centroid = True
clust_temp_data = self.max_clust_temp_data
atom_indices = [a.index for a in clust_temp_data.topology.atoms if a.element.symbol != 'H']
distances = np.empty((clust_temp_data.n_frames, clust_temp_data.n_frames))
for i in range(clust_temp_data.n_frames):
distances[i] = md.rmsd(clust_temp_data, clust_temp_data, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.std()).sum(axis=1).argmax()
print(index)
centroid = clust_temp_data[index]
self.centroid_conf = centroid
print(centroid)
centroid.save(self.simulation_name + '_' + 'max_cluster_centroid.pdb')
print('-----------------------------------\n')
# need to find a way to extract models correctly
@hlp.timeit
def export_cluster_models(self,
selection_obj='protein',
select_lig=None,
save_data=False, nth_frame=1):
'''
Save cluster data to pdb files in cluster_traj directory
:return:
'''
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
labels = cluster_labels
sample_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
unique_labels = list(set(cluster_labels))
print('Unique labels ', unique_labels)
original_data = self.full_traj
self.clusterized_data = {}
for k in unique_labels: # Need to modify WORKS
# print('k is ',k)
# k == -1 then it is an outlier
if k != -1:
cluster_data = []
xyz = original_data[labels == k]
# sel_traj = xyz[:]
topology = xyz.topology
selection_name = selection_obj
selection_final_name = selection_obj
selection = topology.select(selection_obj)
selection_final = selection
if select_lig is not None:
# selection1 = topology.select(select_lig)
# selection_final = np.concatenate((selection, selection1))
# selection_name = selection_name + ' and ' + select_lig
#
# selection_final = list(topology.select(selection_obj)) + list(topology.select(select_lig))
selection_final_name = selection_obj + '+' + select_lig
selection_final = topology.select(selection_obj + ' or ' + select_lig)
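# Protein and ligand selections are combined through the MDTraj atom
# selection language ("<selection_obj> or <select_lig>") so the exported
# cluster models contain both.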
# list(topology.select(selection_obj)) + list(topology.select(select_lig))
sel_traj = xyz.atom_slice(atom_indices=selection_final)
# sel_traj.restrict_atoms(selection_final)
clust_num = int(k) + 1
if save_data is True:
temp_data = sel_traj[::nth_frame]
temp_data[0].save(self.simulation_name + '_' + 'cluster_' + str(
clust_num) + '_' + selection_final_name + '_frame_0.pdb')
temp_data.save(
self.simulation_name + '_' + 'cluster_' + str(clust_num) + '_' + selection_final_name + '.xtc')
self.clusterized_data.update({k: sel_traj})
self.save_pdb_hbond = True
def save_analysed_data(self, filename):
'''
:param filename: Saves clustered data to pickle file
:return:
'''
# import json
# with open(filename, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
import pickle
# pickle.dump(self.cluster_models, open(filename, "wb"))
pickle.dump(self, open(filename, "wb"))
# should I add json saving of information or not?
def load_analysed_data(self, filename):
'''
:param filename: path of the pickle file produced by save_analysed_data
:return:
'''
import pickle
self.analysed_data = pickle.load(open(filename, "rb"))
print('test')
####################################################################################################################
# TODO calc ramachandran part
@hlp.timeit
def ramachandran_calc(self):
self.atoms, self.bonds = self.full_traj.topology.to_dataframe()
self.phi_indices, self.phi_angles = md.compute_phi(self.full_traj, periodic=False)
self.psi_indices, self.psi_angles = md.compute_psi(self.full_traj, periodic=False)
self.angles_calc = md.compute_dihedrals(self.full_traj, [self.phi_indices[0], self.psi_indices[0]])
@hlp.timeit
def ramachandran_plot(self):
from math import pi
fig = plt.figure(figsize=(7, 7))
plt.title('Dihedral Map:')
plt.scatter(self.angles_calc[:, 0], self.angles_calc[:, 1], marker='x', c=self.full_traj.time)
cbar = plt.colorbar()
cbar.set_label('Time [ps]')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.xlim(-pi, pi)
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.ylim(-pi, pi)
fig.savefig(self.simulation_name + '_' + 'Ramachandran_analysis' + '.png', dpi=600, bbox_inches='tight')
print("Ramachandran plot created")
print('-----------------------------------\n')
@hlp.timeit
def ramachandran_calc_centroid(self, selection='backbone'):
print('Ramachandran centroid calc has been called\n')
print('------------------------------------------\n')
self.called_ramachandran_centroid_calc = True
self.centroid_topology = self.centroid_conf.topology
self.centroid_selection = self.centroid_topology.select(selection)
self.centroid_new_traj = self.centroid_conf.atom_slice(atom_indices=self.centroid_selection)
self.atoms_centroid, self.bonds_centroid = self.centroid_new_traj.topology.to_dataframe()
self.phi_indices_centroid, self.phi_angles_centroid = md.compute_phi(self.centroid_conf, periodic=False)
self.psi_indices_centroid, self.psi_angles_centroid = md.compute_psi(self.centroid_conf, periodic=False)
self.angles_calc_centroid_list = []
for i, y in zip(self.phi_indices_centroid, self.psi_indices_centroid):
temp = md.compute_dihedrals(self.centroid_conf, [i, y])
self.angles_calc_centroid_list.append(temp[0])
self.angles_calc_centroid = np.array(self.angles_calc_centroid_list, dtype=np.float64)
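# For the single centroid frame, each (phi, psi) pair of dihedral index
# quadruplets is evaluated with md.compute_dihedrals; temp[0] holds the
# (phi, psi) angles of that residue pair for the centroid conformation.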
print('------------------------------------------\n')
@hlp.timeit
def ramachandran_plot_centroid(self):
from math import pi
fig = plt.figure(figsize=(7, 7))
plt.title('Dihedral Map:')
plt.scatter(self.angles_calc_centroid[:, 0], self.angles_calc_centroid[:, 1], marker='x')
# cbar = plt.colorbar()
# cbar.set_label('Time [ps]')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.xlim(-pi, pi)
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.ylim(-pi, pi)
fig.savefig(self.simulation_name + '_' + 'Ramachandran_analysis_centroid' + '.png', dpi=600,
bbox_inches='tight')
print("Ramachandran plot created")
print('-----------------------------------\n')
####################################################################################################################
# gmx trjconv -s md_0_1.tpr -f md_0_1.xtc -o md_0_1_noPBC.xtc -pbc mol -ur compact
# gmx trjconv -s md_0_3.tpr -f md_0_3_noPBC.xtc -o md_0_3_clear.xtc -fit rot+trans
# def get_gmx_command(self):
# sim1_file_tpr = sim1 + '/md_0_3.tpr'
#
# # In[39]:
#
# sim1_out = sim1 + '/md_sim1.pdb'
#
# # In[40]:
#
# index = sim1 + '/index.ndx'
#
# # In[41]:
#
# trj_conv = 'gmx trjconv -f {0} -s {1} -n {2} -o {3} -dt 500'.format(sim1_file_traj, sim1_file_tpr, index,
# sim1_out)
#
# # traj_sim1_hbonds = md.load(sim1_out)
#
#
# # In[44]:
#
# # traj_sim1_hbonds
#
#
# # In[45]:
#
# sim1_clear = sim1 + '/md_sim1_clear.pdb'
#
# # In[46]:
#
# traj_sim1_hbonds = md.load_pdb(sim1_clear)
#
# # In[47]:
#
# traj_sim1_hbonds
#
# # In[48]:
#
# traj_sim1_hbonds[-1].save('QRC_sim0_lastFrame.pdb')
#
# # In[49]:
#
# traj_sim1_hbonds[0].save('QRC_sim0_firstFrame.pdb')
#
# # In[50]:
#
# traj_sim1_hbonds[0:-1:30].save('QRC_sim0_shortAnimation.pdb')
#
# # In[51]:
#
# hbonds = md.baker_hubbard(traj_sim1_hbonds, freq=0.8, periodic=False)
#
# # In[52]:
#
# hbonds = md.wernet_nilsson(traj_sim1_hbonds[-1], periodic=True)[0]
#
# # In[53]:
#
# sel
#
# # In[54]:
#
# # for hbond in hbonds:
# # # print(hbond)
# # print(label(hbond))
#
#
# # In[55]:
#
# da_distances = md.compute_distances(traj_sim1_hbonds, hbonds[:, [0, 2]], periodic=False)
#
# # In[56]:
#
# import itertools
#
# # In[57]:
#
# color = itertools.cycle(['r', 'b', 'gold'])
# for i in [2, 3, 4]:
# plt.hist(da_distances[:, i], color=next(color), label=label(hbonds[i]), alpha=0.5)
# plt.legend()
# plt.ylabel('Freq');
# plt.xlabel('Donor-acceptor distance [nm]')
#
# # TEST ORIGINAL EXAMPLE
# #
#
# # Check for HSL_LasR_1
#
# # In[ ]:
def get_data_for_analysis(self):
return self.analysis_structure
def drawVectors(self, transformed_features, components_, columns, plt, scaled):
if not scaled:
return plt.axes() # No cheating ;-)
num_columns = len(columns)
# This function will project your *original* features (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:, 0])
yvector = components_[1] * max(transformed_features[:, 1])
## visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
important_features = {columns[i]: math.sqrt(xvector[i] ** 2 + yvector[i] ** 2) for i in range(num_columns)}
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print("Features by importance:\n", important_features)
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75)
plt.text(xvector[i] * 1.2, yvector[i] * 1.2, list(columns)[i], color='b', alpha=0.75)
return ax
# test code
@hlp.timeit
def rmsf_calc(self, target=None, reference=None, frame=0, wrt=False, atom_indices=None, ref_atom_indices=None):
'''
Use a backbone selection for atom_indices / ref_atom_indices.
With wrt=True the RMSF is computed with respect to the time-averaged
structure (as GROMACS does); otherwise frame `frame` is the reference.
'''
self.called_rmsf_calc = True
print('RMSF analysis has been called\n')
print('-----------------------------\n')
self.topology = self.full_traj.topology
atom_indices = self.topology.select(atom_indices)
ref_atom_indices_name = ref_atom_indices
ref_atom_indices = self.topology.select(ref_atom_indices)
self.atom_indices = atom_indices
self.ref_atom_indices = ref_atom_indices
# this is for keeping selection from trajectory
# self.full_traj.restrict_atoms(self.selection)
self.sim_time = self.full_traj.time / 1000
trajectory = self.full_traj
trajectory.superpose(self.full_traj[frame], atom_indices=atom_indices, ref_atom_indices=ref_atom_indices)
if wrt is True:
avg_xyz = np.mean(trajectory.xyz[:, atom_indices, :], axis=0)
self.avg_xyz = avg_xyz
self.sim_rmsf = np.sqrt(3 * np.mean((trajectory.xyz[:, atom_indices, :] - avg_xyz) ** 2, axis=(0, 2)))
else:
reference = trajectory[frame]
self.sim_rmsf = np.sqrt(
3 * np.mean((trajectory.xyz[:, atom_indices, :] - reference.xyz[:, ref_atom_indices, :]) ** 2,
axis=(0, 2)))
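# In both branches RMSF_i = sqrt(3 * <(r_i - r_ref,i)^2>): the squared
# deviation is averaged over frames and the x, y, z components, and the
# factor of 3 turns that per-component average back into the full squared
# displacement per atom.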
self.rmsf_analysis_data.update({ref_atom_indices_name: {'atom_indices': self.atom_indices,
'ref_atom_indices': self.ref_atom_indices,
'rmsf': self.sim_rmsf}})
print('-----------------------------\n')
return self.sim_rmsf
@hlp.timeit
def pca_analysis(self):
scaleFeatures = False
df = self.data_for_analysis
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
T = pca.transform(df)
# ax = self.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(T)
T.columns = ['component1', 'component2']
# T.plot.scatter(x='component1', y='component2', marker='o', s=300, alpha=0.75) # , ax=ax)
# plt.show()
return T
@hlp.timeit
def pca_analysis_reshape(self):
scaleFeatures = False
df = self.data_for_analysis
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(df)
T = pca.transform(df)
# ax = self.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = | pd.DataFrame(T) | pandas.DataFrame |
# Note
#
# A 2D NumPy array can hold values of only one datatype. Pandas does not have
# that restriction and is an ideal data structure for tabular data whose
# columns hold values of different data types.
#
import pandas as pd
# Create Panda dataframe using dictionary
dict = {
"country" : ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Delhi", "Beijing", "Pretoria"],
"area": [8.516, 17.100, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98]
}
brics = | pd.DataFrame(dict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure changes propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = | Panel.from_dict(data, orient='minor') | pandas.core.panel.Panel.from_dict |
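# Editor's illustrative sketch (not part of the scraped sample above): what the completed
# Panel.from_dict(..., orient='minor') call does. Panel was deprecated and removed in
# pandas 1.0, so this only runs on the old pandas versions these tests target; the frame
# contents here are made up.
import numpy as np
import pandas as pd
from pandas import Panel  # assumes a pandas version that still ships Panel
frames = {'item a': pd.DataFrame(np.arange(6).reshape(3, 2), columns=['x', 'y']),
          'item b': pd.DataFrame(np.arange(6, 12).reshape(3, 2), columns=['x', 'y'])}
# orient='minor' makes the DataFrame columns the Panel items and the dict keys the
# minor axis (the right choice for mixed-dtype frames, as in the test above).
wp = Panel.from_dict(frames, orient='minor')
print(wp.items)       # the original columns: 'x', 'y'
print(wp.minor_axis)  # the dict keys: 'item a', 'item b'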
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def get_edges_df_from_obabel(obabel_edges_df, structures_df):
"""
Given obabel edge data, convert it to the format of edges_df so that all other code remains the same.
"""
obabel_edges_df = obabel_edges_df[[
'mol_id', 'atom_index_0', 'atom_index_1', 'BondLength', 'BondOrder', 'IsAromatic'
]]
obabel_edges_df.loc[obabel_edges_df.IsAromatic, 'BondOrder'] = 1.5
obabel_edges_df.rename({'BondLength': 'distance', 'BondOrder': 'bond_type'}, axis=1, inplace=True)
enc = LabelEncoder()
enc.fit(structures_df.molecule_name)
obabel_edges_df['molecule_name'] = enc.inverse_transform(obabel_edges_df['mol_id'])
obabel_edges_df.drop(['IsAromatic', 'mol_id'], axis=1, inplace=True)
obabel_edges_df = add_structure_data_to_edge(obabel_edges_df, structures_df, ['x', 'y', 'z'])
obabel_edges_df['x'] = (obabel_edges_df['x_1'] - obabel_edges_df['x_0']) / obabel_edges_df['distance']
obabel_edges_df['y'] = (obabel_edges_df['y_1'] - obabel_edges_df['y_0']) / obabel_edges_df['distance']
obabel_edges_df['z'] = (obabel_edges_df['z_1'] - obabel_edges_df['z_0']) / obabel_edges_df['distance']
obabel_edges_df.drop(['x_1', 'x_0', 'y_1', 'y_0', 'z_0', 'z_1'], axis=1, inplace=True)
return obabel_edges_df
def add_structure_data_to_edge(edges_df, structures_df, cols_to_add=['atom']):
edges_df = pd.merge(
edges_df,
structures_df[['molecule_name', 'atom_index'] + cols_to_add],
how='left',
left_on=['molecule_name', 'atom_index_0'],
right_on=['molecule_name', 'atom_index'],
)
edges_df.drop('atom_index', axis=1, inplace=True)
edges_df.rename({k: f'{k}_0' for k in cols_to_add}, inplace=True, axis=1)
edges_df = pd.merge(
edges_df,
structures_df[['molecule_name', 'atom_index'] + cols_to_add],
how='left',
left_on=['molecule_name', 'atom_index_1'],
right_on=['molecule_name', 'atom_index'],
)
edges_df.drop('atom_index', axis=1, inplace=True)
edges_df.rename({k: f'{k}_1' for k in cols_to_add}, inplace=True, axis=1)
return edges_df
def get_symmetric_edges(edge_df):
"""
Ensures that all edges in all molecules occur exactly twice in edge_df. This ensures that when we join
on either atom_index_0 or atom_index_1, all edges are covered.
"""
e_df = edge_df.copy()
atom_1 = e_df.atom_index_1.copy()
e_df['atom_index_1'] = e_df['atom_index_0']
e_df['atom_index_0'] = atom_1
xyz_cols = list(set(['x', 'y', 'z']).intersection(set(edge_df.columns)))
assert len(xyz_cols) in [0, 3]
if len(xyz_cols) == 3:
e_df[['x', 'y', 'z']] = -1 * e_df[['x', 'y', 'z']]
edge_df = | pd.concat([edge_df, e_df], ignore_index=True) | pandas.concat |
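# Editor's illustrative sketch (not part of the scraped sample above): the symmetrisation
# performed by get_symmetric_edges appends a copy of every edge with the endpoints swapped
# and the direction vector negated. Standalone toy version with made-up numbers:
import pandas as pd
toy = pd.DataFrame({'atom_index_0': [0], 'atom_index_1': [1],
                    'x': [1.0], 'y': [0.0], 'z': [0.0], 'distance': [1.0]})
rev = toy.rename(columns={'atom_index_0': 'atom_index_1', 'atom_index_1': 'atom_index_0'})
rev[['x', 'y', 'z']] = -1 * rev[['x', 'y', 'z']]
sym = pd.concat([toy, rev], ignore_index=True)
print(sym)  # two rows: edge 0 -> 1 with +x, and its mirror 1 -> 0 with -x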
""" Behind the scenes work of querying a tweet and producing graphs relating to the sentiment analysis. """
from afinn import Afinn
from matplotlib.figure import Figure
from matplotlib import rcParams
from pandas import DataFrame
from sqlite3 import connect
from twitterscraper.query import query_tweets
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'F0F0F0'})
def backend(scrape, topic, begin_date, end_date, min_likes, min_retweets):
"""
Create database tables if they don't exist, insert query entry and return an
analysis of corresponding tweets
:param scrape: True to scrape fresh tweets for the query; False to skip scraping
:param topic: user-provided keyword
:param begin_date: start of the date range to search
:param end_date: end of the date range to search
:param min_likes: 0 if no user input
:param min_retweets: 0 if no user input
:return: sentiment analysis of tweets
"""
if scrape is True:
tweets = scrape_tweets(query=topic, begin_date=begin_date, end_date=end_date)
else:
tweets = None
with connect('database.db') as connection:
create_tables(connection=connection)
query_id = insert_query(connection=connection, query=(topic, begin_date, end_date, min_likes, min_retweets))
if tweets is not None:
insert_tweets(connection=connection, tweets=tweets)
insert_sampled(connection=connection, query_id=query_id)
return analyze_tweets(connection=connection, query_id=query_id)
def scrape_tweets(query, begin_date, end_date):
"""
:param query: user input query
:param begin_date: start of the date range to search
:param end_date: end of the date range to search
:return: None if no matching keywords else pandas dataframe of tweets
"""
limit = None
lang = 'english'
filters = ['tweet_id', 'text', 'timestamp', 'likes', 'retweets', 'user_id', 'screen_name']
tweets = query_tweets(query, limit=limit, lang=lang, begindate=begin_date, enddate=end_date)
if len(tweets) > 0:
data_frame = | DataFrame(tweet.__dict__ for tweet in tweets) | pandas.DataFrame |
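# Editor's illustrative sketch (not part of the scraped sample above): the completed line
# builds a DataFrame from each tweet object's __dict__. The Tweet class below is a
# hypothetical stand-in for the twitterscraper result objects.
import pandas as pd
class Tweet:
    def __init__(self, tweet_id, text, likes):
        self.tweet_id, self.text, self.likes = tweet_id, text, likes
tweets = [Tweet(1, 'hello world', 3), Tweet(2, 'goodbye', 0)]
df = pd.DataFrame(t.__dict__ for t in tweets)
print(df.columns.tolist())  # ['tweet_id', 'text', 'likes']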
import json
import sys
import warnings
from pathlib import Path
# import matplotlib.pyplot as plt
import pandas as pd
import requests
class Timeseries:
def __init__(self, dataset, id_timeseries="", name_timeseries=""):
self.ds = dataset
self._id_ds = self.ds._id
self._id_proj = self.ds.con.project_id
self._id = id_timeseries
self._name = name_timeseries
if id_timeseries == "" and name_timeseries != "":
self._id = self.ds.query_ts_id(name_timeseries)
self._name = name_timeseries
elif id_timeseries != "" and name_timeseries == "":
self._name = self.ds.query_ts_name(id_timeseries)
self._id = id_timeseries
if self._name == "" and self._id == "":
warnings.warn("neither timeseries id nor timerseries name were not defined (at least one value required).")
self._header = {'dhi-open-api-key': '{0}'.format(self.ds.con._api_key),
'Content-Type': 'application/json',
'dhi-project-id': '{0}'.format(self._id_proj),
'dhi-dataset-id': '{0}'.format(self._id_ds),
'dhi-service-id': 'timeseries',
}
def get_data(self, time_from=None, time_to=None):
"""
function to request data in timeseries
:param time_from: specify from what timestamp data is requested; format: yyyy-mm-ddThhmmss. If None, will return from first timestamp.
:param time_to: specify to what timestamp data is requested; format: yyyy-mm-ddThhmmss. If None, will return up to latest timestamp.
:return: dataframe containing the timeseries data
:rtype: pd.DataFrame
"""
url = None
if time_from is None and time_to is None:
url = self.ds.con.metadata_service_url + "api/ts/{0}/timeseries/{1}/values"\
.format(self._id_ds, self._id)
elif time_from is None and time_to is not None:
url = self.ds.con.metadata_service_url + "api/ts/{0}/timeseries/{1}/values?to={2}"\
.format(self._id_ds, self._id, time_to)
elif time_from is not None and time_to is None:
url = self.ds.con.metadata_service_url + "api/ts/{0}/timeseries/{1}/values?from={2}"\
.format(self._id_ds, self._id, time_from)
elif time_from is not None and time_to is not None:
url = self.ds.con.metadata_service_url + "api/ts/{0}/timeseries/{1}/values?from={2}&to={3}"\
.format(self._id_ds, self._id, time_from, time_to)
response = requests.get(url, headers=self._header)
if response.status_code > 300 and time_from is not None or response.status_code > 300 and time_to is not None:
raise ValueError("request failed - validate that times are given in format {yyyy-MM-ddTHHmmss}")
json_ = response.json()
if response.status_code > 300:
return json_
df = | pd.DataFrame(json_["data"]) | pandas.DataFrame |
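# Editor's illustrative sketch (not part of the scraped sample above): turning a JSON
# payload of the shape the code above expects into a DataFrame. The payload layout and
# column names here are hypothetical stand-ins for the real timeseries service response.
import pandas as pd
json_ = {"data": [["2021-01-01T000000", 1.2], ["2021-01-01T010000", 1.4]]}
df = pd.DataFrame(json_["data"], columns=["timestamp", "value"])
df["timestamp"] = pd.to_datetime(df["timestamp"], format="%Y-%m-%dT%H%M%S")
print(df.dtypes)  # timestamp -> datetime64[ns], value -> float64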
import pyprind
import pandas as pd
import os
xxxhmiac = 'imported done'
print(xxxhmiac)
labels = {'pos': 1, 'neg': 0}
pbar = pyprind.ProgBar(50000)
df = pd.DataFrame()
for s in ('test', 'train'):
for l in ('pos', 'neg'):
path = r'C:\Users\<NAME>\Documents\aclImdb/%s/%s' % (s, l)
for file in os.listdir(path):
with open(os.path.join(path, file), 'r', encoding='utf-8') as infile:
txt = infile.read()
df = df.append([[txt, labels[l]]], ignore_index=True)
pbar.update()
df.columns = ['review', 'sentiment']
xxxhmiad = 'appended to dataframe done'
print(xxxhmiad)
# # path = os.path.join(basepath, s, l)
import numpy as np
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
df.to_csv(r'C:\Users\<NAME>\Documents\SAfiles/movie_data.csv', index=False)
xxxhmiae = 'permutation done'
print(xxxhmiae)
import pandas as pd
xxxhmiaf = 'panda done'
print(xxxhmiaf)
df = | pd.read_csv(r'C:\Users\<NAME>\Documents\SAfiles/movie_data.csv') | pandas.read_csv |
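# Editor's illustrative sketch (not part of the scraped sample above): a quick sanity
# check of the review/sentiment CSV written above, using an in-memory buffer instead of
# the user-specific path.
import io
import pandas as pd
buf = io.StringIO("review,sentiment\ngreat film,1\nterrible plot,0\n")
df = pd.read_csv(buf)
print(df.shape)                        # (2, 2)
print(df['sentiment'].value_counts())  # class balance check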
import pickle
from pathlib import Path
import pandas as pd
import data.utils.web_scrappers as ws
DATA_DIR = Path("data/data")
COM_DATA_DIR = DATA_DIR / "DAX30"
PKL_DIR = DATA_DIR / "PKL_DIR"
DAX_DATA_PKL = PKL_DIR / "DAX30.data.pkl"
DAX_DATA_CSV = DATA_DIR / "DAX30.csv"
def path_to_string(path):
return "/".join(path.parts)
def compute_dax_df():
"""
builds the main dataframe with stock data of all DAX companies
and saves it as .csv and .pkl (nothing is returned)
"""
tickers = ws.get_tickers()
main_df = pd.DataFrame()
for count, ticker in enumerate(tickers):
ticker = ticker.rstrip()
try:
df = get_com_as_df(ticker)
except IOError:
print("No .csv found for {}".format(ticker))
continue
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df.rename(columns={"Adj Close": ticker}, inplace=True)
df.drop(["Open", "High", "Low", "Close", "Volume"], 1, inplace=True)
if main_df.empty:
main_df = df
else:
main_df = main_df.join(df, how='outer')  # outer-join on the shared Date index (merge() would find no common columns)
# print(main_df.tail())
save_dax_as_csv(main_df)
save_dax_as_pkl(main_df)
def save_com_as_csv(df, ticker):
"""
Saves company data as .csv
:param df: new fetched dataframe from yahoo
:param ticker: ticker symbol
"""
path = path_to_string(COM_DATA_DIR) + "/{}.csv".format(ticker)
df.to_csv(path)
print("Saved {} data to {}".format(ticker, path))
def save_dax_as_csv(df):
"""
Saves dax data as .csv
:param df: DataFrame
"""
path = path_to_string(DATA_DIR) + "/DAX30.csv"
df.to_csv(path)
print("Saved DAX30 data to{}".format(path))
def save_dax_as_pkl(df):
path = path_to_string(DAX_DATA_PKL)
with open(path, "wb") as f:
pickle.dump(df, f)
print("Saved DAX30 data to{}".format(path))
def get_dax_as_pkl():
path = path_to_string(DAX_DATA_PKL)
with open(path, "wb") as f:
pickle.load(f)
def get_dax__as_df():
if DAX_DATA_PKL.exists():
path = path_to_string(DAX_DATA_PKL)
return | pd.read_pickle(path) | pandas.read_pickle |
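# Editor's illustrative sketch (not part of the scraped sample above): the pickle
# round-trip used above, on a temporary file instead of the DAX30 paths. pd.read_pickle
# also reads DataFrames dumped with the plain pickle module, as save_dax_as_pkl does.
import os
import tempfile
import pandas as pd
df = pd.DataFrame({'ADS.DE': [1.0, 2.0]},
                  index=pd.to_datetime(['2020-01-02', '2020-01-03']))
path = os.path.join(tempfile.mkdtemp(), 'dax30.pkl')
df.to_pickle(path)
assert pd.read_pickle(path).equals(df)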
import boto3
import base64
import os
from botocore.exceptions import ClientError
import json
import psycopg2
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import sys
import traceback
class DB:
"""database interface class"""
@staticmethod
def connect(params: dict) -> [psycopg2.extensions.connection, psycopg2.extensions.cursor]:
"""
@brief: connects to the database
@params:
params: dictionary of db connection parameters
@returns:
db: the database
cur: the cursor
"""
if "datasource.username" in params:
temp = {
"user": params["datasource.username"],
"password": params["datasource.password"],
"database": params["datasource.database"],
"host": params["datasource.url"],
"port": params["datasource.port"]
}
params = temp
try:
print("[INFO] connecting to db.")
db = psycopg2.connect(**params)
print("[INFO] connected.")
cur = db.cursor()
except Exception as e:
print("[ERROR] failed to connect to db.")
print(e)
return []
return [db, cur]
@staticmethod
def execute(sql_query: str, database: psycopg2.extensions.connection) -> pd.DataFrame:
"""
@brief: shorthand sql style execution
@params:
sql_query: the query string to execute
database: the database to execute on
@returns: a pandas table of the query results
"""
try:
if ('insert' in sql_query):
print("insert here")
| pd.read_sql_query(sql_query, database) | pandas.read_sql_query |
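# Editor's illustrative sketch (not part of the scraped sample above): the read path the
# completed line relies on, demonstrated against an in-memory SQLite connection rather
# than the psycopg2 connection this class actually uses.
import sqlite3
import pandas as pd
con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE t (id INTEGER, name TEXT)')
con.execute("INSERT INTO t VALUES (1, 'a'), (2, 'b')")
df = pd.read_sql_query('SELECT * FROM t', con)
print(df)  # two rows with columns id and name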
import pandas as pd
import os
import matplotlib.pyplot as plt
def plot_mortality_vs_excess(csv, owid_excess_mortality):
"""
Description: plots a comparison between the official death count and the p-score
(excess mortality) in a two-axis chart for countries with p-score > 1
:param csv: path to the OWID main coronavirus dataset (CSV), read with pd.read_csv
:param owid_excess_mortality: path to the OWID excess mortality dataset (CSV),
read with pd.read_csv
"""
owid_data = pd.read_csv(csv)
owid_excess_mortality = | pd.read_csv(owid_excess_mortality) | pandas.read_csv |
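# Editor's illustrative sketch (not part of the scraped sample above): the kind of
# two-axis chart the docstring describes, with made-up numbers standing in for the OWID
# official-deaths and p-score series.
import matplotlib.pyplot as plt
dates = ['2020-03', '2020-04', '2020-05']
official_deaths = [100, 400, 300]
p_scores = [5.0, 35.0, 20.0]
fig, ax1 = plt.subplots()
ax1.plot(dates, official_deaths, color='tab:blue', label='official deaths')
ax1.set_ylabel('official deaths')
ax2 = ax1.twinx()  # second y-axis sharing the same x-axis
ax2.plot(dates, p_scores, color='tab:red', label='p-score (%)')
ax2.set_ylabel('p-score (%)')
fig.tight_layout()
# plt.show()  # uncomment to display interactively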
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE, SQLITE_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
class Pheno2SQLTest(DBTest):
@unittest.skip('sqlite being removed')
def test_sqlite_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check table exists
tmp = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not tmp.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_exit(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
temp_dir = tempfile.mkdtemp()
# Run
with Pheno2SQL(csv_file, db_engine, tmpdir=temp_dir) as p2sql:
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files were deleted
assert len(os.listdir(temp_dir)) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_custom_tmpdir(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
with Pheno2SQL(csv_file, db_engine, tmpdir='/tmp/custom/directory/here', delete_temp_csv=False) as p2sql:
# Run
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files are still there
assert len(os.listdir('/tmp/custom/directory/here')) > 0
## Check that the temporary directory is now clean
assert len(os.listdir('/tmp/custom/directory/here')) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_auxiliary_table_is_created(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existence
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('fields'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_is_created_and_has_minimum_data_required(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_with_more_information(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'field_id'] == '21'
assert tmp.loc['c21_0_0', 'inst'] == 0
assert tmp.loc['c21_0_0', 'arr'] == 0
assert tmp.loc['c21_0_0', 'coding'] == 100261
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_0_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_0_0', 'description'] == 'An string value'
assert tmp.loc['c21_1_0', 'field_id'] == '21'
assert tmp.loc['c21_1_0', 'inst'] == 1
assert tmp.loc['c21_1_0', 'arr'] == 0
assert tmp.loc['c21_1_0', 'coding'] == 100261
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_1_0', 'description'] == 'An string value'
assert tmp.loc['c21_2_0', 'field_id'] == '21'
assert tmp.loc['c21_2_0', 'inst'] == 2
assert tmp.loc['c21_2_0', 'arr'] == 0
assert tmp.loc['c21_2_0', 'coding'] == 100261
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_2_0', 'description'] == 'An string value'
assert tmp.loc['c31_0_0', 'field_id'] == '31'
assert tmp.loc['c31_0_0', 'inst'] == 0
assert tmp.loc['c31_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c31_0_0', 'coding'])
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c31_0_0', 'type'] == 'Date'
assert tmp.loc['c31_0_0', 'description'] == 'A date'
assert tmp.loc['c34_0_0', 'field_id'] == '34'
assert tmp.loc['c34_0_0', 'inst'] == 0
assert tmp.loc['c34_0_0', 'arr'] == 0
assert tmp.loc['c34_0_0', 'coding'] == 9
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'type'] == 'Integer'
assert tmp.loc['c34_0_0', 'description'] == 'Some integer'
assert tmp.loc['c46_0_0', 'field_id'] == '46'
assert tmp.loc['c46_0_0', 'inst'] == 0
assert tmp.loc['c46_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c46_0_0', 'coding'])
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'type'] == 'Integer'
assert tmp.loc['c46_0_0', 'description'] == 'Some another integer'
assert tmp.loc['c47_0_0', 'field_id'] == '47'
assert tmp.loc['c47_0_0', 'inst'] == 0
assert tmp.loc['c47_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c47_0_0', 'coding'])
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c47_0_0', 'type'] == 'Continuous'
assert tmp.loc['c47_0_0', 'description'] == 'Some continuous value'
assert tmp.loc['c48_0_0', 'field_id'] == '48'
assert tmp.loc['c48_0_0', 'inst'] == 0
assert tmp.loc['c48_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c48_0_0', 'coding'])
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'type'] == 'Time'
assert tmp.loc['c48_0_0', 'description'] == 'Some time'
def test_postgresql_auxiliary_table_check_types(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
sql_types = """
select column_name, data_type
from information_schema.columns
where table_name = 'fields';
"""
tmp = pd.read_sql(sql_types, create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['field_id', 'data_type'] == 'text'
assert tmp.loc['inst', 'data_type'] == 'bigint'
assert tmp.loc['arr', 'data_type'] == 'bigint'
assert tmp.loc['coding', 'data_type'] == 'bigint'
assert tmp.loc['table_name', 'data_type'] == 'text'
assert tmp.loc['type', 'data_type'] == 'text'
assert tmp.loc['description', 'data_type'] == 'text'
def test_postgresql_auxiliary_table_constraints(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('fields', column_query='column_name', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
# indexes on the fields table columns
constraint_sql = self._get_table_contrains('fields', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 6
assert 'arr' in columns
assert 'field_id' in columns
assert 'inst' in columns
assert 'table_name' in columns
assert 'type' in columns
assert 'coding' in columns
def test_postgresql_two_csv_files(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_1_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine))
expected_columns = ["eid","c100_0_0", "c100_1_0", "c100_2_0", "c110_0_0", "c120_0_0", "c130_0_0", "c140_0_0", "c150_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 5
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert int(tmp.loc[1, 'c34_0_0']) == -33
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 41.55312
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert tmp.loc[5, 'c21_0_0'] == 'Option number 5'
assert tmp.loc[5, 'c21_1_0'] == 'Maybe'
assert tmp.loc[5, 'c21_2_0'] == 'Probably'
assert pd.isnull(tmp.loc[5, 'c31_0_0'])
assert int(tmp.loc[5, 'c34_0_0']) == -4
assert int(tmp.loc[5, 'c46_0_0']) == 1
assert pd.isnull(tmp.loc[5, 'c47_0_0'])
assert tmp.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 3
assert int(tmp.loc[1, 'c100_0_0']) == -9
assert int(tmp.loc[1, 'c100_1_0']) == 3
assert pd.isnull(tmp.loc[1, 'c100_2_0'])
assert tmp.loc[1, 'c110_0_0'].round(5) == 42.55312
assert int(tmp.loc[1, 'c120_0_0']) == -33
assert tmp.loc[1, 'c130_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c140_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert tmp.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert pd.isnull(tmp.loc[3, 'c100_0_0'])
assert int(tmp.loc[3, 'c100_1_0']) == -4
assert int(tmp.loc[3, 'c100_2_0']) == -10
assert tmp.loc[3, 'c110_0_0'].round(5) == -35.31471
assert int(tmp.loc[3, 'c120_0_0']) == 0
assert tmp.loc[3, 'c130_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c140_0_0'].strftime('%Y-%m-%d') == '1997-04-15'
assert pd.isnull(tmp.loc[3, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_single_table(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2020-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '1990-02-15'
assert query_result.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_tables(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_multiple_tables(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_multiple_tables(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
def test_postgresql_two_csv_files_flipped_query_multiple_tables(self):
# Prepare
# In this test the files are just flipped
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv02, csv01), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_custom_columns(self):
# SQLite is very limited when selecting variables, renaming, doing math operations, etc.
pass
def test_postgresql_query_custom_columns(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', '(c47_0_0 ^ 2.0) as c47_squared']
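# '^' is the PostgreSQL exponentiation operator; the alias renames the derived column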
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c47_squared'] for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
assert query_result.loc[3, 'c47_0_0'].round(5) == -5.32471
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
assert query_result.loc[1, 'c47_squared'].round(5) == round(45.55412 ** 2, 5)
assert query_result.loc[2, 'c47_squared'].round(5) == round((-0.55461) ** 2, 5)
assert query_result.loc[3, 'c47_squared'].round(5) == round((-5.32471) ** 2, 5)
assert query_result.loc[4, 'c47_squared'].round(5) == round(55.19832 ** 2, 5)
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_filter(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
filter = ['c47_0_0 > 0']
query_result = next(p2sql.query(columns, filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 4))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
def test_postgresql_query_single_filter(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
filter = ['c47_0_0 > 0']
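# Only rows with a positive c47_0_0 (eids 1 and 4) are expected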
query_result = next(p2sql.query(columns, filterings=filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 4))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_and_filter(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
filter = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
query_result = next(p2sql.query(columns, filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 2))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
def test_postgresql_query_multiple_and_filter(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
filter = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
query_result = next(p2sql.query(columns, filterings=filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 2))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
@unittest.skip('sqlite being removed')
def test_sqlite_float_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example03.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'] == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
# FIXME: this is strange; the data type in this particular case needs np.round
assert np.round(tmp.loc[1, 'c47_0_0'], 5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'] == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2016-11-30'
assert pd.isnull(tmp.loc[3, 'c47_0_0'])
assert tmp.loc[3, 'c48_0_0'] == '2010-01-01'
def test_postgresql_float_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example03.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert pd.isnull(tmp.loc[3, 'c47_0_0'])
assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_timestamp_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example04.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert pd.isnull(tmp.loc[2, 'c48_0_0'])
assert tmp.loc[3, 'c47_0_0'].round(5) == -5.32471
assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_integer_is_nan(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example06_nan_integer.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert pd.isnull(tmp.loc[2, 'c46_0_0'])
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_first_row_is_nan_integer(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example07_first_nan_integer.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert pd.isnull(tmp.loc[1, 'c46_0_0'])
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert pd.isnull(tmp.loc[2, 'c46_0_0'])
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_sql_chunksize01(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
import collections.abc
assert isinstance(query_result, collections.abc.Iterable)
index_len_sum = 0
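# With sql_chunksize=2, the 4 rows should arrive in two chunks of two rows each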
for chunk_idx, chunk in enumerate(query_result):
assert chunk.index.name == 'eid'
index_len_sum += len(chunk.index)
assert len(chunk.index) == 2
if chunk_idx == 0:
indexes = (1, 2)
assert all(x in chunk.index for x in indexes)
else:
indexes = (3, 4)
assert all(x in chunk.index for x in indexes)
assert len(chunk.columns) == len(columns)
assert all(x in columns for x in chunk.columns)
assert not chunk.empty
assert chunk.shape[0] == 2
if chunk_idx == 0:
assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
assert chunk.loc[1, 'c21_2_0'] == 'Yes'
assert chunk.loc[2, 'c21_2_0'] == 'No'
assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
else:
assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(chunk.loc[4, 'c21_2_0'])
assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
assert index_len_sum == 4
def test_postgresql_sql_chunksize02(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=3)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
import collections.abc
assert isinstance(query_result, collections.abc.Iterable)
index_len_sum = 0
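# With sql_chunksize=3, the 4 rows should arrive as a chunk of three rows followed by a chunk of one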
for chunk_idx, chunk in enumerate(query_result):
assert chunk.index.name == 'eid'
index_len_sum += len(chunk.index)
if chunk_idx == 0:
assert len(chunk.index) == 3
indexes = (1, 2, 3)
assert all(x in chunk.index for x in indexes)
else:
assert len(chunk.index) == 1
indexes = (4,)
assert all(x in chunk.index for x in indexes)
assert len(chunk.columns) == len(columns)
assert all(x in columns for x in chunk.columns)
assert not chunk.empty
if chunk_idx == 0:
assert chunk.shape[0] == 3
assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
assert chunk.loc[1, 'c21_2_0'] == 'Yes'
assert chunk.loc[2, 'c21_2_0'] == 'No'
assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
else:
assert chunk.shape[0] == 1
assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
assert pd.isnull(chunk.loc[4, 'c21_2_0'])
assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
assert index_len_sum == 4
def test_postgresql_all_eids_table_created(self):
# Prepare
directory = get_repository_path('pheno2sql/example14')
csv_file1 = get_repository_path(os.path.join(directory, 'example14_00.csv'))
csv_file2 = get_repository_path(os.path.join(directory, 'example14_01.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv_file1, csv_file2), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check all_eids table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('all_eids'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
all_eids = pd.read_sql('select * from all_eids', create_engine(db_engine))
expected_columns = ["eid"]
assert len(all_eids.columns) == len(expected_columns)
assert all(x in all_eids.columns for x in expected_columns)
## Check data is correct
all_eids = pd.read_sql('select * from all_eids', create_engine(db_engine), index_col='eid')
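# all_eids is expected to hold the union of eids across the inputs (presumably 6 from one source plus 4 only present in the other)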
assert len(all_eids.index) == 6 + 4, len(all_eids.index)
assert 1000010 in all_eids.index
assert 1000020 in all_eids.index
assert 1000021 in all_eids.index
assert 1000030 in all_eids.index
assert 1000040 in all_eids.index
assert 1000041 in all_eids.index
assert 1000050 in all_eids.index
assert 1000060 in all_eids.index
assert 1000061 in all_eids.index
assert 1000070 in all_eids.index
def test_postgresql_all_eids_table_constraints(self):
# Prepare
directory = get_repository_path('pheno2sql/example14')
csv_file1 = get_repository_path(os.path.join(directory, 'example14_00.csv'))
csv_file2 = get_repository_path(os.path.join(directory, 'example14_01.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv_file1, csv_file2), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check all_eids table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('all_eids'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('all_eids', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 1
assert 'eid' in columns
def test_postgresql_bgen_samples_table_created(self):
# Prepare
directory = get_repository_path('pheno2sql/example10')
csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('bgen_samples'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
samples_data = pd.read_sql('select * from bgen_samples', create_engine(db_engine))
expected_columns = ["index", "eid"]
assert len(samples_data.columns) == len(expected_columns)
assert all(x in samples_data.columns for x in expected_columns)
## Check data is correct
samples_data = pd.read_sql('select * from bgen_samples', create_engine(db_engine), index_col='index')
assert not samples_data.empty
assert samples_data.shape[0] == 5
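# The index -> eid mapping presumably follows the sample order in the bgen .sample file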
assert samples_data.loc[1, 'eid'] == 1000050
assert samples_data.loc[2, 'eid'] == 1000030
assert samples_data.loc[3, 'eid'] == 1000040
assert samples_data.loc[4, 'eid'] == 1000010
assert samples_data.loc[5, 'eid'] == 1000020
def test_postgresql_bgen_samples_table_constraints(self):
# Prepare
directory = get_repository_path('pheno2sql/example10')
csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('bgen_samples'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('bgen_samples', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 2
assert 'eid' in columns
assert 'index' in columns
# indexes
constraint_sql = self._get_table_contrains('bgen_samples', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 2
assert 'eid' in columns
assert 'index' in columns
def test_postgresql_events_tables_only_one_instance_filled(self):
# Prepare
directory = get_repository_path('pheno2sql/example10')
csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check events table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
events_data = pd.read_sql('select * from events order by eid, instance, event', create_engine(db_engine))
expected_columns = ['eid', 'field_id', 'instance', 'event']
assert len(events_data.columns) == len(expected_columns)
assert all(x in events_data.columns for x in expected_columns)
## Check data is correct
assert not events_data.empty
assert events_data.shape[0] == 6
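# Rows are ordered by (eid, instance, event); all events here belong to field 84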
assert events_data.loc[0, 'eid'] == 1000020
assert events_data.loc[0, 'field_id'] == 84
assert events_data.loc[0, 'event'] == 'E103'
assert events_data.loc[1, 'eid'] == 1000020
assert events_data.loc[1, 'field_id'] == 84
assert events_data.loc[1, 'event'] == 'N308'
assert events_data.loc[2, 'eid'] == 1000020
assert events_data.loc[2, 'field_id'] == 84
assert events_data.loc[2, 'event'] == 'Q750'
assert events_data.loc[3, 'eid'] == 1000030
assert events_data.loc[3, 'field_id'] == 84
assert events_data.loc[3, 'event'] == 'N308'
assert events_data.loc[4, 'eid'] == 1000040
assert events_data.loc[4, 'field_id'] == 84
assert events_data.loc[4, 'event'] == 'N308'
assert events_data.loc[5, 'eid'] == 1000050
assert events_data.loc[5, 'field_id'] == 84
assert events_data.loc[5, 'event'] == 'E103'
def test_postgresql_events_tables_only_two_instances_filled(self):
# Prepare
directory = get_repository_path('pheno2sql/example11')
csv_file = get_repository_path(os.path.join(directory, 'example11_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check events table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
events_data = pd.read_sql('select * from events order by eid, instance, event', create_engine(db_engine))
expected_columns = ['eid', 'field_id', 'instance', 'event']
assert len(events_data.columns) == len(expected_columns)
assert all(x in events_data.columns for x in expected_columns)
## Check data is correct
assert not events_data.empty
assert events_data.shape[0] == 11
cidx = 0
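# Walk through the expected rows in (eid, instance, event) order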
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'J32'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
def test_postgresql_events_tables_two_categorical_fields_and_two_and_three_instances(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check events table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
events_data = pd.read_sql('select * from events order by eid, field_id, instance, event', create_engine(db_engine))
expected_columns = ['eid', 'field_id', 'instance', 'event']
assert len(events_data.columns) == len(expected_columns)
assert all(x in events_data.columns for x in expected_columns)
## Check total data
assert not events_data.empty
assert events_data.shape[0] == 25
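# Walk through the expected rows per eid, in (eid, field_id, instance, event) order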
# 1000010
cidx = 0
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1136'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1434'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000010
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '1701'
# 1000020
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'J32'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1114'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1434'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000020
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1136'
# 1000030
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000030
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1434'
# 1000040
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'N308'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == 'Q750'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1114'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1136'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000040
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '457'
# 1000050
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 84
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == 'E103'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 0
assert events_data.loc[cidx, 'event'] == '1434'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000050
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 1
assert events_data.loc[cidx, 'event'] == '1114'
# 1000060
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000060
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '1114'
cidx += 1
assert events_data.loc[cidx, 'eid'] == 1000060
assert events_data.loc[cidx, 'field_id'] == 85
assert events_data.loc[cidx, 'instance'] == 2
assert events_data.loc[cidx, 'event'] == '1136'
def test_postgresql_events_tables_check_constrains_exist(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check samples table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('events', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 4
assert 'eid' in columns
assert 'field_id' in columns
assert 'instance' in columns
assert 'event' in columns
# index on 'event' column
constraint_sql = self._get_table_contrains('events', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine), index_col='index_name')
assert constraints_results is not None
assert not constraints_results.empty
assert constraints_results.shape[0] == 6
assert constraints_results.loc[['ix_events_eid']].shape[0] == 1
assert constraints_results.loc['ix_events_eid', 'column_name'] == 'eid'
assert constraints_results.loc[['ix_events_field_id']].shape[0] == 1
assert constraints_results.loc['ix_events_field_id', 'column_name'] == 'field_id'
assert constraints_results.loc[['ix_events_instance']].shape[0] == 1
assert constraints_results.loc['ix_events_instance', 'column_name'] == 'instance'
assert constraints_results.loc[['ix_events_event']].shape[0] == 1
assert constraints_results.loc['ix_events_event', 'column_name'] == 'event'
assert constraints_results.loc[['ix_events_field_id_event']].shape[0] == 2
assert 'field_id' in constraints_results.loc['ix_events_field_id_event', 'column_name'].tolist()
assert 'event' in constraints_results.loc['ix_events_field_id_event', 'column_name'].tolist()
def test_postgresql_phenotypes_tables_check_constrains_exist(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=15, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('ukb_pheno_0_00', column_query='eid', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
constraint_sql = self._get_table_contrains('ukb_pheno_0_01', column_query='eid', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
def test_postgresql_vacuum(self):
# Prepare
directory = get_repository_path('pheno2sql/example12')
csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data(vacuum=True)
# Validate
vacuum_data = pd.DataFrame()
query_count = 0
# FIXME waits for vacuum to finish
while vacuum_data.empty and query_count < 150:
vacuum_data = pd.read_sql("""
select relname, last_vacuum, last_analyze
from pg_stat_user_tables
where schemaname = 'public' and last_vacuum is not null and last_analyze is not null
""", db_engine)
query_count += 1
assert vacuum_data is not None
assert not vacuum_data.empty
def test_postgresql_load_data_non_utf_characters(self):
# Prepare
directory = get_repository_path('pheno2sql/example15')
csv_file1 = get_repository_path(os.path.join(directory, 'example15_00.csv')) # latin1
csv_file2 = get_repository_path(os.path.join(directory, 'example15_01.csv')) # latin1
csv_file3 = get_repository_path(os.path.join(directory, 'example15_02.csv')) # utf-8
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv_file1, csv_file2, csv_file3), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
columns = ['c21_1_0', 'c21_0_0', 'c103_0_0', 'c104_0_0', 'c221_0_0', 'c221_1_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result.index.name == 'eid'
assert len(query_result.index) == 10
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.loc[1000041, 'c103_0_0'] == 'Optión 4'
assert query_result.loc[1000041, 'c104_0_0'] == '158'
assert query_result.loc[1000070, 'c21_1_0'] == 'Of course ñ'
assert query_result.loc[1000070, 'c21_0_0'] == 'Option number 7'
assert query_result.loc[1000050, 'c221_0_0'] == 'Option number 25'
assert query_result.loc[1000050, 'c221_1_0'] == 'Maybe ñó'
def test_postgresql_load_data_with_duplicated_data_field(self):
# Prepare
directory = get_repository_path('pheno2sql/example16')
csv_file1 = get_repository_path(os.path.join(directory, 'example1600.csv'))
csv_file2 = get_repository_path(os.path.join(directory, 'example1601.csv'))
db_engine = POSTGRESQL_ENGINE
# intentionally load the "latest" dataset (1601) first, since 1601 > 1600
p2sql = Pheno2SQL((csv_file2, csv_file1), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
n_columns_per_table=2, loading_n_jobs=1)
# Run
p2sql.load_data()
columns = ['c103_0_0', 'c47_0_0', 'c50_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result.index.name == 'eid'
assert len(query_result.index) == 7 + 3, len(query_result.index)
assert not query_result.empty
assert query_result.shape[0] == 7 + 3, query_result.shape[0]
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
# these individuals should not have data for data-field 50, since we overwrote the old dataset (1600)
assert pd.isnull(query_result.loc[1000021, 'c50_0_0'])
assert pd.isnull(query_result.loc[1000041, 'c50_0_0'])
assert pd.isnull(query_result.loc[1000061, 'c50_0_0'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME>
#
import sys
import os
import argparse
import pandas as pd
from decimal import Decimal
from collections import OrderedDict
import scipy.stats as st
import numpy as np
def main():
# Parse args
args = parse_args()
# Load top loci
top_loci = pd.read_json(args.inf, orient='records', lines=True)
# Only keep type == gwas
top_loci = top_loci.loc[top_loci['type'] == 'gwas', :]
# Load study information, required for case-control information GCST000964
study = pd.read_json(args.study_info, orient='records', lines=True)
study['case_prop'] = study['n_cases'] / study['n_initial']
study = study.loc[:, ['study_id', 'case_prop']]
study = study.drop_duplicates()
# Fix very low p-values
top_loci.loc[top_loci['pval'] == 0, 'pval'] = (1 / sys.float_info.max)
# Filter p-values
top_loci = top_loci.loc[top_loci['pval'] <= 5e-8, :]
# Get P mantissa and exponent
top_loci.loc[:, 'p_mantissa'] = top_loci['pval'].apply(fman)
top_loci.loc[:, 'p_exponent'] = top_loci['pval'].apply(fexp)
# Extract effect size information
top_loci = pd.merge(top_loci, study, on='study_id', how='left')
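# fman and fexp above come from elsewhere in this script; a minimal sketch of what they
# are assumed to do (split a p-value into base-10 mantissa and exponent via Decimal):
def fexp(number):
    # e.g. 3.2e-9 -> -9 (assumed behaviour)
    sign, digits, exponent = Decimal(number).as_tuple()
    return len(digits) + exponent - 1

def fman(number):
    # e.g. 3.2e-9 -> 3.2 (assumed behaviour)
    return Decimal(number).scaleb(-fexp(number)).normalize()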
# -*- coding: utf-8 -*-
# @Time : 09.04.21 09:54
# @Author : sing_sd
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import src.common_functions as cf
import csv
import ais
from datetime import datetime, timedelta, timezone
import re
vb_dir = os.path.dirname(__file__)
data_dir = os.path.join(vb_dir, "resources/")
headers = ['x', 'y', 'cog', 'sog', 'time', 'mmsi', "nav_status", "ship_type", "destination"]
plt.rcParams.update({'font.size': 12})
def main():
# data = generate_short_data(data_len=100000)
# filename = 'ais_data_rostock_2020.csv' # 'ais_data_rostock_2016.csv'
# generate_processed_data(filename)
filename = "ais_data_rostock_2020_processed.csv" # "ais_data_rostock_2016_processed.csv"
plot_data(filename)
# generate_rostock_gedsar_dataset(filename)
# decode_data()
def plot_data(filename):
mpl.rcParams['agg.path.chunksize'] = 10000
fig, axs = plt.subplots(1, 1)
fig.set_size_inches([8, 6])
plt.pause(0.0001)
with open(data_dir + filename, "r") as f:
print("start")
data = pd.read_csv(f)
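# plot_data above loads the whole processed CSV at once; for the year-long AIS exports a
# chunked read over the same columns keeps memory bounded. This is only a sketch of that
# pattern, not part of the original script.
def iter_ais_chunks(filename="ais_data_rostock_2020_processed.csv", chunksize=1_000_000):
    for chunk in pd.read_csv(data_dir + filename, usecols=headers, chunksize=chunksize):
        # drop rows without a usable position before handing the chunk to the caller
        yield chunk.dropna(subset=["x", "y"])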
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
if test is None:
raise NotImplementedError('test is None for short-term, not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def make_dataset_res(self):
nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
data, x_3d = self.get_3d_dataset()
else:
data = joblib.load(nwp_3d_pickle)
x_3d = joblib.load(dataset_cnn_pickle) # FIXME: unused variable
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, p_dates in enumerate(dates_stack):
t = self.dates[i]
file_name = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.area_group  # the original referenced self.projects, which this class does not define
resolution = self.nwp_resolution
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def get_3d_dataset(self):
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47),
freq='H')  # hours +24..+47, i.e. 00:00 -> 23:00 of the following day
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)  # for each base date, keep the hourly stamps of the following day
area = self.area_group if isinstance(self.areas, dict) else self.areas
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], p_dates, self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
for i, p_dates in enumerate(dates_stack))
x = np.array([])
data_var = dict()
for var in self.variables:
if (var == 'WS' and self.static_data['type'] == 'wind') or \
(var == 'Flux' and self.static_data['type'] == 'pv'):
data_var[var + '_prev'] = x
data_var[var] = x
data_var[var + '_next'] = x
else:
data_var[var] = x
data_var['dates'] = x
x_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
x_3d = stack_3d(x_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
if self.is_for_test:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
self.logger.info('NWP stacked data saved')
return data_var, x_3d
def train_pca(self, data, components, level):
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.n_jobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.n_jobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler': scaler, 'kpca': kpca}, fname)
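# my_scorer is imported from dataset_manager.common_utils and is not shown in this file.
# A typical choice for tuning KernelPCA's gamma is the negative reconstruction error of
# the inverse transform; the sketch below is an assumption, not the project's code.
def _example_kpca_scorer(estimator, X, y=None):
    X_reduced = estimator.transform(X)
    X_restored = estimator.inverse_transform(X_reduced)
    # higher is better for GridSearchCV, hence the negated mean squared error
    return -np.mean((X - X_restored) ** 2)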
def pca_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_pca(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def dataset_for_single_farm(self, data, data_path):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_X = dataset_X
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_offline(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
for dt in self.dates:
if utc:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
dates = [t.strftime('%d%m%y%H%M') for t in pdates]
dates_stack.append(dates)
else:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.areas, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
else:
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.area_group, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_offline(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
return dataset_X
def dataset_for_multiple_farms_offline(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
return dataset_X
def make_dataset_res_online(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_online(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
if utc:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48), freq='H')  # end of range assumed from the offline utc branch above
import gc
from pathlib import Path
from tqdm import tqdm
import skvideo
import skvideo.io
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from moviepy.editor import *
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision import transforms
__all__ = ['CreateOptim', 'save_checkpoint',
'preprocess_df', 'preprocess_df_audio']
def plot_losses(train=[], test=[], path=''):
""" Plotting function for training/val losses
Parameters
----------
train : list
Training losses over training
test : list
Test losses over training
path : str
Path for output plot
"""
epochs = [x for x in range(len(train))]
fig = plt.figure(figsize=(5, 5))
sns.lineplot(x=epochs, y=train, label='Train')
sns.lineplot(x=epochs, y=test, label='Test')
plt.legend(loc='upper right')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.savefig('{}/Loss.jpeg'.format(path))
def CreateOptim(parameters, lr=0.001, betas=(0.5, 0.999), weight_decay=0,
factor=0.2, patience=5, threshold=1e-03, eps=1e-08):
""" Creates optimizer and associated learning rate scheduler for a model
Parameters
----------
parameters : torch parameters
Pytorch network parameters for the associated optimizer and scheduler
lr : float
Learning rate for optimizer
betas : 2-tuple(floats)
Betas for optimizer
weight_decay : float
Weight decay for optimizer regularization
factor : float
Factor by which to reduce learning rate on Plateau
patience : int
Patience for learning rate scheduler
Returns
-------
optimizer : torch.optim
optimizer for model
scheduler : ReduceLROnPlateau
scheduler for optimizer
"""
optimizer = optim.Adam(parameters, lr=lr, betas=betas,
weight_decay=weight_decay)
scheduler = ReduceLROnPlateau(
optimizer, mode='min', factor=factor, patience=patience,
threshold=threshold, eps=eps, verbose=True)
return optimizer, scheduler
def save_checkpoint(model, description, filename='checkpoint.pth.tar'):
""" Saves input state dict to file
Parameters
----------
state : dict
State dict to save. Can include parameters from model, optimizer, etc.
as well as any other elements.
is_best : bool
If true will save current state dict to a second location
filename : str
File name for save
Returns
-------
"""
state = {
'architecture': str(model),
'description': description,
'model': model.state_dict()
}
torch.save(state, filename)
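# Example wiring of the two helpers above (a sketch; the model and the validation loss
# are placeholders, not part of this module).
def _example_training_setup(model):
    optimizer, scheduler = CreateOptim(model.parameters(), lr=1e-4, weight_decay=1e-5)
    return optimizer, scheduler
# After each validation pass one would typically call scheduler.step(val_loss), so the
# plateau scheduler can react to the monitored metric, and then
# save_checkpoint(model, description='baseline run', filename='checkpoint.pth.tar').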
def preprocess_df(df=None, mtcnn=None, path=None, outpath=None,
target_n_frames=60, frame_rate=10, mini_batch=15,
n_seconds=5, start_at_end=True, debug=False):
""" Preprocessing script for deep fake challenge. Subsamples, videos,
isolates faces and saves frames.
Parameters
----------
df : pd.DataFrame
Dataframe with video metadata
mtcnn : torch.Module
Facial detection module from facenet-python (https://github.com/timesler/facenet-pytorch)
path : str
Path to directory with DFDC data
outpath : str
Destination for preprocessed frames
target_n_frames : int
Target number of frames to extract
frame_rate : int
Number of frames per second to process
mini_batch : int
Mini batch size for preprocessing steps (protects against memory overflow)
n_seconds : int
Number of seconds of video to load (speeds up processing)
debug : bool
Debug switch to test memory leak
Returns
-------
faces_dataframe : pd.DataFrame
Dataframe of preprocessed data
"""
def split(my_list, n):
""" Splits list into lists of length n
Parameters
----------
my_list : list
List to subdivide
n : int
Max length of desired sub-lists
Returns
-------
final : list
List of sub-lists of length n
"""
final = [my_list[i * n:(i + 1) * n]
for i in range((len(my_list) + n - 1) // n)]
return final
def process_min_batch_reverse(batch=None, start_index=0):
""" Pre-process and save a mini_batch of frames
Parameters
----------
batch : list(torch.tensor)
List with frames to preprocess
start_index : int
Number of previously saved frames in the video
Returns
-------
end_index : int
Number of saved frames at end of this mini-batch
"""
with torch.no_grad():
faces, probs = mtcnn(batch, return_prob=True)
saved_frames = 0
faces_probs = []
for ii, face in enumerate(faces):
if face is None or probs[ii] < 0.95:
continue
if start_index-saved_frames < 0:
break
faces_probs.append(probs[ii])
imface = to_pil(face/2 + 0.5)
imface.save('{}/frame_{}.png'.format(dest,
start_index-saved_frames))
del imface
saved_frames += 1
del faces
return saved_frames, faces_probs
def process_min_batch(batch=None, start_index=0):
""" Pre-process and save a mini_batch of frames
Parameters
----------
batch : list(torch.tensor)
List with frames to preprocess
start_index : int
Number of previously saved frames in the video
Returns
-------
end_index : int
Number of saved frames at end of this mini-batch
"""
with torch.no_grad():
faces, probs = mtcnn(batch, return_prob=True)
saved_frames = 0
faces_probs = []
for ii, face in enumerate(faces):
if face is None or probs[ii] < 0.95:
continue
faces_probs.append(probs[ii])
imface = to_pil(face/2 + 0.5)
imface.save('{}/frame_{}.png'.format(dest,
saved_frames+start_index))
del imface
saved_frames += 1
del faces
return saved_frames, faces_probs
frame_skip = 30//frame_rate
to_pil = transforms.ToPILImage()
pbar = tqdm(total=len(df))
faces_dataframe = []
for idx in range(len(df)):
pbar.update(1)
entry = df.iloc[idx]
this_entry = {'split': entry['split'], 'File': entry['File'],
'label': entry['label'], 'frames': 0,
'probabilities': []}
try:
filename = '{}/{}/{}'.format(path, entry['split'], entry['File'])
dest = '{}/{}/{}/'.format(outpath, entry['split'], entry['File'])
Path(dest).mkdir(parents=True, exist_ok=True)
try:
videodata = skvideo.io.vread(filename, (n_seconds)*30)
except RuntimeError:
videodata = skvideo.io.vread(filename)
except:
continue
if start_at_end:
frames = [to_pil(x) for x in videodata[::-frame_skip]]
frames_batches = split(frames, mini_batch)
else:
frames = [to_pil(x) for x in videodata[0::frame_skip]]
frames_batches = split(frames, mini_batch)
probabilities = []
if start_at_end:
n_frames = target_n_frames
for batch in frames_batches:
if n_frames < 0:
break
t_frames, t_probs = process_min_batch_reverse(
batch, n_frames)
n_frames -= t_frames
probabilities += t_probs
n_frames = target_n_frames
else:
n_frames = 0
for batch in frames_batches:
if n_frames >= target_n_frames:
break
t_frames, t_probs = process_min_batch(batch, n_frames)
n_frames += t_frames
probabilities += t_probs
this_entry['frames'] = n_frames
this_entry['probabilities'] = probabilities
del frames, videodata
except:
pass
faces_dataframe.append(this_entry)
del entry, this_entry
#
if debug:
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
print(type(obj), obj.size())
except:
pass
pbar.close()
return pd.DataFrame(faces_dataframe)
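# Example call for preprocess_df (a sketch): the metadata dataframe is expected to carry
# 'split', 'File' and 'label' columns, and the detector comes from facenet-pytorch as
# referenced in the docstring above. The paths and the dataframe are placeholders.
def _example_preprocess(metadata_df):
    from facenet_pytorch import MTCNN
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    mtcnn = MTCNN(image_size=160, margin=14, keep_all=False, device=device)
    return preprocess_df(df=metadata_df, mtcnn=mtcnn,
                         path='dfdc/videos', outpath='dfdc/frames',
                         target_n_frames=60, frame_rate=10)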
def preprocess_df_audio(df=None, path=None, outpath=None, fps=16000):
""" Preprocessing to extact audio from videos
Parameters
----------
df : pd.DataFrame
Metadata for files to pre-process
path : str
Path to source data
outpath : str
Path for output preprocessed data
fps : int
Framerate for data loading/saving
Returns
-------
audio_dataframe : pd.DataFrame
Extracted audio meta data
"""
audio_dataframe = []
pbar = tqdm(total=len(df))
for idx in range(len(df)):
pbar.update(1)
entry = df.iloc[idx]
this_entry = {'split': entry['split'], 'File': entry['File'],
'label': entry['label'], 'wlen': 0}
try:
filename = '{}/{}/{}'.format(path, entry['split'], entry['File'])
dest = '{}/{}'.format(outpath, entry['split'])
Path('{}'.format(dest)).mkdir(parents=True, exist_ok=True)
audio = AudioFileClip(filename, fps=16000)
audio_array = audio.to_soundarray()
this_entry['wlen'] = audio_array.shape[0]
np.save(file='{}/{}'.format(dest, entry['File']), arr=audio_array)
except:
pass
audio_dataframe.append(this_entry)
return pd.DataFrame(audio_dataframe)
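# The arrays written by preprocess_df_audio can be read back with numpy; the file names
# follow the save pattern used above (np.save appends '.npy'). Paths are placeholders.
def _example_load_audio(outpath, split, filename):
    return np.load('{}/{}/{}.npy'.format(outpath, split, filename))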
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script transforms SNP data from VCF files into FASTA format.
It is intended for the Mytilus dataset.
"""
__author__ = '<NAME>'
__mail__ = '<EMAIL>'
#import os
import pandas as pd
import argparse
### use a single vcf genotype field and draw a base according to the genotype probabilities it carries.
def random_base_set(field,
bases):
import random
probability = [float(i) for i in field.split(':')[3].split(',')]
choice = random.random()
if './.' in field.split(':')[0]: # sort out uncovered alleles as N
print('it happened...')
haplobase = 'N'
else:
if choice <= probability[0]: # choose reference base by random choice
haplobase = bases[0]
elif choice <= sum(probability[0:2]): # choose for heterozygotes
new_choice = random.random()
if new_choice <= .5:
haplobase = bases[0]
else:
haplobase = bases[1]
else: # choose alternative base if random choice
haplobase = bases[1]
return haplobase
### build the fasta headers, mapping the Mytilus sample ids to short population labels.
def make_header(atoms):
pops = ['M.eduEuNorth' if 'M-edu-Europe-N' in i else i for i in atoms]
pops = ['M.eduEuSouth' if 'M-edu-Europe-S' in i else i for i in pops]
pops = ['M.eduAm' if 'M-edu-America_' in i else i for i in pops]
pops = ['M.galloAtl' if 'M-galloprovincialis-A' in i else i for i in pops]
pops = ['M.galloMed' if 'M-galloprovincialis-M' in i else i for i in pops]
pops = ['M.tross' if 'M-trossulus' in i else i for i in pops]
header = [f'>{pop}|{atom}'for pop, atom in zip(pops, atoms)]
return header
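# Quick illustration of make_header: each sample id is mapped to a short population label
# and combined into a fasta-style header (the sample names below are made up):
# make_header(['M-edu-Europe-N_01', 'M-trossulus_07'])
# -> ['>M.eduEuNorth|M-edu-Europe-N_01', '>M.tross|M-trossulus_07']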
# wordir = '/Users/Thomsn/Desktop/Studium/MEME Programme [current]/Universite de Montpellier [Sem2]/internship_scornavacca_lab/gitlab/phylogenetwork/data/real_data/mytilus'
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", action="store", dest="vcf_file", required=True,
help="Name of the input VCF file")
args = parser.parse_args()
vcf_file = args.vcf_file
print(f'~~~ Conversion of {vcf_file} to fasta format started ~~~')
print('~ translation is running ... ~')
with open(vcf_file) as coffee:
soup = coffee.readlines() # soup is the list of lines of the file
sequences = pd.DataFrame([])
for weight, molecules in enumerate(soup): # weight reflects the count of the lines
if weight % 10000 == 0:
print(f'... line {weight} ...')
if weight == 15: # this is the header line of the vcf
atoms = molecules.strip().split('\t')[9:]
header = make_header(atoms)
if weight >= 16: # these are all the lines with snp data
bases = molecules.strip().split('\t')[3:5]
fields = molecules.strip().split('\t')[9:] # these fields reflect compacted information of one snp of one ind
basealign = []
for count, field in enumerate(fields):
haplobase = random_base_set(field, bases)
basealign.append(haplobase)
baseframe = pd.Series(data=basealign)
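# The excerpt above ends while building one pandas Series of bases per SNP; a sketch of
# how the collected rows could be stacked and written out per individual is shown below.
# This continuation is an assumption, not the original script's code: it presumes that
# `sequences` ends up with one row per SNP and one column per individual in `header` order.
def _example_write_fasta(sequences, header, outfile='mytilus_snps.fasta'):
    with open(outfile, 'w') as fasta:
        for col, name in enumerate(header):
            fasta.write(f'{name}\n')
            fasta.write(''.join(sequences.iloc[:, col]) + '\n')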
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
import h5py  # needed by get_results_sensitivity_number to read the hdf5 results file
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
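# for each daily timestep, look up the bank balances reported for its water year (Oct-Sep);
# Semitropic accounts are the cumulative sum of partner deposits through that year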
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
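# build the daily datetime index by hand, extending February by one day in leap years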
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
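# Minimal usage sketch (file name and sensitivity id are hypothetical):
# vis = Visualizer(district_list, private_list, city_list, contract_list, bank_list, leiu_list)
# vis.get_results_sensitivity_number('results.hdf5', '1', 10, 1905, 1) # water year starting Oct 1, 1905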
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedance Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', 'Don Pedro', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', 'Don Pedro', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
# note: 'state_response' is not reinitialized here, so the Lost Hills configuration above is preserved
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
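# figure_params is nested as figure_params[figure_name][plot_name][parameter];
# the plotting methods below look up their settings by those two keys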
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
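# SRI cutoffs translate the September 30 forecast index into the five water-year types
# (Wet, Above Normal, Below Normal, Dry, Critical) used in the stacked bars below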
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
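# stacked bars compare the relative frequency of each water-year type in the validation period vs. the extended simulation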
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
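# name_bridge maps internal district keys to reporting labels: short codes for the 'annual'
# output and full district names (or 'Urban') for the 'monthly' output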
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
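# monthly totals are stored in flat arrays indexed by water-year month: year_num*12 + (month_num - 10)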
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
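# model outputs are cumulative within a water year, so monthly values are taken as the
# month-end value minus the prior month-end value (October, the first month, is used directly)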
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
# attribute in-lieu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
# attribute in-lieu recharge deliveries to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
## pumping for in-lieu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[ | pd.DatetimeIndex([date_string_current]) | pandas.DatetimeIndex |