# Copyright 2014 Open Data Science Initiative and other authors. See AUTHORS.txt
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import csv
import copy
import numpy as np
import scipy.io
import datetime
import json
import yaml
import re
import tarfile
import logging
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(message)s",
filename="/tmp/sods.log",
filemode="w",
)
from functools import reduce
import pandas as pd
from .config import *
from . import access
from . import util
DATAPATH = os.path.expanduser(os.path.expandvars(config.get("datasets", "dir")))
PYTRENDS_AVAILABLE = True
try:
from pytrends.request import TrendReq
except ImportError:
PYTRENDS_AVAILABLE = False
GPY_AVAILABLE = True
try:
import GPy
except ImportError:
GPY_AVAILABLE = False
NETPBMFILE_AVAILABLE = True
try:
import netpbmfile
except ImportError:
NETPBMFILE_AVAILABLE = False
GEOPANDAS_AVAILABLE = True
try:
import geopandas
except ImportError:
GEOPANDAS_AVAILABLE = False
if sys.version_info >= (3, 0):
from urllib.parse import quote
from urllib.request import urlopen
else:
from urllib2 import quote
from urllib2 import urlopen
# Global variables
default_seed = 10000
def bmi_steps(data_set="bmi_steps"):
if not access.data_available(data_set):
access.download_data(data_set)
data = pd.read_csv(os.path.join(access.DATAPATH, data_set, "steps-bmi-data.csv"))
X = np.hstack(
(data["steps"].values[:, np.newaxis], data["bmi"].values[:, np.newaxis])
)
Y = data["gender"].values[:, None]
return access.data_details_return(
{"X": X, "Y": Y, "covariates": ["steps", "bmi"], "response": ["gender"]},
data_set,
)
# The data sets
def boston_housing(data_set="boston_housing"):
if not access.data_available(data_set):
access.download_data(data_set)
all_data = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "housing.data"))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return access.data_details_return({"X": X, "Y": Y}, data_set)
def boxjenkins_airline(data_set="boxjenkins_airline", num_train=96):
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
access.download_data(data_set)
data = np.loadtxt(
os.path.join(access.DATAPATH, data_set, "boxjenkins_airline.csv"), delimiter=","
)
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"covariates": [util.decimalyear("year")],
"response": ["AirPassengers"],
"info": "Monthly airline passenger data from Box & Jenkins 1976.",
},
data_set,
)
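# Example usage (an illustrative sketch, not part of the original module; it
# assumes the boxjenkins_airline resource can be downloaded into DATAPATH):
#
#     data = boxjenkins_airline(num_train=96)
#     X, Y = data["X"], data["Y"]                   # first 96 months for training
#     Xtest, Ytest = data["Xtest"], data["Ytest"]   # remaining months for testing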
def brendan_faces(data_set="brendan_faces"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "frey_rawface.mat"))
Y = mat_data["ff"].T
return access.data_details_return({"Y": Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set="della_gatta", gene_number=None):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "DellaGattadata.mat"))
X = np.double(mat_data["timepoints"])
if gene_number is None:
Y = mat_data["exprs_tp53_RMA"]
else:
Y = mat_data["exprs_tp53_RMA"][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return access.data_details_return({"X": X, "Y": Y, "gene_number": gene_number}, data_set)
def epomeo_gpx(data_set="epomeo_gpx", sample_every=4):
"""Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run."""
try:
import gpxpy
import gpxpy.gpx
except ImportError:
print("Need to install gpxpy to process the empomeo_gpx dataset.")
return
if not access.data_available(data_set):
access.download_data(data_set)
files = [
"endomondo_1",
"endomondo_2",
"garmin_watch_via_endomondo",
"viewranger_phone",
"viewranger_tablet",
]
X = []
for file in files:
gpx_file = open(os.path.join(access.DATAPATH, "epomeo_gpx", file + ".gpx"), "r")
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [
point
for track in gpx.tracks
for segment in track.segments
for point in segment.points
]
data = [
[
(
point.time
- datetime.datetime(2013, 8, 21, tzinfo=datetime.timezone.utc)
).total_seconds(),
point.latitude,
point.longitude,
point.elevation,
]
for point in points
]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
X = pd.DataFrame(
X[0], columns=["seconds", "latitude", "longitude", "elevation"]
)
X.set_index(keys="seconds", inplace=True)
return access.data_details_return(
{
"X": X,
"info": "Data is an array containing time in seconds, latitude, longitude and elevation in that order.",
},
data_set,
)
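# Example usage (an illustrative sketch; requires gpxpy and the epomeo_gpx
# resource, and keeps every fourth GPS point by default):
#
#     data = epomeo_gpx(sample_every=4)
#     trace = data["X"]   # DataFrame indexed by seconds with latitude, longitude, elevation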
if GEOPANDAS_AVAILABLE:
def nigerian_administrative_zones(
data_set="nigerian_administrative_zones", refresh_data=False
):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
from zipfile import ZipFile
with ZipFile(
os.path.join(access.DATAPATH, data_set, "nga_admbnda_osgof_eha_itos.gdb.zip"), "r"
) as zip_ref:
zip_ref.extractall(
os.path.join(access.DATAPATH, data_set, "nga_admbnda_osgof_eha_itos.gdb")
)
states_file = "nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/"
from geopandas import read_file
Y = read_file(os.path.join(access.DATAPATH, data_set, states_file), layer=1)
Y.crs = "EPSG:4326"
Y = Y.set_index("admin1Name_en")
return access.data_details_return({"Y": Y}, data_set)
def nigerian_covid(data_set="nigerian_covid", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "line-list-nigeria.csv")
Y = pd.read_csv(
filename,
parse_dates=[
"date",
"date_confirmation",
"date_onset_symptoms",
"date_admission_hospital",
"death_date",
],
)
return access.data_details_return({"Y": Y}, data_set)
def nigeria_nmis(data_set="nigeria_nmis", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "healthmopupandbaselinenmisfacility.csv")
Y = pd.read_csv(filename)
return access.data_details_return({"Y": Y}, data_set)
def nigerian_population(data_set="nigerian_population", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "nga_admpop_adm1_2020.csv")
Y = pd.read_csv(filename)
Y.dropna(axis=1, how='all', inplace=True)
Y.dropna(axis=0, how='any', inplace=True)
Y.rename(columns = {"ADM0_NAME":"admin0Name_en",
"ADM0_PCODE" : "admin0Pcode",
"ADM1_NAME" : "admin1Name_en",
"ADM1_PCODE" : "admin1Pcode",
"T_TL" :"population"},
inplace=True)
Y["admin0Name_en"] = Y["admin0Name_en"].str.capitalize()
Y["admin1Name_en"] = Y["admin1Name_en"].str.capitalize()
Y = Y.set_index("admin1Name_en")
return access.data_details_return({"Y": Y}, data_set)
def pmlr(volumes="all", data_set="pmlr", refresh_data=False):
"""Abstracts from the Proceedings of Machine Learning Research"""
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
proceedings = access.pmlr_proceedings_list(data_set)
# Create a new resources entry for downloading contents of proceedings.
data_name_full = "pmlr"
access.data_resources[data_set]["dirs"] = [['.']]
for entry in proceedings:
if volumes == "all" or entry["volume"] in volumes:
file = entry["yaml"].split("/")[-1]
proto, url = entry["yaml"].split("//")
file = os.path.basename(url)
dirname = os.path.dirname("/".join(url.split("/")[1:]))
urln = proto + "//" + url.split("/")[0]
access.data_resources[data_name_full]["files"].append([file])
access.data_resources[data_name_full]["dirs"].append([dirname])
access.data_resources[data_name_full]["urls"].append(urln)
Y = []
# Download the volume data
if not access.data_available(data_name_full):
access.download_data(data_name_full)
for entry in reversed(proceedings):
volume = entry["volume"]
# data_name_full = data_name_full_stub + "v" + str(volume)
if volumes == "all" or volume in volumes:
file = entry["yaml"].split("/")[-1]
proto, url = entry["yaml"].split("//")
file = os.path.basename(url)
dirname = os.path.dirname("/".join(url.split("/")[1:]))
urln = proto + "//" + url.split("/")[0]
volume_file = open(
os.path.join(access.DATAPATH, data_name_full, dirname, file), "r"
)
Y += yaml.load(volume_file, Loader=yaml.FullLoader)
Y = pd.DataFrame(Y)
Y["published"] = pd.to_datetime(Y["published"])
# Y.columns.values[4] = util.json_object('authors')
# Y.columns.values[7] = util.json_object('editors')
try:
Y["issued"] = Y["issued"].apply(
lambda x: np.datetime64(datetime.datetime(*x["date-parts"]))
)
except TypeError as e:
raise TypeError("Type error for entry\n" + str(Y["issued"])) from e
def full_name(person):
order = ["given", "prefix", "family", "suffix"]
names = [str(person[key]) for key in order if key in person and person[key] is not None]
return " ".join(names)
Y["author"] = Y["author"].apply(
lambda x: ', '.join([full_name(author) for author in x])
)
Y["editor"] = Y["editor"].apply(
lambda x: ', '.join([full_name(editor) for editor in x])
)
columns = list(Y.columns)
columns[14] = util.datetime64_("published")
columns[11] = util.datetime64_("issued")
Y.columns = columns
return access.data_details_return(
{
"Y": Y,
"info": "Data is a pandas data frame containing each paper, its abstract, authors, volumes and venue.",
},
data_set,
)
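# Example usage (an illustrative sketch; the volume identifiers are assumed to
# match the "volume" field of the downloaded proceedings list):
#
#     data = pmlr(volumes="all")   # or a list of specific volumes
#     papers = data["Y"]           # DataFrame of abstracts, authors, editors, dates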
def football_data(season="1617", data_set="football_data"):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
league_dict = {"E0": 0, "E1": 1, "E2": 2, "E3": 3, "EC": 4}
def league2num(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
return league_dict[string]
def football2num(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
if string in access.football_dict:
return access.football_dict[string]
else:
access.football_dict[string] = len(access.football_dict) + 1
return access.football_dict[string]
def datestr2num(s):
return util.date2num(datetime.datetime.strptime(s.decode("utf-8"), "%d/%m/%y"))
data_set_season = data_set + "_" + season
access.data_resources[data_set_season] = copy.deepcopy(access.data_resources[data_set])
access.data_resources[data_set_season]["urls"][0] += season + "/"
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ["E0.csv", "E1.csv", "E2.csv", "E3.csv"]
if start_year > 4 and start_year < 93:
files += ["EC.csv"]
access.data_resources[data_set_season]["files"] = [files]
if not access.data_available(data_set_season):
access.download_data(data_set_season)
start = True
for file in reversed(files):
filename = os.path.join(access.DATAPATH, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(access.DATAPATH, data_set_season, "temp.csv")
input = open(filename, encoding="ISO-8859-1")
output = open(writename, "w")
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(
writename,
skiprows=1,
usecols=(0, 1, 2, 3, 4, 5),
converters={
0: league2num,
1: datestr2num,
2: football2num,
3: football2num,
},
delimiter=",",
)
if start:
X = table[:, :4]
Y = table[:, 4:]
start = False
else:
X = np.append(X, table[:, :4], axis=0)
Y = np.append(Y, table[:, 4:], axis=0)
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [
util.discrete(league_dict, "league"),
util.datenum("match_day"),
util.discrete(access.football_dict, "home team"),
util.discrete(access.football_dict, "away team"),
],
"response": [util.integer("home score"), util.integer("away score")],
},
data_set,
)
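# Example usage (an illustrative sketch; the season is encoded as two two-digit
# years, e.g. "1617" for the 2016/17 season):
#
#     data = football_data(season="1617")
#     X, Y = data["X"], data["Y"]   # league/date/teams and home/away scores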
def sod1_mouse(data_set="sod1_mouse"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "sod1_C57_129_exprs.csv")
Y = pd.read_csv(filename, header=0, index_col=0)
num_repeats = 4
num_time = 4
num_cond = 4
return access.data_details_return({"Y": Y}, data_set)
def spellman_yeast(data_set="spellman_yeast"):
"""This is the classic Spellman et al 1998 Yeast Cell Cycle gene expression data that is widely used as a benchmark."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "combined.txt")
Y = pd.read_csv(filename, header=0, index_col=0, sep="\t")
return access.data_details_return({"Y": Y}, data_set)
def spellman_yeast_cdc15(data_set="spellman_yeast"):
"""These are the gene expression levels from the CDC-15 experiment of Spellman et al (1998)."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "combined.txt")
Y = pd.read_csv(filename, header=0, index_col=0, sep="\t")
t = np.asarray(
[
10,
30,
50,
70,
80,
90,
100,
110,
120,
130,
140,
150,
170,
180,
190,
200,
210,
220,
230,
240,
250,
270,
290,
]
)
times = ["cdc15_" + str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return access.data_details_return(
{
"Y": Y,
"t": t,
"info": "Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).",
},
data_set,
)
def lee_yeast_ChIP(data_set="lee_yeast_ChIP"):
"""Yeast ChIP data from Lee et al."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "binding_by_gene.tsv")
S = pd.read_csv(filename, header=1, index_col=0, sep="\t")
transcription_factors = [col for col in S.columns if col[:7] != "Unnamed"]
annotations = S[["Unnamed: 1", "Unnamed: 2", "Unnamed: 3"]]
S = S[transcription_factors]
return access.data_details_return(
{
"annotations": annotations,
"Y": S,
"transcription_factors": transcription_factors,
},
data_set,
)
def fruitfly_tomancak(data_set="fruitfly_tomancak", gene_number=None):
"""Fruitfly gene expression data from Tomancak et al."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "tomancak_exprs.csv")
Y = pd.read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time - 1, num_time)
xr = np.linspace(0, num_repeats - 1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return access.data_details_return({"X": X, "Y": Y, "gene_number": gene_number}, data_set)
def drosophila_protein(data_set="drosophila_protein"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "becker_et_al.csv")
Y = pd.read_csv(filename, header=0)
return access.data_details_return({"Y": Y}, data_set)
def drosophila_knirps(data_set="drosophila_protein"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "becker_et_al.csv")
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = pd.read_csv(filename, header=0)
t = df["t"].to_numpy()[:, np.newaxis]
x = df["x"].to_numpy()[:, np.newaxis]
g = df["expression1"].to_numpy()[:, np.newaxis]
p = df["expression2"].to_numpy()[:, np.newaxis]
leng = x.shape[0]
T = np.vstack([t, t])
S = np.vstack([x, x])
inx = np.zeros(leng * 2)[:, None]
inx[leng:] = 1  # mark the second block of the stacked outputs
X = np.hstack([T, S, inx])
Y = np.vstack([g, p])
return access.data_details_return({"Y": Y, "X": X}, data_set)
if PYTRENDS_AVAILABLE:
def google_trends(
query_terms=["big data", "machine learning", "data science"],
data_set="google_trends",
refresh_data=False,
):
"""
Data downloaded from Google trends for given query terms. Warning,
if you use this function multiple times in a row you get blocked
due to terms of service violations.
The function will cache the result of any query in an attempt to
avoid this. If you wish to refresh an old query set refresh_data
to True. The original function is inspired by this notebook:
http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb
But the update makes use of `pytrends`
"""
query_terms.sort()
from pytrends.request import TrendReq
pytrends = TrendReq(hl="en-US", tz=360)
# Create directory name for data
dir_path = os.path.join(access.DATAPATH, "google_trends")
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = "-".join(query_terms)
dir_name = dir_name.replace(" ", "_")
dir_path = os.path.join(dir_path, dir_name)
file = "data.csv"
file_name = os.path.join(dir_path, file)
if not os.path.exists(file_name) or refresh_data:
print(
"Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks."
)
# quote the query terms.
quoted_terms = []
for term in query_terms:
quoted_terms.append(quote(term))
print("Query terms: ", ", ".join(query_terms))
print("Fetching query:")
pytrends = TrendReq(hl="en-US", tz=0)
pytrends.build_payload(query_terms, cat=0, timeframe="all", geo="", gprop="")
df = pytrends.interest_over_time()
print("Done.")
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df["Date"] = df.index
df = df.set_index(np.array(range(len(df.index))))
df = df.rename(columns={"date": "Date"})
df.to_csv(file_name)
loaddf = False
else:
print(
"Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function."
)
print("Query terms: ", ", ".join(query_terms))
df = pd.read_csv(file_name, parse_dates=[0])
loaddf = True
columns = df.columns
terms = len(query_terms)
if loaddf:
X = np.asarray(
[
(
util.date2num(
datetime.datetime.strptime(df.iloc[row]["Date"], "%Y-%m-%d")
),
i,
)
for i in range(terms)
for row in df.index
]
)
else:
X = np.asarray(
[
(util.date2num(df.iloc[row]["Date"]), i)
for i in range(terms)
for row in df.index
]
)
Y = np.asarray(
[[df.iloc[row][query_terms[i]]] for i in range(terms) for row in df.index]
)
output_info = columns[1:]
cats = {}
for i in range(terms):
cats[query_terms[i]] = i
return access.data_details_return(
{
"data frame": df,
"X": X,
"Y": Y,
"query_terms": query_terms,
"info": "Data downloaded from google trends with query terms: "
+ ", ".join(query_terms)
+ ".",
"covariates": [util.datenum("date"), util.discrete(cats, "query_terms")],
"response": ["normalized interest"],
},
data_set,
)
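# Example usage (an illustrative sketch; requires pytrends and network access,
# and repeated queries may be blocked by Google, so results are cached):
#
#     data = google_trends(query_terms=["machine learning", "data science"])
#     df = data["data frame"]   # cached under DATAPATH/google_trends/<joined terms>/data.csv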
def oil(data_set="three_phase_oil_flow"):
"""The three phase oil data from Bishop and James (1993)."""
if not access.data_available(data_set):
access.download_data(data_set)
oil_train_file = os.path.join(access.DATAPATH, data_set, "DataTrn.txt")
oil_trainlbls_file = os.path.join(access.DATAPATH, data_set, "DataTrnLbls.txt")
oil_test_file = os.path.join(access.DATAPATH, data_set, "DataTst.txt")
oil_testlbls_file = os.path.join(access.DATAPATH, data_set, "DataTstLbls.txt")
oil_valid_file = os.path.join(access.DATAPATH, data_set, "DataVdn.txt")
oil_validlbls_file = os.path.join(access.DATAPATH, data_set, "DataVdnLbls.txt")
fid = open(oil_train_file)
X = np.fromfile(fid, sep="\t").reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep="\t").reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep="\t").reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep="\t").reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep="\t").reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep="\t").reshape((-1, 3)) * 2.0 - 1.0
fid.close()
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"Xtest": Xtest,
"Xvalid": Xvalid,
"Yvalid": Yvalid,
},
data_set,
)
# else:
# throw an error
def leukemia(data_set="leukemia"):
if not access.data_available(data_set):
access.download_data(data_set)
all_data = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "leuk.dat"))
X = all_data[1:, 1:]
censoring = all_data[1:, 1]
Y = all_data[1:, 0]
return access.data_details_return({"X": X, "censoring": censoring, "Y": Y}, data_set)
def oil_100(seed=default_seed, data_set="three_phase_oil_flow"):
np.random.seed(seed=seed)
data = oil()
indices = util.permute(1000)
indices = indices[0:100]
X = data["X"][indices, :]
Y = data["Y"][indices, :]
return access.data_details_return(
{
"X": X,
"Y": Y,
"info": "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was "
+ str(seed),
},
data_set,
)
def pumadyn(seed=default_seed, data_set="pumadyn-32nm"):
"""Data from a simulation of the Puma robotic arm generated by <NAME>."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
tar = tarfile.open(os.path.join(dir_path, "pumadyn-32nm.tar.gz"))
print("Extracting file.")
tar.extractall(path=dir_path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(
os.path.join(access.DATAPATH, data_set, "pumadyn-32nm", "Dataset.data.gz")
)
indices = util.permute(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:-1]
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return access.data_details_return(
{"X": X, "Y": Y, "Xtest": Xtest, "Ytest": Ytest, "seed": seed}, data_set
)
def robot_wireless(data_set="robot_wireless"):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not access.data_available(data_set):
access.download_data(data_set)
file_name = os.path.join(access.DATAPATH, data_set, "uw-floor.txt")
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype=str)
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 3))
allY[:] = -92.0
strengths = {}
for address, j in zip(addresses, list(range(len(addresses)))):
ind = np.nonzero(address == macaddress)
temp_strengths = strength[ind]
temp_x = x[ind]
temp_y = y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time == temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time == times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = time
allX[i, 1] = temp_x[ind2]
allX[i, 2] = temp_y[ind2]
allY = (allY + 85.0) / 15.0
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"addresses": addresses,
"times": times,
"covariates": [util.timestamp("time", "%H:%M:%S.%f"), "X", "Y"],
"response": addresses,
},
data_set,
)
def silhouette(data_set="ankur_pose_data"):
"""<NAME> and <NAME>'s silhoutte data."""
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "ankurDataPoseSilhouette.mat")
)
inMean = np.mean(mat_data["Y"])
inScales = np.sqrt(np.var(mat_data["Y"]))
X = mat_data["Y"] - inMean
X = X / inScales
Xtest = mat_data["Y_test"] - inMean
Xtest = Xtest / inScales
Y = mat_data["Z"]
Ytest = mat_data["Z_test"]
return access.data_details_return(
{"X": X, "Y": Y, "Xtest": Xtest, "Ytest": Ytest}, data_set
)
def decampos_digits(
data_set="decampos_characters", which_digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
):
"""Digits data set from Teo de Campos"""
if not access.data_available(data_set):
access.download_data(data_set)
path = os.path.join(access.DATAPATH, data_set)
digits = np.load(os.path.join(path, "digits.npy"))
digits = digits[which_digits, :, :, :]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape(
(digits.shape[0] * digits.shape[1], digits.shape[2] * digits.shape[3])
)
lbls = np.array([[l] * num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)] * num_samples for l in which_digits])
return access.data_details_return(
{
"Y": Y,
"lbls": lbls,
"str_lbls": str_lbls,
"info": "Digits data set from the de Campos characters data",
},
data_set,
)
def ripley_synth(data_set="ripley_prnn_data"):
"""Synthetic classification data set generated by <NAME> for his Neural Networks book."""
if not access.data_available(data_set):
access.download_data(data_set)
train = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "synth.tr"), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "synth.te"), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return access.data_details_return(
{
"X": X,
"Y": y,
"Xtest": Xtest,
"Ytest": ytest,
"info": "Synthetic data generated by Ripley for a two class classification problem.",
},
data_set,
)
"""def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(access.DATAPATH, data_set)
if access.data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
access.download_data(data_set)
data = np.loadtxt(os.path.join(access.DATAPATH, data_set, 'GLBTS.long.data'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return access.data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Global average temperature data with " + str(num_train) + " values used as training points."}, data_set)
"""
def mauna_loa(data_set="mauna_loa", num_train=545, refresh_data=False):
"""CO2 concentrations from the Mauna Loa observatory."""
path = os.path.join(access.DATAPATH, data_set)
if access.data_available(data_set) and not refresh_data:
print(
"Using cached version of the data set, to use latest version set refresh_data to True"
)
else:
access.download_data(data_set)
data = np.loadtxt(os.path.join(access.DATAPATH, data_set, "co2_mm_mlo.txt"))
print(
"Most recent data observation from month ",
data[-1, 1],
" in year ",
data[-1, 0],
)
allX = data[data[:, 3] != -99.99, 2:3]
allY = data[data[:, 3] != -99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"covariates": [util.decimalyear("year", "%Y-%m")],
"response": ["CO2/ppm"],
"info": "Mauna Loa data with "
+ str(num_train)
+ " values used as training points.",
},
data_set,
)
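# Example usage (an illustrative sketch):
#
#     data = mauna_loa(num_train=545)
#     X, Y = data["X"], data["Y"]                   # decimal year and CO2 / ppm
#     Xtest, Ytest = data["Xtest"], data["Ytest"]   # held-out later observations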
def osu_run1(data_set="osu_run1", sample_every=4):
"""Ohio State University's Run1 motion capture data set."""
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
import zipfile
access.download_data(data_set)
zip = zipfile.ZipFile(os.path.join(access.DATAPATH, data_set, "run1TXT.ZIP"), "r")
for name in zip.namelist():
zip.extract(name, path)
from . import mocap
Y, connect = mocap.load_text_data("Aug210106", path)
Y = Y[0:-1:sample_every, :]
return access.data_details_return({"Y": Y, "connect": connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
with open(
os.path.join(os.path.dirname(__file__), "datasets", "swiss_roll.pickle")
) as f:
if sys.version_info >= (3, 0):
import pickle
else:
import cPickle as pickle
data = pickle.load(f)
Na = data["Y"].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data["Y"][perm, :]
t = data["t"][perm]
c = data["colors"][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {"Y": Y, "t": t, "colors": c}
def singlecell(data_set="guo_qpcr_2010"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "guo_qpcr.csv")
Y = pd.read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return access.data_details_return(
{
"Y": Y,
"info": "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
"genes": genes,
"labels": labels,
},
data_set,
)
def swiss_roll_1000():
return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set="swiss_roll"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "swiss_roll_data.mat")
)
Y = mat_data["X_data"][:, 0:num_samples].transpose()
return access.data_details_return(
{
"Y": Y,
"Full": mat_data["X_data"],
"info": "The first "
+ str(num_samples)
+ " points from the swiss roll data of Tennenbaum, de Silva and Langford (2001).",
},
data_set,
)
def isomap_faces(num_samples=698, data_set="isomap_face_data"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "face_data.mat"))
Y = mat_data["images"][:, 0:num_samples].transpose()
return access.data_details_return(
{
"Y": Y,
"poses": mat_data["poses"],
"lights": mat_data["lights"],
"info": "The first "
+ str(num_samples)
+ " points from the face data of Tennenbaum, de Silva and Langford (2001).",
},
data_set,
)
if GPY_AVAILABLE:
def toy_rbf_1d(seed=default_seed, num_samples=500):
"""
Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
:param seed: seed to use for random sampling.
:type seed: int
:param num_samples: number of samples to sample in the function (default 500).
:type num_samples: int
"""
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low=-1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1.0, lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=1e-2)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(
np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1)
)
return {
"X": X,
"Y": y,
"info": "Sampled "
+ str(num_samples)
+ " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.",
}
def toy_rbf_1d_50(seed=default_seed):
np.random.seed(seed=seed)
data = toy_rbf_1d()
indices = util.permute(data["X"].shape[0])
indices = indices[0:50]
indices.sort(axis=0)
X = data["X"][indices, :]
Y = data["Y"][indices, :]
return {
"X": X,
"Y": Y,
"info": "Subsamples the toy_rbf_sample with 50 values randomly taken from the original sample.",
"seed": seed,
}
def toy_linear_1d_classification(seed=default_seed):
"""Simple classification data in one dimension for illustrating models."""
def sample_class(f):
p = 1.0 / (1.0 + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {
"X": X,
"Y": sample_class(2.0 * X),
"F": 2.0 * X,
"covariates": ["X"],
"response": [util.discrete({"positive": 1, "negative": -1})],
"seed": seed,
}
def airline_delay(
data_set="airline_delay", num_train=700000, num_test=100000, seed=default_seed
):
"""Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence"""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "airline_delay.hdf")
# 1. Load the dataset
data = pd.read_hdf(filename)
# WARNING: removing year
data.pop("Year")
# Get data matrices
Yall = data.pop("ArrDelay").values[:, None]
Xall = data.values
# Subset the data (memory!!)
all_data = num_train + num_test
Xall = Xall[:all_data]
Yall = Yall[:all_data]
# Get testing points
np.random.seed(seed=seed)
N_shuffled = util.permute(Yall.shape[0])
train, test = N_shuffled[num_test:], N_shuffled[:num_test]
X, Y = Xall[train], Yall[train]
Xtest, Ytest = Xall[test], Yall[test]
covariates = [
"month",
"day of month",
"day of week",
"departure time",
"arrival time",
"air time",
"distance to travel",
"age of aircraft / years",
]
response = ["delay"]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"seed": seed,
"info": "Airline delay data used for demonstrating Gaussian processes for big data.",
"covariates": covariates,
"response": response,
},
data_set,
)
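# Example usage (an illustrative sketch; the full HDF file is large, so smaller
# train/test sizes than the defaults keep memory use down):
#
#     data = airline_delay(num_train=10000, num_test=1000)
#     X, Y = data["X"], data["Y"]
#     Xtest, Ytest = data["Xtest"], data["Ytest"]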
if NETPBMFILE_AVAILABLE:
def olivetti_faces(data_set="olivetti_faces"):
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
import zipfile
access.download_data(data_set)
zip = zipfile.ZipFile(os.path.join(path, "att_faces.zip"), "r")
for name in zip.namelist():
zip.extract(name, path)
Y = []
lbls = []
for subject in range(40):
for image in range(10):
image_path = os.path.join(
path, "orl_faces", "s" + str(subject + 1), str(image + 1) + ".pgm"
)
Y.append(netpbmfile.imread(image_path).flatten())
lbls.append(subject)
Y = np.asarray(Y)
lbls = np.asarray(lbls)[:, None]
return access.data_details_return(
{"Y": Y, "lbls": lbls, "info": "ORL Faces processed to 64x64 images."}, data_set
)
def xw_pen(data_set="xw_pen"):
if not access.data_available(data_set):
access.download_data(data_set)
Y = np.loadtxt(os.path.join(access.DATAPATH, data_set, "xw_pen_15.csv"), delimiter=",")
X = np.arange(485)[:, None]
return access.data_details_return(
{
"Y": Y,
"X": X,
"info": "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275.",
},
data_set,
)
def download_rogers_girolami_data(data_set="rogers_girolami_data"):
if not access.data_available("rogers_girolami_data"):
import tarfile
access.download_data(data_set)
path = os.path.join(access.DATAPATH, data_set)
tar_file = os.path.join(path, "firstcoursemldata.tar.gz")
tar = tarfile.open(tar_file)
print("Extracting file.")
tar.extractall(path=path)
tar.close()
def olympic_100m_men(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["male100"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_100m_women(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["female100"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_200m_women(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["female200"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"info": "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_200m_men(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["male200"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Male 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_400m_women(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["female400"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_400m_men(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["male400"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Male 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_marathon_men(data_set="olympic_marathon_men"):
if not access.data_available(data_set):
access.download_data(data_set)
olympics = np.genfromtxt(
os.path.join(access.DATAPATH, data_set, "olympicMarathonTimes.csv"), delimiter=","
)
X = olympics[:, 0:1]
Y = olympics[:, 1:2]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
},
data_set,
)
def olympic_sprints(data_set="rogers_girolami_data"):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
cats = {}
for i, dataset in enumerate(
[
olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women,
]
):
data = dataset()
year = data["X"]
time = data["Y"]
X = np.vstack((X, np.hstack((year, np.ones_like(year) * i))))
Y = np.vstack((Y, time))
cats[dataset.__name__] = i
data["X"] = X
data["Y"] = Y
data[
"info"
] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y"), util.discrete(cats, "event")],
"response": ["time"],
"info": "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
"output_info": {
0: "100m Men",
1: "100m Women",
2: "200m Men",
3: "200m Women",
4: "400m Men",
5: "400m Women",
},
},
data_set,
)
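# Example usage (an illustrative sketch): the second column of X is the event
# index described in "output_info", so individual events can be sliced out.
#
#     data = olympic_sprints()
#     X, Y = data["X"], data["Y"]
#     men_100m_times = Y[X[:, 1] == 0]   # event index 0 is "100m Men"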
def movie_body_count(data_set="movie_body_count"):
"""Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "film-death-counts-Python.csv")
Y = pd.read_csv(filename)
Y["Actors"] = Y["Actors"].apply(lambda x: x.split("|"))
Y["Genre"] = Y["Genre"].apply(lambda x: x.split("|"))
Y["Director"] = Y["Director"].apply(lambda x: x.split("|"))
return access.data_details_return(
{
"Y": Y,
"info": "Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R.",
},
data_set,
)
def movie_body_count_r_classify(data_set="movie_body_count"):
"""Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R."""
data = movie_body_count()["Y"]
X = data[["Year", "Body_Count"]]
Y = data["MPAA_Rating"] == "R" # set label to be positive for R rated films.
# Create series of movie genres with the relevant index
s = data["Genre"].str.split("|").apply(pd.Series, 1).stack()
s.index = s.index.droplevel(-1) # to line up with df's index
# Extract from the series the unique list of genres.
genres = s.unique()
# For each genre extract the indices where it is present and add a column to X
for genre in genres:
index = s[s == genre].index.tolist()
values = pd.Series(np.zeros(X.shape[0]), index=X.index)
values[index] = 1
X[genre] = values
return access.data_details_return(
{
"X": X,
"Y": Y,
"info": "Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R. In this variant we aim to classify whether the film is rated R or not depending on the genre, the years and the body count.",
},
data_set,
)
def movielens100k(data_set="movielens100k"):
"""Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
if not access.data_available(data_set):
import zipfile
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
zip = zipfile.ZipFile(os.path.join(dir_path, "ml-100k.zip"), "r")
for name in zip.namelist():
zip.extract(name, dir_path)
encoding = "latin-1"
movie_path = os.path.join(access.DATAPATH, "movielens100k", "ml-100k")
items = pd.read_csv(
os.path.join(movie_path, "u.item"),
index_col="index",
header=None,
sep="|",
names=[
"index",
"title",
"date",
"empty",
"imdb_url",
"unknown",
"Action",
"Adventure",
"Animation",
"Children" "s",
"Comedy",
"Crime",
"Documentary",
"Drama",
"Fantasy",
"Film-Noir",
"Horror",
"Musical",
"Mystery",
"Romance",
"Sci-Fi",
"Thriller",
"War",
"Western",
],
encoding=encoding,
)
users = pd.read_csv(
os.path.join(movie_path, "u.user"),
index_col="index",
header=None,
sep="|",
names=["index", "age", "sex", "job", "id"],
encoding=encoding,
)
parts = [
"u1.base",
"u1.test",
"u2.base",
"u2.test",
"u3.base",
"u3.test",
"u4.base",
"u4.test",
"u5.base",
"u5.test",
"ua.base",
"ua.test",
"ub.base",
"ub.test",
]
ratings = []
for part in parts:
rate_part = pd.read_csv(
os.path.join(movie_path, part),
index_col="index",
header=None,
sep="\t",
names=["user", "item", "rating", "index"],
encoding=encoding,
)
rate_part["split"] = part
ratings.append(rate_part)
Y = pd.concat(ratings)
return access.data_details_return(
{
"Y": Y,
"film_info": items,
"user_info": users,
"info": "The Movielens 100k data",
},
data_set,
)
def nigeria_nmis_facility_database(data_set="nigeria_nmis_facility_database"):
"""A rigorous, geo-referenced baseline facility inventory across Nigeria is created spanning from 2009 to 2011 with an additional survey effort to increase coverage in 2014, to build Nigeria’s first nation-wide inventory of health facility. The database includes 34,139 health facilities info in Nigeria."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "healthmopupandbaselinenmisfacility.csv")
Y = pd.read_csv(filename)
return access.data_details_return(
{
"Y": Y,
"info": "Geo-referenced baseline facility inventory across Nigeria giving Nigeria's first nation-wide inventory of health facilities.",
},
data_set,
)
def crescent_data(num_data=200, seed=default_seed):
"""
Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
:param num_data: number of data points to be sampled (default is 200).
:type num_data: int
:param seed: random seed to be used for data generation.
:type seed: int
"""
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
# Rotation matrix
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
# Scaling matrices
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
scales.append([[1, 0], [0, 3]])
scales.append([[1, 0], [0, 3]])
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
num_data_part.append(round(((i + 1) * num_data) / 4.0))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack(
(
np.ones((num_data_part[0] + num_data_part[1], 1)),
-np.ones((num_data_part[2] + num_data_part[3], 1)),
)
)
cats = {"negative": -1, "positive": 1}
return {
"X": X,
"Y": Y,
"info": "Two separate classes of data formed approximately in the shape of two crescents.",
"response": [util.discrete(cats, "class")],
}
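# Example usage (an illustrative sketch; purely synthetic, so no download needed):
#
#     data = crescent_data(num_data=200, seed=default_seed)
#     X, Y = data["X"], data["Y"]   # 2-D inputs and +/-1 class labels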
def creep_data(data_set="creep_rupture"):
"""Brun and Yoshida's metal creep rupture data."""
if not access.data_available(data_set):
import tarfile
access.download_data(data_set)
path = os.path.join(access.DATAPATH, data_set)
tar_file = os.path.join(path, "creeprupt.tar")
tar = tarfile.open(tar_file)
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(access.DATAPATH, data_set, "taka"))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(list(range(2, 31)))
X = all_data[:, features].copy()
cats = {"furnace cooling": 0, "air cooling": 1, "oil cooling": 2, "water quench": 3}
attributes = [
"Lifetime / hours",
"Temperature / Kelvin",
"Carbon / wt%",
"Silicon / wt%",
"Manganese / wt%",
"Phosphorus / wt%",
"Sulphur / wt%",
"Chromium / wt%",
"Molybdenum / wt%",
"Tungsten / wt%",
"Nickel / wt%",
"Copper / wt%",
"Vanadium / wt%",
"Niobium / wt%",
"Nitrogen / wt%",
"Aluminium / wt%",
"Boron / wt%",
"Cobalt / wt%",
"Tantalum / wt%",
"Oxygen / wt%",
"Normalising temperature / Kelvin",
"Normalising time / hours",
util.discrete(cats, "Cooling rate of normalisation"),
"Tempering temperature / Kelvin",
"Tempering time / hours",
util.discrete(cats, "Cooling rate of tempering"),
"Annealing temperature / Kelvin",
"Annealing time / hours",
util.discrete(cats, "Cooling rate of annealing"),
"Rhenium / wt%",
]
return access.data_details_return(
{
"X": X,
"Y": y,
"covariates": attributes,
"response": ["Rupture stress / MPa"],
},
data_set,
)
def ceres(data_set="ceres"):
"""Twenty two observations of the Dwarf planet Ceres as observed by <NAME> and published in the September edition of Monatlicher Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planets orbit through which the planet was recovered three months later."""
if not access.data_available(data_set):
access.download_data(data_set)
data = pd.read_csv(
os.path.join(access.DATAPATH, data_set, "ceresData.txt"),
index_col="Tag",
header=None,
sep="\t",
names=[
"Tag",
"Mittlere Sonnenzeit",
"Gerade Aufstig in Zeit",
"Gerade Aufstiegung in Graden",
"Nordlich Abweich",
"Geocentrische Laenger",
"Geocentrische Breite",
'Ort der Sonne + 20" Aberration',
"Logar. d. Distanz",
],
parse_dates=True,
dayfirst=False,
)
return access.data_details_return({"data": data}, data_set)
def kepler_lightcurves(data_set="kepler_telescope"):
"""Load Kepler light curves from <NAME> & <NAME>'s NeurIPS 2020 Tutorial as shown in this colab https://colab.research.google.com/drive/1TimsiQhhcK6qX_lD951H-WJDHd92my61?usp=sharing"""
datasets = {'2009350155506':
['001720554',
'002696955',
'002987660',
'003246460',
'003429637',
'003441157',
'003836439',
'004040917',
'004044238',
'004150611',
'004155395',
'004242575',
'004567097',
'004660665',
'004671313',
'004857678',
'004931363',
'004989900',
'005108214',
'005113557',
'005164767',
'005177450',
'005458880',
'005683912',
'005724440',
'005737655',
'005802562',
'005939450',
'005952403',
'005954370',
'006065699',
'006101376',
'006106415',
'006150124',
'006225718',
'006342566',
'006352430',
'006382808',
'006450107',
'006469154',
'006670812',
'006675338',
'007201012',
'007286856',
'007345479',
'007366121',
'007510397',
'007669848',
'007798339',
'007820638',
'007827131',
'007909976',
'007939145',
'007940546',
'007940959',
'007944142',
'007950369',
'007970740',
'008006161',
'008077489',
'008085683',
'008153795',
'008313018',
'008324268']}
data = kepler_telescope(datasets)
data["datasets"] = datasets
data["citation"] = "Data from Kepler space mission used by <NAME> and <NAME> for their NeurIPS tutorial https://dwh.gg/NeurIPSastro1"
data["info"] = """The following wget lines were obtained by doing a simple search at this web form: http://archive.stsci.edu/kepler/data_search/search.php
where we put "< 8" into the field "KEP_Mag" and "Quarter" into the field "User-specified field 1" and "3" into the "Field descriptions" box associated with that."""
return access.data_details_return(data, data_set)
def kepler_telescope(datasets, data_set="kepler_telescope"):
"""Load a given kepler_id's datasets."""
scan_dir = os.path.join(access.DATAPATH, data_set)
# Make sure the data is downloaded.
resource = access.kepler_telescope_urls_files(datasets)
access.data_resources[data_set] = access.data_resources["kepler_telescope_base"].copy()
access.data_resources[data_set]["files"] = resource["files"]
access.data_resources[data_set]["urls"] = resource["urls"]
if resource["urls"]:
access.download_data(data_set)
dataset_dir = os.path.join(access.DATAPATH, "kepler_telescope")
filenames = []
for dataset in datasets:
for kepler_id in datasets[dataset]:
filenames.append("kplr" + kepler_id + "-" + dataset + "_llc.fits")
from astropy.table import Table
Y = pd.DataFrame({dataset: {kepler_id: Table.read(os.path.join(dataset_dir, "kplr" + kepler_id + "-" + dataset + "_llc.fits"), format='fits').to_pandas() for kepler_id in datasets[dataset]} for dataset in datasets})
return access.data_details_return(
{
"Y": Y,
},
data_set,
)
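# Example usage (an illustrative sketch; requires astropy and uses a
# quarter/KIC-id pair taken from the kepler_lightcurves list above):
#
#     datasets = {"2009350155506": ["001720554"]}
#     data = kepler_telescope(datasets)
#     lightcurve = data["Y"]["2009350155506"]["001720554"]   # DataFrame from the FITS table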
def cmu_mocap_49_balance(data_set="cmu_mocap"):
"""Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
train_motions = ["18", "19"]
test_motions = ["20"]
data = cmu_mocap(
"49", train_motions, test_motions, sample_every=4, data_set=data_set
)
data["info"] = (
"One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of "
+ data["info"]
)
return data
def cmu_mocap_35_walk_jog(data_set="cmu_mocap"):
"""Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
train_motions = [
"01",
"02",
"03",
"04",
"05",
"06",
"07",
"08",
"09",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"28",
"30",
"31",
"32",
"33",
"34",
]
test_motions = ["18", "29"]
data = cmu_mocap(
"35", train_motions, test_motions, sample_every=4, data_set=data_set
)
data["info"] = (
"Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of "
+ data["info"]
)
return data
def cmu_mocap_high_five(data_set="cmu_mocap"):
"""Load the CMU Motion capture for the high 5 between subjects 20 and 21 in the motion capture data. The data was used by Lawrence and Moore ICML 2007. Later the work was recreated by Damianou and Lawrence at AISTATS 2013."""
data = cmu_mocap("20", ["11"], [], sample_every=4, data_set=data_set)
data2 = cmu_mocap("21", ["11"], [], sample_every=4, data_set=data_set)
data["Y1"] = data.pop("Y")
data["skel1"] = data.pop("skel")
data["Y2"] = data2["Y"]
data["skel2"] = data2["skel"]
data["info"] = (
"High Five motion capture of two subjects walking towards each other and 'high fiving' as used by Lawrence and Moore at ICML. Data taken from subjects 20 and 21. It consists of "
+ data["info"]
+ " and "
+ data2["info"]
)
return data
def cmu_mocap(
subject, train_motions, test_motions=[], sample_every=4, data_set="cmu_mocap"
):
"""Load a given subject's training and test motions from the CMU motion capture data."""
# Load in subject skeleton.
from . import mocap
subject_dir = os.path.join(access.DATAPATH, data_set)
# Make sure the data is downloaded.
all_motions = train_motions + test_motions
resource = access.cmu_urls_files(([subject], [all_motions]))
access.data_resources[data_set] = access.data_resources["cmu_mocap_full"].copy()
access.data_resources[data_set]["files"] = resource["files"]
access.data_resources[data_set]["urls"] = resource["urls"]
if resource["urls"]:
access.download_data(data_set)
skel = mocap.acclaim_skeleton(os.path.join(subject_dir, subject + ".asf"))
# Set up labels for each sequence
exlbls = np.eye(len(train_motions))
# Load sequences
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(
os.path.join(subject_dir, subject + "_" + train_motions[i] + ".amc")
)
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(
os.path.join(subject_dir, subject + "_" + test_motions[i] + ".amc")
)
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
# Load test data
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = "Subject: " + subject + ". Training motions: "
for motion in train_motions:
info += motion + ", "
info = info[:-2]
if len(test_motions) > 0:
info += ". Test motions: "
for motion in test_motions:
info += motion + ", "
info = info[:-2] + "."
else:
info += "."
if sample_every != 1:
info += " Data is sub-sampled to every " + str(sample_every) + " frames."
return access.data_details_return(
{
"Y": Y,
"lbls": lbls,
"Ytest": Ytest,
"lblstest": lblstest,
"info": info,
"skel": skel,
},
data_set,
)
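# Example usage (an illustrative sketch; downloads only the requested CMU files):
#
#     data = cmu_mocap("35", train_motions=["01"], test_motions=["02"], sample_every=4)
#     Y, lbls, skel = data["Y"], data["lbls"], data["skel"]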
def mcycle(data_set="mcycle", seed=default_seed):
if not access.data_available(data_set):
access.download_data(data_set)
np.random.seed(seed=seed)
data = pd.read_csv(os.path.join(access.DATAPATH, data_set, "motor.csv"))
data = data.reindex(util.permute(data.shape[0])) # Randomize so test isn't at the end
X = data["times"].values[:, None]
Y = data["accel"].values[:, None]
return access.data_details_return(
{"X": X, "Y": Y, "covariates": ["times"], "response": ["acceleration"]},
data_set,
)
def elevators(data_set="elevators", seed=default_seed):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
tar = tarfile.open(name=os.path.join(dir_path, "elevators.tgz"))
tar.extractall(dir_path)
tar.close()
elevator_path = os.path.join(access.DATAPATH, "elevators", "Elevators")
elevator_train_path = os.path.join(elevator_path, "elevators.data")
elevator_test_path = os.path.join(elevator_path, "elevators.test")
train_data = pd.read_csv(elevator_train_path, header=None)
import itertools
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
# from https://tidyr.tidyverse.org/reference/complete.html
df = pd.DataFrame(
{
"group": [1, 2, 1],
"item_id": [1, 2, 2],
"item_name": ["a", "b", "b"],
"value1": [1, 2, 3],
"value2": [4, 5, 6],
}
)
columns = [
["group", "item_id", "item_name"],
["group", ("item_id", "item_name")],
]
expected_output = [
pd.DataFrame(
{
"group": [1, 1, 1, 1, 2, 2, 2, 2],
"item_id": [1, 1, 2, 2, 1, 1, 2, 2],
"item_name": ["a", "b", "a", "b", "a", "b", "a", "b"],
"value1": [1.0, np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 2.0],
"value2": [4.0, np.nan, np.nan, 6.0, np.nan, np.nan, np.nan, 5.0],
}
),
pd.DataFrame(
{
"group": [1, 1, 2, 2],
"item_id": [1, 2, 1, 2],
"item_name": ["a", "b", "a", "b"],
"value1": [1.0, 3.0, np.nan, 2.0],
"value2": [4.0, 6.0, np.nan, 5.0],
}
),
]
complete_parameters = [
(dataframe, columns, output)
for dataframe, (columns, output) in itertools.product(
[df], zip(columns, expected_output)
)
]
@pytest.mark.parametrize("df,columns,output", complete_parameters)
def test_complete(df, columns, output):
"""Test the complete function, with and without groupings."""
assert_frame_equal(df.complete(columns), output)
# from http://imachordata.com/2016/02/05/you-complete-me/
@pytest.fixture
def df1():
return pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
def test_fill_value(df1):
"""Test fill_value argument."""
output1 = pd.DataFrame(
{
"Year": [1999, 1999, 2000, 2000, 2004, 2004],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1, 4.0, 0, 5, 8, 2],
}
)
result = df1.complete(
columns=["Year", "Taxon"], fill_value={"Abundance": 0}
)
assert_frame_equal(result, output1)
def test_fill_value_all_years(df1):
"""
    Test that the complete function accurately replicates for all the years
from 1999 to 2004.
"""
output1 = pd.DataFrame(
{
"Year": [
1999,
1999,
2000,
2000,
2001,
2001,
2002,
2002,
2003,
2003,
2004,
2004,
],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1.0, 4, 0, 5, 0, 0, 0, 0, 0, 0, 8, 2],
}
)
result = df1.complete(
columns=[
{"Year": range(df1.Year.min(), df1.Year.max() + 1)},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, output1)
def test_type_columns(df1):
"""Raise error if columns is not a list object."""
with pytest.raises(TypeError):
df1.complete(columns="Year")
def test_empty_columns(df1):
"""Raise error if columns is empty"""
with pytest.raises(ValueError):
df1.complete(columns=[])
def test_fill_value_is_a_dict(df1):
"""Raise error if fill_value is not a dictionary"""
with pytest.raises(TypeError):
df1.complete(columns=["Year", "Taxon"], fill_value=0)
frame = pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
wrong_columns = (
(frame, ["b", "Year"]),
(frame, [{"Yayay": range(7)}]),
(frame, ["Year", ["Abundant", "Taxon"]]),
(frame, ["Year", ("Abundant", "Taxon")]),
)
empty_sub_columns = [
(frame, ["Year", []]),
(frame, ["Year", {}]),
(frame, ["Year", ()]),
(frame, ["Year", set()]),
]
@pytest.mark.parametrize("frame,wrong_columns", wrong_columns)
def test_wrong_columns(frame, wrong_columns):
"""Test that KeyError is raised if wrong column is supplied."""
with pytest.raises(KeyError):
frame.complete(columns=wrong_columns)
@pytest.mark.parametrize("frame,empty_sub_cols", empty_sub_columns)
def test_empty_subcols(frame, empty_sub_cols):
"""Raise ValueError for an empty container in columns'"""
with pytest.raises(ValueError):
frame.complete(columns=empty_sub_cols)
# https://stackoverflow.com/questions/32874239/
# how-do-i-use-tidyr-to-fill-in-completed-rows-within-each-value-of-a-grouping-var
def test_grouping_first_columns():
"""Test complete function when the first entry in columns is
a grouping."""
df2 = pd.DataFrame(
{
"id": [1, 2, 3],
"choice": [5, 6, 7],
"c": [9.0, np.nan, 11.0],
"d": [
pd.NaT,
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
],
}
)
output2 = pd.DataFrame(
{
"id": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"c": [9.0, 9.0, 9.0, np.nan, np.nan, np.nan, 11.0, 11.0, 11.0],
"d": [
pd.NaT,
pd.NaT,
pd.NaT,
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-30 00:00:00"),
                pd.Timestamp("2015-09-29 00:00:00"),
# -*- coding: utf-8 -*-
import pdb,importlib,inspect,time,datetime,json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_earning
from data.model import BalanceMRQ, BalanceTTM, BalanceReport
from data.model import CashFlowTTM, CashFlowReport
from data.model import IndicatorReport
from data.model import IncomeReport, IncomeTTM
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url, methods=[{'packet':'financial.factor_earning','class':'FactorEarning'},]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
        Get the trading date n years before `trade_date` (one year counted as
        `days` days). If that day is not a trading day, roll back to the
        nearest earlier trading day.
        :param days: number of days counted as one year
        :param trade_date: current trading date
        :param n: number of years to look back
        :return: the resulting trading date as a string in YYYYMMDD format
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
def _func_sets(self, method):
        # Filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method,x)), dir(method)))
def loading_data(self, trade_date):
"""
        Load the base data.
        Fetch the fundamental data of all stocks for the given trading day.
        :param trade_date: trading date
        :return:
"""
        # Convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
        # Read the factors currently involved
trade_date_pre_year = self.get_trade_date(trade_date, 1)
trade_date_pre_year_2 = self.get_trade_date(trade_date, 2)
trade_date_pre_year_3 = self.get_trade_date(trade_date, 3)
trade_date_pre_year_4 = self.get_trade_date(trade_date, 4)
trade_date_pre_year_5 = self.get_trade_date(trade_date, 5)
engine = sqlEngine()
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# Report Data
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
[CashFlowReport.LABORGETCASH,
CashFlowReport.FINALCASHBALA,
], dates=[trade_date])
for column in columns:
if column in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(column, axis=1)
        cash_flow_sets = cash_flow_sets.rename(
            columns={'LABORGETCASH': 'goods_sale_and_service_render_cash',  # cash received from selling goods and rendering services
                     'FINALCASHBALA': 'cash_and_equivalents_at_end',        # cash and cash equivalents at end of period
                     })
income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZTOTINCO,
IncomeReport.BIZINCO,
IncomeReport.PERPROFIT,
IncomeReport.PARENETP,
IncomeReport.NETPROFIT,
], dates=[trade_date])
for column in columns:
if column in list(income_sets.keys()):
income_sets = income_sets.drop(column, axis=1)
        income_sets = income_sets.rename(
            columns={'NETPROFIT': 'net_profit',                  # net profit
                     'BIZTOTINCO': 'total_operating_revenue',    # total operating revenue
                     'BIZINCO': 'operating_revenue',             # operating revenue
                     'PERPROFIT': 'operating_profit',            # operating profit
                     'PARENETP': 'np_parent_company_owners',     # net profit attributable to owners of the parent company
                     })
        indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(
            IndicatorReport,
            [IndicatorReport.NETPROFITCUT,  # net profit after deducting non-recurring gains and losses
             IndicatorReport.MGTEXPRT
             ], dates=[trade_date])
for column in columns:
if column in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(column, axis=1)
        indicator_sets = indicator_sets.rename(
            columns={'NETPROFITCUT': 'adjusted_profit',  # net profit after deducting non-recurring gains and losses
                     })
balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,
[BalanceReport.PARESHARRIGH,
], dates=[trade_date])
for column in columns:
if column in list(balance_sets.keys()):
balance_sets = balance_sets.drop(column, axis=1)
        balance_sets = balance_sets.rename(
            columns={'PARESHARRIGH': 'equities_parent_company_owners',  # total equity attributable to shareholders of the parent company
                     })
        income_sets_pre_year_1 = engine.fetch_fundamentals_pit_extend_company_id(
            IncomeReport,
            [IncomeReport.BIZINCO,    # operating revenue
             IncomeReport.NETPROFIT,  # net profit
             ], dates=[trade_date_pre_year])
for column in columns:
if column in list(income_sets_pre_year_1.keys()):
income_sets_pre_year_1 = income_sets_pre_year_1.drop(column, axis=1)
        income_sets_pre_year_1 = income_sets_pre_year_1.rename(
            columns={'NETPROFIT': 'net_profit_pre_year_1',       # net profit
                     'BIZINCO': 'operating_revenue_pre_year_1',  # operating revenue
                     })
income_sets_pre_year_2 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO,
IncomeReport.NETPROFIT,
], dates=[trade_date_pre_year_2])
for column in columns:
if column in list(income_sets_pre_year_2.keys()):
income_sets_pre_year_2 = income_sets_pre_year_2.drop(column, axis=1)
        income_sets_pre_year_2 = income_sets_pre_year_2.rename(
            columns={'NETPROFIT': 'net_profit_pre_year_2',       # net profit
                     'BIZINCO': 'operating_revenue_pre_year_2',  # operating revenue
                     })
income_sets_pre_year_3 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO,
IncomeReport.NETPROFIT,
], dates=[trade_date_pre_year_3])
for column in columns:
if column in list(income_sets_pre_year_3.keys()):
income_sets_pre_year_3 = income_sets_pre_year_3.drop(column, axis=1)
        income_sets_pre_year_3 = income_sets_pre_year_3.rename(
            columns={'NETPROFIT': 'net_profit_pre_year_3',       # net profit
                     'BIZINCO': 'operating_revenue_pre_year_3',  # operating revenue
                     })
income_sets_pre_year_4 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO,
IncomeReport.NETPROFIT,
], dates=[trade_date_pre_year_4])
for column in columns:
if column in list(income_sets_pre_year_4.keys()):
income_sets_pre_year_4 = income_sets_pre_year_4.drop(column, axis=1)
        income_sets_pre_year_4 = income_sets_pre_year_4.rename(
            columns={'NETPROFIT': 'net_profit_pre_year_4',       # net profit
                     'BIZINCO': 'operating_revenue_pre_year_4',  # operating revenue
                     })
tp_earning = pd.merge(cash_flow_sets, income_sets, how='outer', on='security_code')
tp_earning = pd.merge(indicator_sets, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(balance_sets, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(income_sets_pre_year_1, tp_earning, how='outer', on='security_code')
tp_earning = pd.merge(income_sets_pre_year_2, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_3, tp_earning, how='outer', on='security_code')
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
def test_shift_32bit_take(self, frame_or_series):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = obj.shift(1, "2H")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.BDay())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.arrays[0]
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="H")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame, frame_or_series):
# TODO(2.0): remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_equal(dtobj, unshifted)
shifted2 = dtobj.tshift(freq=dtobj.index.freq)
tm.assert_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
inferred_ts = tm.get_obj(inferred_ts, frame_or_series)
shifted = inferred_ts.tshift(1)
expected = dtobj.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_equal(unshifted, inferred_ts)
no_freq = dtobj.iloc[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame, frame_or_series):
# GH#11631
dtobj = tm.get_obj(datetime_frame, frame_or_series)
with tm.assert_produces_warning(FutureWarning):
dtobj.tshift()
def test_period_index_frame_shift_with_freq(self, frame_or_series):
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame, frame_or_series):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(dtobj, unshifted)
shifted2 = dtobj.shift(freq=dtobj.index.freq)
tm.assert_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
inferred_ts = tm.get_obj(inferred_ts, frame_or_series)
shifted = inferred_ts.shift(1, freq="infer")
expected = dtobj.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self, frame_or_series):
ps = tm.makePeriodFrame()
ps = tm.get_obj(ps, frame_or_series)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_frame_shift_with_freq_error(
self, datetime_frame, frame_or_series
):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
no_freq = dtobj.iloc[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
with tm.assert_produces_warning(FutureWarning):
result = ser.shift(1, fill_value=0)
expected = Series([pd.Timestamp(0), ser[0]])
tm.assert_series_equal(result, expected)
df = ser.to_frame()
        with tm.assert_produces_warning(FutureWarning):
            result = df.shift(1, fill_value=0)
        expected = expected.to_frame()
        tm.assert_frame_equal(result, expected)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:core_acc] *
# language: python
# name: conda-env-core_acc-py
# ---
# ## Validation of network modules
#
# This notebook performs a couple of analyses to validate the co-expression modules generated:
# 1. We examine the size of modules
# 2. We examine how co-operonic/co-regulonic genes are clustered into a few modules
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
import scipy
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scripts import utils, paths
np.random.seed(1)
# +
# User params
# Params to examine module size
clustering_method_list = ["dbscan", "hierarchal", "affinity"]
# Params for regulon/operon coverage
# Clustering method to examine regulon/operon coverage
# This method needs to be one of the ones listed above in `clustering_method_list`
method_toexamine = "affinity"
# Remove modules of this size or greater for analysis looking at coverage of regulon/operons
module_size_threshold = 1000
# Seed to use to randomly sample a matched-sized set of genes
# to compare against regulon/operon composition
sample_seed = 1
# Gene subset
gene_subset = "acc"
# How was data processed
# Choices: {"spell", "raw"}
processed = "raw"
# -
# ## Examine size of modules
#
# This will serve as a quick check that we are using reasonable clustering params in [2_get_network_communities.ipynb](2_get_network_communities.ipynb)
for method_name in clustering_method_list:
print(f"Modules using clustering method: {method_name}")
pao1_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR,
f"pao1_modules_{method_name}_{gene_subset}_{processed}.tsv",
)
pa14_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR,
f"pa14_modules_{method_name}_{gene_subset}_{processed}.tsv",
)
pao1_membership = pd.read_csv(
pao1_membership_filename, sep="\t", header=0, index_col=0
)
pa14_membership = pd.read_csv(
pa14_membership_filename, sep="\t", header=0, index_col=0
)
# Note: Sort module ids by occurence for plotting later
pao1_membership.sort_values(by="module id", ascending=False, inplace=True)
pa14_membership.sort_values(by="module id", ascending=False, inplace=True)
print(pao1_membership["module id"].value_counts())
print(pa14_membership["module id"].value_counts())
# plotting function
def plot_dist_modules(clustering_method_list, gene_subset):
# Set up the matplotlib figure
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(15, 15))
axes = axes.ravel()
for i in range(len(clustering_method_list)):
pao1_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR,
f"pao1_modules_{clustering_method_list[i]}_{gene_subset}_{processed}.tsv",
)
pa14_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR,
f"pa14_modules_{clustering_method_list[i]}_{gene_subset}_{processed}.tsv",
)
pao1_membership = pd.read_csv(
pao1_membership_filename, sep="\t", header=0, index_col=0
)
pa14_membership = pd.read_csv(
pa14_membership_filename, sep="\t", header=0, index_col=0
)
fig = (
pao1_membership["module id"]
.value_counts()
.sort_values(ascending=False)
.reset_index()["module id"]
.plot(ax=axes[i])
)
fig = (
pa14_membership["module id"]
.value_counts()
.sort_values(ascending=False)
.reset_index()["module id"]
.plot(ax=axes[i])
)
fig.set_title(
f"Histogram of size of modules using {clustering_method_list[i]}",
fontsize=12,
)
handles, labels = fig.get_legend_handles_labels()
fig.legend(handles, ["PAO1", "PA14"], loc="upper right")
# Plot distribution of modules per clustering method
plot_dist_modules(clustering_method_list, gene_subset)
# **Takeaway:**
# Our expectation on size of modules would be 2-50 genes. Most operons have fewer than 10 genes and most regulons have fewer than 100 genes. Some examples that demonstrate the size of co-expression networks can be found in papers using ADAGE signatures to define modules:
# * Figure 5 in [eADAGE paper](https://bmcbioinformatics.biomedcentral.com/track/pdf/10.1186/s12859-017-1905-4.pdf)
# * Figure 7 in [Harty et al. paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6531624/)
# * Figure 2 in [Doing et al. paper](https://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.1008783)
#
# What did we find? Which method follows our expectation?
# * Looks like there is one large module using DBSCAN clustering
# * There are more evenly sized modules using hierarchical clustering and affinity propagation, so we will probably use one of these 2 methods.
# ## Examine composition of modules
#
# This is a negative control. We expect that genes within the same operon or regulon will cluster together (i.e. be within the same module). To test this we will calculate the probability that a pair of genes will be from the same module, given that they are both from the same regulon or operon. We will calculate this probability for each (module, regulon/operon) combination.
#
# _Some definitions:_
#
# [Operons](https://en.wikipedia.org/wiki/Operon) are a group of genes that share a promoter (DNA sequence that is recognized by RNA polymerase and enables transcription) and an operator (DNA sequence that a repressor binds to and blocks RNA polymerase). Therefore this group of genes is transcribed or turned off together (so we would expect a very high correlation amongst these genes)
#
# [Regulons](https://en.wikipedia.org/wiki/Regulon) are a group of genes that are regulated by the same regulatory protein. A regulon can be composed of multiple operons.
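# As a rough, illustrative sketch of that probability calculation (the operon/regulon
# table layout assumed below, a gene-indexed DataFrame with an `operon_name` column,
# is a guess and may differ from the annotation files loaded further down):
# +
def coclustering_probability(membership_df, annotation_df, group_col="operon_name"):
    """For each operon/regulon, return the fraction of gene pairs sharing a module."""
    from itertools import combinations

    # keep only genes present in both the module assignments and the annotation
    merged = membership_df.join(annotation_df[[group_col]], how="inner")
    probs = {}
    for name, genes in merged.groupby(group_col):
        # all unordered pairs of module ids within this operon/regulon
        pairs = list(combinations(genes["module id"].values, 2))
        if pairs:
            probs[name] = sum(a == b for a, b in pairs) / len(pairs)
    return pd.Series(probs)
# -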
# +
# Load PAO1 regulon file
pao1_regulon_filename = paths.PAO1_REGULON
# Load operon files
pa14_operon_filename = paths.PA14_OPERON
pao1_operon_filename = paths.PAO1_OPERON
# Load membership for specific clustering method
pao1_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR,
f"pao1_modules_{method_toexamine}_{gene_subset}_{processed}.tsv",
)
pa14_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR,
f"pa14_modules_{method_toexamine}_{gene_subset}_{processed}.tsv",
)
pao1_membership = pd.read_csv(pao1_membership_filename, sep="\t", header=0, index_col=0)
pa14_membership = pd.read_csv(pa14_membership_filename, sep="\t", header=0, index_col=0)
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import logging
import numpy as np
import pandas as pd
from sklearn.utils.validation import check_X_y, check_consistent_length, check_array
logger = logging.getLogger(__file__)
_KW_SENSITIVE_FEATURES = "sensitive_features"
_MESSAGE_X_NONE = "Must supply X"
_MESSAGE_Y_NONE = "Must supply y"
_MESSAGE_SENSITIVE_FEATURES_NONE = "Must specify {0} (for now)".format(_KW_SENSITIVE_FEATURES)
_MESSAGE_X_Y_ROWS = "X and y must have same number of rows"
_MESSAGE_X_SENSITIVE_ROWS = "X and the sensitive features must have same number of rows"
_MESSAGE_RATIO_NOT_IN_RANGE = "ratio must lie between (0,1]"
_INPUT_DATA_FORMAT_ERROR_MESSAGE = "The only allowed input data formats for {} are: {}. " \
"Your provided data was of type {}."
_EMPTY_INPUT_ERROR_MESSAGE = "At least one of sensitive_features, labels, or scores are empty."
_LABELS_NOT_0_1_ERROR_MESSAGE = "Supplied y labels are not 0 or 1"
_MORE_THAN_ONE_COLUMN_ERROR_MESSAGE = "{} is a {} with more than one column"
_NOT_ALLOWED_TYPE_ERROR_MESSAGE = "{} is not an ndarray, Series or DataFrame"
_NDARRAY_NOT_TWO_DIMENSIONAL_ERROR_MESSAGE = "{} is an ndarray which is not 2D"
_NOT_ALLOWED_MATRIX_TYPE_ERROR_MESSAGE = "{} is not an ndarray or DataFrame"
_ALLOWED_INPUT_TYPES_X = [np.ndarray, pd.DataFrame]
_ALLOWED_INPUT_TYPES_SENSITIVE_FEATURES = [np.ndarray, pd.DataFrame, pd.Series, list]
_ALLOWED_INPUT_TYPES_Y = [np.ndarray, pd.DataFrame, pd.Series, list]
_SENSITIVE_FEATURE_COMPRESSION_SEPARATOR = ","
def _validate_and_reformat_input(X, y=None, expect_y=True, enforce_binary_labels=False, **kwargs):
"""Validate input data and return the data in an appropriate format.
:param X: The feature matrix
:type X: numpy.ndarray or pandas.DataFrame
:param y: The label vector
:type y: numpy.ndarray, pandas.DataFrame, pandas.Series, or list
:param expect_y: if True y needs to be provided, otherwise ignores the argument; default True
:type expect_y: bool
:param enforce_binary_labels: if True raise exception if there are more than two distinct
values in the `y` data; default False
:type enforce_binary_labels: bool
:return: the validated and reformatted X, y, and sensitive_features; note that certain
estimators rely on metadata encoded in X which may be stripped during the reformatting
process, so mitigation methods should ideally use the input X instead of the returned X
for training estimators and leave potential reformatting of X to the estimator.
:rtype: (pandas.DataFrame, pandas.Series, pandas.Series)
"""
if y is not None:
# calling check_X_y with a 2-dimensional y causes a warning, so ensure it is 1-dimensional
if isinstance(y, np.ndarray) and len(y.shape) == 2 and y.shape[1] == 1:
y = y.reshape(-1)
elif isinstance(y, pd.DataFrame) and y.shape[1] == 1:
y = y.to_numpy().reshape(-1)
X, y = check_X_y(X, y)
y = check_array(y, ensure_2d=False, dtype='numeric')
if enforce_binary_labels and not set(np.unique(y)).issubset(set([0, 1])):
raise ValueError(_LABELS_NOT_0_1_ERROR_MESSAGE)
elif expect_y:
raise ValueError(_MESSAGE_Y_NONE)
else:
X = check_array(X)
sensitive_features = kwargs.get(_KW_SENSITIVE_FEATURES)
if sensitive_features is None:
raise ValueError(_MESSAGE_SENSITIVE_FEATURES_NONE)
check_consistent_length(X, sensitive_features)
sensitive_features = check_array(sensitive_features, ensure_2d=False, dtype=None)
# compress multiple sensitive features into a single column
if len(sensitive_features.shape) > 1 and sensitive_features.shape[1] > 1:
sensitive_features = \
_compress_multiple_sensitive_features_into_single_column(sensitive_features)
# If we don't have a y, then need to fiddle with return type to
# avoid a warning from pandas
if y is not None:
result_y = pd.Series(y)
else:
        result_y = pd.Series(dtype="float64")
##################################################
# All functions related to training a deep learning architecture using sensor-based activity data.
##################################################
# Author: <NAME>
# Email: <EMAIL>
# Author: <NAME>
# Email: <EMAIL>
##################################################
import os
import random
import time
from datetime import timedelta
from glob import glob
import numpy as np
import pandas as pd
import torch
from sklearn.utils import class_weight
from torch.utils.data import DataLoader
from dl_har_model.eval import eval_one_epoch, eval_model
from utils import paint, AverageMeter
from dl_har_model.train_utils import compute_center_loss, get_center_delta, mixup_data, MixUpLoss, init_weights, \
init_loss, init_optimizer, init_scheduler, seed_torch
from dl_har_dataloader.datasets import SensorDataset
train_on_gpu = torch.cuda.is_available() # Check for cuda
def split_validate(model, train_args, dataset_args, seeds=None, verbose=False):
"""
Train model for a number of epochs using split validation.
:param model: A pytorch model for training. Must implement forward function and allow backprop.
:param dict train_args: A dict containing args for training. For allowed keys see train_model arguments.
:param dict dataset_args: A dict containing args for SensorDataset class excluding the prefix. For allowed keys see
SensorDataset.__init__ arguments.
:param verbose: A boolean indicating whether to print results.
:param list seeds: A dict containing all random seeds used for training.
:return: training and validation losses, accuracies, f1 weighted and macro across epochs and raw predictions
"""
train_data = SensorDataset(prefix='train', **dataset_args)
val_data = SensorDataset(prefix='val', **dataset_args)
test_data = SensorDataset(prefix='test', **dataset_args)
if seeds is None:
seeds = [1]
if verbose:
print(paint("Running HAR training loop ..."))
start_time = time.time()
if verbose:
print(paint("Applying Split-Validation..."))
results_array = pd.DataFrame(columns=['v_type', 'seed', 'sbj', 't_loss', 't_acc', 't_fm', 't_fw', 'v_loss', 'v_acc',
'v_fm', 'v_fw'])
test_results_array = pd.DataFrame(columns=['v_type', 'seed', 'test_loss', 'test_acc', 'test_fm', 'test_fw'])
    preds_array = pd.DataFrame(columns=['v_type', 'seed', 'sbj', 'val_preds', 'test_preds'])
#!/usr/bin/env python
# coding=utf-8
from math import isnan
from numpy import mean, std, power, asarray, log
from scipy.stats.mstats import gmean
from warnings import warn
from types import *
from itertools import repeat
import pandas as pd
log2 = lambda x: log(x)/log(2)
def average_cq(seq, efficiency=1.0):
"""Given a set of Cq values, return the Cq value that represents the
average expression level of the input.
The intent is to average the expression levels of the samples,
since the average of Cq values is not biologically meaningful.
:param iterable seq: A sequence (e.g. list, array, or Series) of Cq values.
:param float efficiency: The fractional efficiency of the PCR reaction; i.e.
1.0 is 100% efficiency, producing 2 copies per amplicon per cycle.
:return: Cq value representing average expression level
:rtype: float
"""
denominator = sum( [pow(2.0*efficiency, -Ci) for Ci in seq] )
return log(len(seq)/denominator)/log(2.0*efficiency)
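# A worked example, assuming 100% efficiency: average_cq([20.0, 22.0]) ≈ 20.68,
# i.e. the Cq-space average weights the earlier (more abundant) replicate more
# heavily than the arithmetic mean of 21.0 would.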
def validate_sample_frame(sample_frame):
"""Makes sure that `sample_frame` has the columns we expect.
:param DataFrame sample_frame: A sample data frame.
:return: True (or raises an exception)
:raises TypeError: if sample_frame is not a pandas DataFrame
:raises ValueError: if columns are missing or the wrong type
"""
if not isinstance(sample_frame, pd.core.frame.DataFrame):
raise TypeError("Expected a pandas DataFrame, received {}".format(type(sample_frame)))
for col in ['Sample', 'Target', 'Cq']:
if col not in sample_frame:
raise ValueError("Missing column {} in sample frame".format(col))
if sample_frame['Cq'].dtype.kind != 'f':
raise ValueError("Expected Cq column to have float type; has type {} instead".format(str(sample_frame['Cq'].dtype)))
return True
def censor_background(sample_frame, ntc_samples=['NTC'], margin=log2(10)):
"""Selects rows from the sample data frame that fall `margin` or greater
cycles earlier than the NTC for that target. NTC wells are recognized by
string matching against the Sample column.
:param DataFrame sample_frame: A sample data frame.
:param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC']
:param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10)
:return: a view of the sample data frame containing only non-background rows
:rtype: DataFrame
"""
ntcs = sample_frame.loc[ sample_frame['Sample'].apply(lambda x: x in ntc_samples), ]
if ntcs.empty:
return sample_frame
g = ntcs.groupby('Target')
min_ntcs = g['Cq'].min()
# if a target has no NTC, min_ntcs.loc[sample] is NaN
# we should retain all values from targets with no NTC
# all comparisons with NaN are false
# so we test for the "wrong" condition and invert the result
censored = sample_frame.loc[ ~(sample_frame['Cq'] > (min_ntcs.loc[sample_frame['Target']] - margin)) ]
return censored
def expression_ddcq(sample_frame, ref_target, ref_sample):
"""Calculates expression of samples in a sample data frame relative to a
single reference gene and reference sample using the ∆∆Cq method.
For best results, the ref_sample should be defined for all targets and the
ref_target should be defined for all samples, or else the series you get
back will have lots of NaNs.
:param DataFrame sample_frame: A sample data frame.
:param string ref_target: A string matching an entry of the Target column;
the target to use as the reference target (e.g. 'Gapdh')
:param string ref_sample: A string matching an entry of the Sample column.
:return: a Series of expression values for each row of the sample data
frame.
:rtype: Series
"""
# It might be more correct to replace asarray calls (to discard indexes)
# with proper joins.
ref_target_df = sample_frame.loc[sample_frame['Target'] == ref_target, ['Sample', 'Cq']]
ref_target_grouped = ref_target_df.groupby('Sample')
ref_target_mean_by_sample = ref_target_grouped['Cq'].aggregate(average_cq)
ref_target_mean_list = ref_target_mean_by_sample.loc[sample_frame['Sample']]
ref_target_delta = asarray(ref_target_mean_list - ref_target_mean_by_sample[ref_sample])
ref_sample_df = sample_frame.loc[sample_frame['Sample'] == ref_sample, ['Target', 'Cq']]
ref_sample_grouped = ref_sample_df.groupby('Target')
ref_sample_mean_by_target = ref_sample_grouped['Cq'].aggregate(average_cq)
ref_sample_delta = asarray(sample_frame['Cq'] - asarray(ref_sample_mean_by_target.loc[sample_frame['Target']]))
rel_exp = pd.Series(
power(2, ref_target_delta - ref_sample_delta),
index = sample_frame.index)
return rel_exp
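# A minimal usage sketch of the ∆∆Cq workflow above (the file name, target name
# and sample name are placeholders; use whatever appears in your own
# Sample/Target columns):
#
#   sample_frame = pd.read_csv('my_qpcr_export.csv')   # needs Sample, Target, Cq columns
#   validate_sample_frame(sample_frame)
#   sample_frame = censor_background(sample_frame, ntc_samples=['NTC'])
#   sample_frame['RelExp'] = expression_ddcq(sample_frame, 'Gapdh', 'Control')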
def expression_nf(sample_frame, nf_n, ref_sample):
"""Calculates expression of samples in a sample data frame relative to
pre-computed normalization factors.
ref_sample should be defined for all targets or the result will contain
many NaNs.
:param DataFrame sample_frame: A sample data frame.
:param Series nf_n: A Series of normalization factors indexed by sample.
You probably got this from `compute_nf`.
:param string ref_sample: The name of the sample to normalize against,
which should match a value in the sample_frame Sample column.
:return: a Series of expression values for each row in the sample data
frame.
:rtype: Series
"""
ref_sample_df = sample_frame.loc[sample_frame['Sample'] == ref_sample, ['Target', 'Cq']]
ref_sample_cq = ref_sample_df.groupby('Target')['Cq'].aggregate(average_cq)
delta = -sample_frame['Cq'] + asarray(ref_sample_cq.loc[sample_frame['Target']])
rel = power(2, delta) / asarray(nf_n.loc[sample_frame['Sample']])
return rel
def collect_expression(sample_frame, ref_targets, ref_sample):
"""Calculates the expression of all rows in the sample_frame relative to
each of the ref_targets. Used in rank_targets.
:param DataFrame sample_frame: A sample data frame.
:param iterable ref_targets: A sequence of targets from the Target column of
the sample frame.
:param string ref_sample: The name of the sample to which expression should
be referenced.
:return: a DataFrame of relative expression; rows represent rows of the
sample_frame and columns represent each of the ref_targets.
:rtype: DataFrame
"""
by_gene = {'Sample': sample_frame['Sample'], 'Target': sample_frame['Target']}
for target in ref_targets:
by_gene[target] = expression_ddcq(sample_frame, target, ref_sample)
return pd.DataFrame(by_gene)
def rank_targets(sample_frame, ref_targets, ref_sample):
"""Uses the geNorm algorithm to determine the most stably expressed
genes from amongst ref_targets in your sample.
See Vandesompele et al.'s 2002 Genome Biology paper for information about
the algorithm: http://dx.doi.org/10.1186/gb-2002-3-7-research0034
:param DataFrame sample_frame: A sample data frame.
:param iterable ref_targets: A sequence of targets from the Target column
of sample_frame to consider for ranking.
:param string ref_sample: The name of a sample from the Sample
column of sample_frame. It doesn't really matter what it is but it
should exist for every target.
:return: a sorted DataFrame with two columns, 'Target' and 'M' (the
relative stability; lower means more stable).
:rtype: DataFrame
"""
table = collect_expression(sample_frame, ref_targets, ref_sample)
all_samples = sample_frame['Sample'].unique()
t = table.groupby(['Sample', 'Target']).mean()
logt = log2(t)
ref_targets = set(ref_targets)
worst = []
worst_m = []
while len(ref_targets) - len(worst) > 1:
M = []
for test_target in ref_targets:
if test_target in worst: continue
Vs = []
for ref_target in ref_targets:
if ref_target == test_target or ref_target in worst: continue
A = logt.loc[zip(all_samples, repeat(test_target)), ref_target]
Vs.append(A.std())
M.append( (sum(Vs)/(len(ref_targets)-len(worst)-1), test_target) )
worst.append(max(M)[1])
worst_m.append(max(M)[0])
best = ref_targets - set(worst)
worst.reverse()
worst_m.reverse()
worst_m = [worst_m[0]] + worst_m
return pd.DataFrame({'Target': list(best) + worst, 'M': worst_m}, columns=['Target', 'M'])
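# Sketch of the geNorm ranking workflow (the candidate reference targets and the
# reference sample below are placeholders for names present in your own data):
#
#   ranked = rank_targets(sample_frame, ['Gapdh', 'Actb', 'Ubc', 'B2m'], 'Control')
#   nf = calculate_nf(sample_frame, ranked['Target'][:3], 'Control')
#   sample_frame['RelExp'] = expression_nf(sample_frame, nf, 'Control')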
def calculate_all_nfs(sample_frame, ranked_targets, ref_sample):
"""For a set of n ranked_genes, calculates normalization factors NF_1,
NF_2, ..., NF_n. NF_i represents the normalization factor generated by
considering the first i targets in ranked_targets.
calculate_nf (which returns only NF_n) is probably more
useful for routine analysis.
:param DataFrame sample_frame: A sample data frame.
:param iterable ranked_targets: A list or Series of target names, in order
of descending stability (ascending M).
:param string ref_sample: The name of the sample to normalize against.
:return: a DataFrame with columns 1, 2, ..., n containing normalization
factors NF_1, ..., NF_n for each sample, indexed by sample name.
:rtype: DataFrame
"""
# Returns a DataFrame, where rows represent samples and columns represent a number of reference genes.
grouped = sample_frame.groupby(['Target', 'Sample'])['Cq'].aggregate(average_cq)
samples = sample_frame['Sample'].unique()
nfs = {}
    for i in range(1, len(ranked_targets)+1):
nfs[i] = gmean([pow(2, -grouped.loc[zip(repeat(ref_gene), samples)] + grouped.loc[ref_gene, ref_sample]) for ref_gene in ranked_targets[:i]])
return pd.DataFrame(nfs, index=samples)
def calculate_nf(sample_frame, ref_targets, ref_sample):
"""Calculates a normalization factor from the geometric mean of the
expression of all ref_targets, normalized to a reference sample.
:param DataFrame sample_frame: A sample data frame.
:param iterable ref_targets: A list or Series of target names.
:param string ref_sample: The name of the sample to normalize against.
:return: a Series indexed by sample name containing normalization factors
for each sample.
"""
grouped = sample_frame.groupby(['Target', 'Sample'])['Cq'].aggregate(average_cq)
samples = sample_frame['Sample'].unique()
nfs = gmean([pow(2, -grouped.loc[zip(repeat(ref_gene), samples)] + grouped.loc[ref_gene, ref_sample]) for ref_gene in ref_targets])
return pd.Series(nfs, index=samples)
def calculate_v(nfs):
"""Calculates V(n+1/n) values. Useful for establishing the quality of
your normalization regime. See Vandesompele 2002 for advice on
interpretation.
:param DataFrame nfs: A matrix of all normalization factors, produced by
`calculate_all_nfs`.
:return: a Series of values [V(2/1), V(3/2), V(4/3), ...].
"""
v = []
if (nfs.columns != range(1, nfs.columns[-1]+1)).any():
raise ValueError("Column names invalid in nf_v_frame")
for i in nfs.columns[:-1]:
v.append(std(log2(nfs[i]/nfs[i+1]), ddof=1))
    return pd.Series(v, index=nfs.columns[:-1])
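# Interpretation sketch (per Vandesompele et al. 2002): each V(n+1/n) value measures
# how much the normalization factor changes when the (n+1)-th reference gene is added;
# the paper suggests ~0.15 as a cutoff. For example, if calculate_v(nfs)[2] < 0.15
# (i.e. V(3/2) is small), three reference genes are considered sufficient.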
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 7 17:08:39 2019
@author: max
"""
#%%
import os
import sys
import pandas as pd
import seaborn as sns
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import KnockdownFeatures_class
'''
This module is calling KnockdownFeatures_class.py to create an object holding all knockdowns
of an experiment with their respective features.
It takes a list of paths and knockdown names as an input.
Folder structure must be the following:
Experiment1 |-Knockdown1|-Cell1|-GCAFeatureExtraction|-...|-feature1.csv
|-feature2.csv
|-Cell2|-GCAFeatureExtraction|-...
|-Knockdown2|-...
Experiment2 |-Knockdown2|-...
|-Knockdown3|-...
GCAFeatureExtraction is hard coded, every feature needs to be contained as a .csv
file somewhere in a folder exactly fitting this name. The path to this file
is allowed to have an undefined number of subfolders within GCAFeatureExtraction.
Not each experiment has to have all the knockdowns. If a knockdown is not found
for an experiment it is simply skipped.
The object, when initialized will only contain the information about path and knockdowns
and contains function to extract and organize the given data into one object.
self.extract_all() will use the input to extract the data from the different features
by using the KnockdownFeatures_class.py module on each individual knockdown.
It will create the following data structures:
self.path: A list of the experiment folders to extract data from (first input argument)
self.knockdowns: A list of the knockdowns to extract (second input argument)
self.experiment: A dictionary mapping the experiment+knockdown name to the
respective object
created by the KnockdownFeatures_class.py module
self.features: A list of strings of all the features that could be extracted
grouped_features: A dictionary mapping the feature names to long format
DataFrames containing the following information combined for
all input data:
KD: Name of the respective knockdown (second input argument)
experiment: name of the experiment (first input argument)
item: name of the knockdown+the cell number within that group
meltid: row number of the original csv file, can be for example
the ID of an individual filopodium
timepoint: column number of the original csv file.
usually the timepoint
value: cell value of the original csv file. This is the measured
value for the feature
variable: experiment+item+timepoint. This is the unique ID
for the cell and timepoint.
edit self.exclude to exclude features from being printed to the csv files
Dependencies:
KnockdownFeatures_class.py
'''
#add the paths to the experiment folders
#path=['/Users/max/Desktop/Office/test/data_test/SiRNA_30/segmented/', '/Users/max/Desktop/Office/test/data_test/SiRNA_31/segmented/']
#add the knockdowns you want to load
#Knockdowns=['CTRL', 'DLC1', 'ARHGAP17']
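# Minimal usage sketch (the example paths and knockdown names above are placeholders,
# and 'meas_filoLength' stands in for any feature name found in the data):
# data = Experiment_data(path, Knockdowns)
# data.extract_all()
# data.grouped_features['meas_filoLength']  # long-format DataFrame for one feature
# data.pca_feature_data()                   # wide per-cell matrix in data.wide_feature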
class Experiment_data:
'''
Initialize with the pathname and a list of all knockdowns.
Creates an object holding all objects of an experiment with their given features.
Create a dictionary of Knockdowns and features by launching load_groups()
Create a dataframe of Knockdowns and one feature by launching feature_extraction(feature)
Create a dictionary mapping each feature to such a dataframe with extract_all()
'''
def __init__(self, path, knockdowns):
self.knockdowns=knockdowns
self.path=path
#the upcoming functions will become elements of the class
def info(self):
print(self.knockdowns, self.path, self.features)
def load_groups(self):
'''
loads the objects for each individual feature
'''
self.features=[]
experiment={}
#for each of the specified knockdowns
#create an object from the KnockdownFeatures class at the first path instance
for p in self.path:
print('loading experiment', p)
for i in self.knockdowns:
if os.path.isdir(os.path.join(p, i)):
print('loading group: ', i)
temp=KnockdownFeatures_class.KnockdownFeatures(p, i)
#for the current object call the objects load_all function to load the features
temp.load_all()
#adds the object to a dictionary with the objects experiment identifier and the
#current knockdown as the key
experiment.update({temp.experiment_identifier+'_'+i:temp})
self.features=self.features+temp.features
else:
print('invalid directory parsed')
self.features=pd.Series(self.features).unique()
self.features=list(self.features)
#self.features=next(iter(experiment.values())).features
self.exclude=['meas_branchIntensity_2ndOrder', 'meas_filoIntensityToVeil_Norm', 'meas_filoIntensityEmbedded_Norm']
self.feature_list=[i for i in self.features if i not in self.exclude]
return experiment
def feature_extraction(self, feature):
'''
feature: input for the feature to extract
'''
l=[]
if self.experiment is None:
self.experiment=self.load_groups()
#for each object in the dict
for i in self.experiment:
#print('extracting feature: ', feature, 'for group: ', i)
#creates a list with each element being a dataframe for the same feature
#for a different group
try:
temp=self.experiment[i].all_features[feature]
l.append(temp)
except KeyError:
print('Error: feature {} not found for group {}'.format(feature, i))
#concatonates the list to a dataframe
cross_group_feature = pd.concat(l, axis=0, sort=True)
cross_group_feature=cross_group_feature.reset_index(drop=True)
return cross_group_feature
def extract_all(self):
'''
extract all features for the given experiment by calling feature_extraction
for each feature and creating a dictionary from this.
'''
#calls the load groups function to get all the groups of the experiment
self.experiment=self.load_groups()
self.grouped_features={}
for feature in self.features:
#print('extracting feature: ', feature)
try:
self.grouped_features.update({feature:self.feature_extraction(feature)})
except KeyError:
print('Error: feature {} not found'.format(feature))
#%%
def pca_feature_data(self, value='value'):
'''
creates a wide format dataframe with the feature data for each cell
to use for PCA analysis
'''
temp=[]
#loops through features
for enum, f in enumerate(self.feature_list):
#computes the median value of the current feature for each group and appends the
#resulting dataframe consisting of variable and median value
#to the list
temp.append(self.grouped_features[f].groupby('variable').agg({value:'median'}))
#renames the column of the dataframe to the current feature
temp[enum].rename(columns = {value:'{}'.format(f)}, inplace = True)
#concatonates the list to one data frame adding it as an attribute to the object
self.wide_feature=pd.concat(temp, axis=1, sort=True)
self.wide_feature=self.wide_feature.fillna(0)
def pca_attribute_data(self):
'''
creates a wide format dataframe with the attributes, experiment and knockdown,
for each cell.
to use for PCA analysis
'''
kd={}
exp={}
exp_kd={}
#loops through the features
for f in self.feature_list:
#prints the current feature to show progress
print('collecting attributes of feature {}'.format(f))
#loops through the variables
for enum, i in enumerate(self.grouped_features[f]['variable']):
#if the current variable is not already in the dictionary
if f not in kd:
#updates the dictionary with the variable and the knockdown
kd.update({i:self.grouped_features[f].loc[enum]['KD']})
#updates the dictionary with the variable and the experiment
exp.update({i:self.grouped_features[f].loc[enum]['experiment']})
comb=str(self.grouped_features[f].loc[enum]['experiment']+self.grouped_features[f].loc[enum]['KD'])
exp_kd.update({i:comb})
#KD_feature=str(self.grouped_features[f].loc[enum]['KD'])+f
#makes dataframes from the two dictionaries
temp1=pd.DataFrame.from_dict(kd, orient='index', columns=['knockdown'])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import os
import time
import mshoot
# Set up logging
logging.basicConfig(filename='mpc_case3.log', filemode='w', level='DEBUG')
# Random seed
np.random.seed(12345)
# Paths
ms_file = os.path.join('examples', 'bs2019', 'measurements.csv')
fmu = os.path.join('examples', 'bs2019', 'case3', 'models', 'r1c1co2pid_dymola_1e-11.fmu')
# Simulation period
t0 = '2018-04-05 00:00:00'
t1 = '2018-04-08 00:00:00'
# Read measurements
ms = pd.read_csv(ms_file)
ms['datetime'] = pd.to_datetime(ms['datetime'])
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from dsbox.ml.feature_engineering import project_continuous_on_categorical, CategoricalProjector, TagEncoder
class TestCategorical(unittest.TestCase):
def test_project_continuous_on_categorical_function_should_return_correct_results(self):
# given
df = pd.DataFrame({'item': ['A', 'A', 'B', 'B', 'B'],
'price': [0.0, 1.0, 2.0, 2.0, 1.0]})
# when
serie_transformed = project_continuous_on_categorical(df, 'item', 'price')
# then
serie_expected = pd.Series(data=[0.5, 0.5, 1.66666667, 1.66666667, 1.66666667],
index=df.index,
name='price')
assert_series_equal(serie_expected, serie_transformed)
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with pd.option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with pd.option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with pd.option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
<th class="index_name level0" > </th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
# test vars have same vales on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return pd.Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame(
{"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
),
]
self.blank_value = " "
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with pd.option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_multiple_render(self):
# GH 39396
s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"])
s.to_html() # do 2 renders to ensure css styles not duplicated
assert (
'<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
" color: red;\n}\n</style>" in s.to_html()
)
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.to_html()
# An index but no columns
DataFrame(columns=["a"]).style.to_html()
# A column but no index
DataFrame(index=["a"]).style.to_html()
# No IndexError raised?
def test_render_double(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(
["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_set_properties(self):
df = DataFrame({"A": [0, 1]})
result = df.style.set_properties(color="white", size="10px")._compute().ctx
# order is deterministic
v = [("color", "white"), ("size", "10px")]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = DataFrame({"A": [0, 1]})
result = (
df.style.set_properties(subset=pd.IndexSlice[0, "A"], color="white")
._compute()
.ctx
)
expected = {(0, 0): [("color", "white")]}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.style._translate(True, True)
assert len(result["head"]) == 1
expected = {
"class": "blank level0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
}
assert expected.items() <= result["head"][0][0].items()
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate(True, True)
expected = {
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
}
assert expected.items() <= result["head"][1][0].items()
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index(["A", "B"]).style._translate(True, True)
expected = [
{
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
},
{
"class": "index_name level1",
"type": "th",
"value": "B",
"is_visible": True,
"display_value": "B",
},
{
"class": "blank col0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
},
]
assert result["head"][1] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = DataFrame({0: [1, 2, 3]})
df.style._translate(True, True)
def test_apply_axis(self):
df = DataFrame({"A": [0, 0], "B": [1, 1]})
f = lambda x: [f"val: {x.max()}" for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {
(0, 0): [("val", "1")],
(0, 1): [("val", "1")],
(1, 0): [("val", "1")],
(1, 1): [("val", "1")],
}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {
(0, 0): [("val", "0")],
(0, 1): [("val", "1")],
(1, 0): [("val", "0")],
(1, 1): [("val", "1")],
}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_series_return(self, axis):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
# test Series return where len(Series) < df.index or df.columns but labels OK
func = lambda s: pd.Series(["color: red;"], index=["Y"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
# test Series return where labels align but different order
func = lambda s: pd.Series(["color: red;", "color: blue;"], index=["Y", "X"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(0, 0)] == [("color", "blue")]
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
assert result[(axis, 1 - axis)] == [("color", "blue")]
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_apply_dataframe_return(self, index, columns):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
idxs = ["X", "Y"] if index else ["Y"]
cols = ["X", "Y"] if columns else ["Y"]
df_styles = DataFrame("color: red;", index=idxs, columns=cols)
result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_subset(self, slice_, axis):
result = (
self.df.style.apply(self.h, axis=axis, subset=slice_, foo="baz")
._compute()
.ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
def test_applymap_subset(self, slice_):
result = (
self.df.style.applymap(lambda x: "color:baz;", subset=slice_)._compute().ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:, pd.IndexSlice["x", "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, ["A", "C"]]], # missing col element
pd.IndexSlice[pd.IndexSlice["a", 1], :],
pd.IndexSlice[pd.IndexSlice[:, 1], :],
pd.IndexSlice[pd.IndexSlice[:, [1, 3]], :], # missing row element
pd.IndexSlice[:, ("x", "A")],
pd.IndexSlice[("a", 1), :],
],
)
def test_applymap_subset_multiindex(self, slice_):
# GH 19861
# edited for GH 33562
warn = None
msg = "indexing on a MultiIndex with a nested sequence of labels"
if (
isinstance(slice_[-1], tuple)
and isinstance(slice_[-1][-1], list)
and "C" in slice_[-1][-1]
):
warn = FutureWarning
elif (
isinstance(slice_[0], tuple)
and isinstance(slice_[0][1], list)
and 3 in slice_[0][1]
):
warn = FutureWarning
idx = MultiIndex.from_product([["a", "b"], [1, 2]])
col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
df = DataFrame(np.random.rand(4, 4), columns=col, index=idx)
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
df.style.applymap(lambda x: "color: red;", subset=slice_).to_html()
def test_applymap_subset_multiindex_code(self):
# https://github.com/pandas-dev/pandas/issues/25858
# Checks styler.applymap works with multindex when codes are provided
codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
columns = MultiIndex(
levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
)
df = DataFrame(
[[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
)
pct_subset = pd.IndexSlice[:, pd.IndexSlice[:, "%":"%"]]
def color_negative_red(val):
color = "red" if val < 0 else "black"
return f"color: {color}"
df.loc[pct_subset]
df.style.applymap(color_negative_red, subset=pct_subset)
def test_empty(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0"]},
{"props": [("", "")], "selectors": ["row1_col0"]},
]
assert result == expected
def test_duplicate(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
]
assert result == expected
def test_init_with_na_rep(self):
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
        ctx = Styler(df, na_rep="NA")
from sqlalchemy import true
import FinsterTab.W2020.DataForecast
import datetime as dt
from FinsterTab.W2020.dbEngine import DBEngine
import pandas as pd
import sqlalchemy as sal
import numpy
from datetime import datetime, timedelta, date
import pandas_datareader.data as dr
def get_past_data(self):
"""
Get raw data from Yahoo! Finance for SPY during Great Recession
Store data in MySQL database
:param sources: provides ticker symbols of instruments being tracked
"""
    # Assume that the current date is 2009
now = dt.date(2009, 1, 1) # Date Variables
    start = now - timedelta(days=1500)  # get date value from roughly four years earlier
end = now
# data will be a 2D Pandas Dataframe
data = dr.DataReader('SPY', 'yahoo', start, end)
symbol = [3] * len(data) # add column to identify instrument id number
data['instrumentid'] = symbol
data = data.reset_index() # no designated index - easier to work with mysql database
# Yahoo! Finance columns to match column names in MySQL database.
# Column names are kept same to avoid any ambiguity.
# Column names are not case-sensitive.
data.rename(columns={'Date': 'date', 'High': 'high', 'Low': 'low', 'Open': 'open', 'Close': 'close',
'Adj Close': 'adj close', 'Volume': 'volume'}, inplace=True)
    data = data.sort_values(by=['date'])  # make sure data is ordered by trade date
# send data to database
# replace data each time program is run
data.to_sql('dbo_paststatistics', self.engine, if_exists=('replace'),
index=False,
dtype={'date': sal.Date, 'open': sal.FLOAT, 'high': sal.FLOAT, 'low': sal.FLOAT,
'close': sal.FLOAT, 'adj close': sal.FLOAT, 'volume': sal.FLOAT})
# Tests the accuracy of the old functions
def accuracy(self):
query = 'SELECT * FROM dbo_algorithmmaster'
algorithm_df = pd.read_sql_query(query, self.engine)
query = 'SELECT * FROM dbo_instrumentmaster'
    instrument_master_df = pd.read_sql_query(query, self.engine)
'''
IKI Bangladesh (MIOASI): Calculate min/max dataset differences
Calculate min/max values of ERA5, IBTrACS and RA2 datasets for gust or MSLP.
Uses dask to parallelise the process. Run on SPICE.
#!/bin/bash -l
#SBATCH --qos=normal
#SBATCH --mem=20G
#SBATCH --ntasks=14
#SBATCH --time=00-00:10:00
#SBATCH --export=NONE
python3 -u s7b_validation_difference2.py
Python 3 compatible only.
Author: HS
Created: 26/5/20
'''
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import numpy as np
import dask
import dask.bag as db
import dataprocessing as dp
import matplotlib as mpl
mpl.use('Agg')
import pandas as pd
from ascend import shape
from pandas.plotting import register_matplotlib_converters
from user_vars import ERA5DIR, EVENTS, HCNC
import pdb
def _make_dataframe(model, storm, var, inten, time):
df = pd.DataFrame({'MODEL': model,
'STORM': storm,
'VAR': var,
'INTEN': inten,
'TIME': time})
return df
def matrixmin(a, b):
if len(b) == 0:
return np.array([np.nan] * 9)
# elif len(b) == 1:
# return a
else:
# Return the times of b that have the smallest difference with times of a
xs, ys = np.meshgrid(a, b)
return b[np.abs(xs - ys).argmin(axis=0)]
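# Illustrative sketch (made-up values): matrixmin(np.array([1., 5.]), np.array([2., 6.]))
# forms the |a - b| difference matrix via meshgrid and returns, for each entry of a,
# the element of b with the smallest absolute difference, i.e. array([2., 6.]).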
def process_storm(dfrow):
"""
Process one storm (row)
Args:
dfrow: EVENTS dataframe row
Returns: None
"""
print(f'Processing {dfrow.NAME}... ')
    # loading the downscaled data
gust44 = dp.get_ds_storm_ts(HCNC, 'fg.T1Hmax', dfrow.NAME, RES, shpmask=val_shape)
pres44 = dp.get_ds_storm_ts(HCNC, 'psl.T1Hmin', dfrow.NAME, RES, shpmask=val_shape) / 100. # Convert to hPa
wind44 = dp.get_wspd_ts(HCNC, dfrow.NAME, RES, shpmask=val_shape)
# converting gust units m/sec to knots
gust44 = dp.mpsec_to_knots(gust44)
wind44 = dp.mpsec_to_knots(wind44)
# Define time bounds
starttime = gust44.index[0] # start time of downscaled data
endtime = gust44.index[-1] # Get end time from last step of last run of downscaled
# loading ibtracs data
ibtracs = dp.get_ibtracs_ts(IBTRACSFILE, dfrow.IBID)[starttime:endtime]
# loading ERA5 data
era5 = dp.get_era5_ts(ERA5DIR, dfrow.NAME, shpmask=val_shape)[starttime:endtime]
era5.PRES = era5.PRES / 100 # Convert to hPa
# Find absolute values: ERA5
era5dfg = _make_dataframe('ERA5', dfrow.NAME, 'gust', [era5.GUST.max()]*9, era5.GUST.idxmax())
era5dfw = _make_dataframe('ERA5', dfrow.NAME, 'wind', [era5.WIND.max()]*9, era5.WIND.idxmax())
era5dfp = _make_dataframe('ERA5', dfrow.NAME, 'mslp', [era5.PRES.min()]*9, era5.PRES.idxmin())
# RA2
ra2dfg = _make_dataframe('RA2', dfrow.NAME, 'gust', gust44.max(), gust44.idxmax())
ra2dfw = _make_dataframe('RA2', dfrow.NAME, 'wind', wind44.max(), wind44.idxmax())
ra2dfp = _make_dataframe('RA2', dfrow.NAME, 'mslp', pres44.min(), pres44.idxmin())
# Account for possible join max/min times in IBTrACS data
# Resample to match RA2 hourly output
ibndw = ibtracs[ibtracs.NEWDELHI_WIND == ibtracs.NEWDELHI_WIND.max()].resample('1H').pad().index.values
ibusw = ibtracs[ibtracs.USA_WIND == ibtracs.USA_WIND.max()].resample('1H').pad().index.values
ibndp = ibtracs[ibtracs.NEWDELHI_PRES == ibtracs.NEWDELHI_PRES.min()].resample('1H').pad().index.values
ibusp = ibtracs[ibtracs.USA_PRES == ibtracs.USA_PRES.min()].resample('1H').pad().index.values
# New Delhi absolute values
nddfw = _make_dataframe('IBND',
dfrow.NAME,
'wind',
ibtracs.NEWDELHI_WIND.max(),
matrixmin(wind44.idxmax().values, ibndw))
nddfp = _make_dataframe('IBND',
dfrow.NAME,
'mslp',
ibtracs.NEWDELHI_PRES.min(),
                            matrixmin(pres44.idxmin().values, ibndp))
# US absolute values
usdfw = _make_dataframe('IBUS',
dfrow.NAME,
'wind',
ibtracs.USA_WIND.max(),
matrixmin(wind44.idxmax().values, ibusw))
usdfp = _make_dataframe('IBUS',
dfrow.NAME,
'mslp',
ibtracs.USA_PRES.min(),
matrixmin(pres44.idxmin().values, ibusp))
# Return differnce dataframes
print(f'Done {dfrow.NAME}!')
return pd.concat([usdfw, usdfp, nddfw, nddfp, era5dfg, era5dfw, era5dfp, ra2dfg, ra2dfw, ra2dfp], ignore_index=True)
# Pandas to matplotlib datetime conversion handling etc.
register_matplotlib_converters()
RES = '4p4' # '4p4' or '1p5'
DAYS = 1 # Days pre downscale data to plot
IBTRACSFILE = 'sup/ibtracs.NI.list.v04r00.csv'
# Load Validation area shapefile
val = shape.load_shp('sup/ValidationArea2.shp')
val_shape = val.unary_union()
# Start csv file for recording differences
DIFFCSV = 'validation_diff2.csv'
# # Dask Parallel processing
# # Determine the number of processors visible...
# cpu_count = multiprocessing.cpu_count()
# # .. or as given by slurm allocation.
# if 'SLURM_NTASKS' in os.environ:
# cpu_count = os.environ['SLURM_NTASKS']
# # Do not exceed the number of CPU's available, leaving 1 for the system.
# num_workers = cpu_count - 1
# print('Using {} workers from {} CPUs...'.format(num_workers, cpu_count))
# EVENTS = EVENTS[EVENTS.NAME != "FANI"]
with dask.config.set(num_workers=13):
dask_bag = db.from_sequence([row for index, row in EVENTS.iterrows()]).map(process_storm)
# dfs = dask_bag.compute(scheduler='single-threaded')
dfs = dask_bag.compute()
diffdf = pd.concat(dfs, ignore_index=True)
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pandas import testing as pdt
from rle_array.autoconversion import auto_convert_to_rle, decompress
from rle_array.dtype import RLEDtype
pytestmark = pytest.mark.filterwarnings("ignore:performance")
@pytest.mark.parametrize(
"orig, threshold, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
2.0,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series(
[1, 1, 1, 2, 2, 2], dtype=RLEDtype(np.int64)
),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.9,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series(
[1, 1, 1, 2, 2, 2], dtype=RLEDtype(np.int64)
),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.5,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.0,
# expected
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
# threshold
0.0,
# expected
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
),
(
# orig
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
# threshold
0.1,
# expected
pd.DataFrame({"x": pd.Series([], dtype=RLEDtype(np.int64))}),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
# threshold
0.5,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
),
(
# orig
pd.DataFrame({"x": pd.Series(range(10), dtype=np.int64)}),
# threshold
1.0,
# expected
pd.DataFrame({"x": pd.Series(range(10), dtype=np.int64)}),
),
(
# orig
pd.DataFrame(),
# threshold
None,
# expected
pd.DataFrame(),
),
],
)
@pytest.mark.filterwarnings("ignore:.*would use a DatetimeBlock:UserWarning")
def test_auto_convert_to_rle_ok(
orig: pd.DataFrame, threshold: Optional[float], expected: pd.DataFrame
) -> None:
actual = auto_convert_to_rle(orig, threshold)
pdt.assert_frame_equal(actual, expected)
def test_datetime_warns() -> None:
df = pd.DataFrame(
{
"i1": pd.Series([1], dtype=np.int64),
"d1": pd.Series([pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"),
"i2": pd.Series([1], dtype=np.int64),
"d2": pd.Series([pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"),
}
)
with pytest.warns(None) as record:
auto_convert_to_rle(df, 0.5)
assert len(record) == 2
assert (
str(record[0].message)
== "Column d1 would use a DatetimeBlock and can currently not be RLE compressed."
)
assert (
str(record[1].message)
== "Column d2 would use a DatetimeBlock and can currently not be RLE compressed."
)
def test_auto_convert_to_rle_threshold_out_of_range() -> None:
df = pd.DataFrame({"x": [1]})
with pytest.raises(ValueError, match=r"threshold \(-0.1\) must be non-negative"):
auto_convert_to_rle(df, -0.1)
@pytest.mark.parametrize(
"orig, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(),
# expected
pd.DataFrame(),
),
],
)
def test_decompress_ok(orig: pd.DataFrame, expected: pd.DataFrame) -> None:
actual = decompress(orig)
    pdt.assert_frame_equal(actual, expected)
# -*- coding: utf-8 -*-
"""
Created on 2017-8-16
@author: cheng.li
"""
import datetime as dt
import numpy as np
import pandas as pd
from PyFin.api import *
from alphamind.api import *
from matplotlib import pyplot as plt
start = dt.datetime.now()
engine = SqlEngine('postgresql+psycopg2://postgres:[email protected]/alpha')
universe = Universe('custom', ['zz500'])
neutralize_risk = ['SIZE'] + industry_styles
n_bins = 5
factor_weights = np.array([1.])
freq = '1w'
if freq == '1m':
horizon = 21
elif freq == '1w':
horizon = 4
elif freq == '1d':
horizon = 0
start_date = '2012-01-01'
end_date = '2012-08-01'
dates = makeSchedule(start_date,
end_date,
tenor=freq,
calendar='china.sse',
dateRule=BizDayConventions.Following)
prod_factors = ['EPS']
all_data = engine.fetch_data_range(universe, prod_factors, dates=dates, benchmark=905)
return_all_data = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
factor_all_data = all_data['factor']
total_df = pd.DataFrame()
for factor in prod_factors:
factors = [factor]
final_res = np.zeros((len(dates), n_bins))
factor_groups = factor_all_data.groupby('trade_date')
return_groups = return_all_data.groupby('trade_date')
for i, value in enumerate(factor_groups):
date = value[0]
data = value[1][['code', factor, 'isOpen', 'weight'] + neutralize_risk]
codes = data.code.tolist()
ref_date = value[0].strftime('%Y-%m-%d')
returns = return_groups.get_group(date)
total_data = pd.merge(data, returns, on=['code']).dropna()
print('{0}: {1}'.format(date, len(data)))
risk_exp = total_data[neutralize_risk].values.astype(float)
dx_return = total_data.dx.values
benchmark = total_data.weight.values
f_data = total_data[factors]
try:
res = quantile_analysis(f_data,
factor_weights,
dx_return,
risk_exp=risk_exp,
n_bins=n_bins,
benchmark=benchmark)
except Exception as e:
print(e)
res = np.zeros(n_bins)
final_res[i] = res / benchmark.sum()
    df = pd.DataFrame(final_res, index=dates)
from itertools import product
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.validation import quality_mapping
def test_ok_user_flagged():
assert quality_mapping.DESCRIPTION_MASK_MAPPING['OK'] == 0
assert quality_mapping.DESCRIPTION_MASK_MAPPING['USER FLAGGED'] == 1
def test_description_dict_version_compatibility():
for dict_ in quality_mapping.BITMASK_DESCRIPTION_DICT.values():
assert dict_['VERSION IDENTIFIER 0'] == 1 << 1
assert dict_['VERSION IDENTIFIER 1'] == 1 << 2
assert dict_['VERSION IDENTIFIER 2'] == 1 << 3
def test_latest_version_flag():
# test valid while only identifiers 0 - 2 present
last_identifier = max(
int(vi.split(' ')[-1]) for vi in
quality_mapping.DESCRIPTION_MASK_MAPPING.keys() if
vi.startswith('VERSION IDENTIFIER'))
assert last_identifier == 2
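    # Bit layout sketch (inferred from the masks above): bit 0 is 'USER FLAGGED',
    # bits 1-3 hold the version identifier, so LATEST_VERSION << 1 keeps the
    # version bits clear of the user flag bit.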
assert (quality_mapping.LATEST_VERSION_FLAG ==
quality_mapping.LATEST_VERSION << 1)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask(flag_val):
flag, mask = flag_val
mask |= quality_mapping.LATEST_VERSION_FLAG
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([
mask, mask, quality_mapping.LATEST_VERSION_FLAG, mask,
quality_mapping.LATEST_VERSION_FLAG]))
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_none(flag_invert):
assert quality_mapping.convert_bool_flags_to_flag_mask(
None, *flag_invert) is None
@pytest.mark.parametrize('flag_invert', product(
quality_mapping.DESCRIPTION_MASK_MAPPING.keys(), [True, False]))
def test_convert_bool_flags_to_flag_mask_adds_latest_version(flag_invert):
ser = pd.Series([0, 0, 0, 1, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(
ser, *flag_invert)
assert (flags & quality_mapping.LATEST_VERSION_FLAG).all()
@pytest.fixture()
def ignore_latest_version(mocker):
mocker.patch(
'solarforecastarbiter.validation.quality_mapping.LATEST_VERSION_FLAG',
0)
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
ser = pd.Series([0, 0, 1, 0, 1])
flags = quality_mapping.convert_bool_flags_to_flag_mask(ser, flag, True)
assert_series_equal(flags, pd.Series([mask, mask, 0, mask, 0]))
@pytest.mark.parametrize(
'flag_val', quality_mapping.DESCRIPTION_MASK_MAPPING.items())
def test_convert_bool_flags_to_flag_mask_no_invert(flag_val,
ignore_latest_version):
flag, mask = flag_val
    ser = pd.Series([0, 0, 1, 0, 1])
'''
MIT License
Copyright (c) 2020 MINCIENCIA
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import json
from urllib import request
import sys
import pandas as pd
from datetime import datetime
class traffic:
def __init__(self, user, token):
self.user = user
self.token = token
        self.df_clones = pd.DataFrame(columns=['timestamp','count','uniques'])
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
isna,
)
import pandas._testing as tm
class TestDataFrameCov:
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
        assert isna(result.values).all()
#!/usr/bin/env python3
import argparse
import os
import numpy as np
import pandas as pd
from cobra.io import read_sbml_model
from cobra.util import solvers
from csm4cobra.io import read_json
from csm4cobra.manipulate import set_medium
from csm4cobra.context_specific_deletions import get_all_gene_ko_reactions
from csm4cobra.context_specific_deletions import get_gene_knockout_reactions
from csm4cobra.context_specific_deletions import context_specific_ko
EXPERIMENTS = ['cs_gene_ko', 'double_gene_ko']
SOLVERS = list(solvers.keys())
if 'cglpk' in SOLVERS:
SOLVERS.remove('cglpk')
SOLVERS.append('glpk')
def create_parser():
    parser = argparse.ArgumentParser(description='Run an in-silico experiment on a genome-scale metabolic model.')
    parser.add_argument('sbml_fname', action="store", help='SBML file to use as the model reference')
parser.add_argument('csv_confidences', action="store", help='CSV file storing the gene confidences')
parser.add_argument('experiment', action="store", choices=EXPERIMENTS,
help='Choose the in-silico experiment to be performed')
parser.add_argument('--ceres', action="store", dest="csv_ceres", default=None,
help='CSV file storing gene ceres score')
parser.add_argument('--media', action="store", dest="json_exchanges", default=None,
help='JSON file storing the exchange bounds')
parser.add_argument('--solver', action="store", dest="solver", choices=SOLVERS,
default='glpk', help='LP solver to perform optimizations')
parser.add_argument('--out', action="store", dest="output_folder", default=".",
                        help='Output folder to store the built CSM')
parser.add_argument('--column-name', action="store", dest="col_name", default="confidence",
help='Column name where the RPKM values are stored')
return parser
def generate_output_fname(output_folder, model_id, experiment,
solver, output_format='tsv'):
fname = "_".join([model_id, experiment,solver])
fname = ".".join([fname, output_format])
fname = os.path.join(output_folder,fname)
return fname
def run_context_specific_ko(model, conf_genes, threshold=2,
objectives=('biomass_reaction', 'ATPM')):
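    # Flow sketch (mirrors the code below): zero every objective coefficient,
    # record the wild-type optimum for each requested objective, then block the
    # reactions inactivated by each gene in turn and re-optimise.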
result_dict = {}
model_genes = {g.id for g in model.genes}
objectives = [model.reactions.get_by_id(r) for r in objectives]
for r in model.reactions:
if r.objective_coefficient == 0:
continue
r.objective_coefficient = 0
for obj_rxn in objectives:
result_dict[obj_rxn.id] = {}
obj_rxn.objective_coefficient = 1
solution = model.optimize()
result_dict[obj_rxn.id]['wild_type'] = solution.objective_value
obj_rxn.objective_coefficient = 0
gene_ko_reactions = get_gene_knockout_reactions(model)
all_genes_ko_reactions = get_all_gene_ko_reactions(model, conf_genes, threshold=threshold)
# Set of genes identified by contextualized gpr evaluation
cs_gene_ko = set(all_genes_ko_reactions.keys()) - set(gene_ko_reactions.keys())
for gene, rxn_list in all_genes_ko_reactions.items():
bounds_dict = {}
for rxn in rxn_list:
bounds_dict[rxn.id] = rxn.bounds
rxn.bounds = (0, 0)
for obj_rxn in objectives:
obj_rxn.objective_coefficient = 1
solution = model.optimize()
result_dict[obj_rxn.id][gene] = solution.objective_value
obj_rxn.objective_coefficient = 0
for rxn in rxn_list:
rxn.bounds = bounds_dict[rxn.id]
result_dict['confidence'] = {}
result_dict['in_model'] = {}
result_dict['is_cs_ko'] = {}
result_dict['inactivate_reactions'] = {}
genes_inactivate_reactions = set(all_genes_ko_reactions.keys())
for gene, conf in conf_genes.items():
result_dict['confidence'][gene] = conf
result_dict['is_cs_ko'][gene] = False
if gene in genes_inactivate_reactions:
result_dict['inactivate_reactions'][gene] = len(all_genes_ko_reactions[gene])
if gene in cs_gene_ko:
result_dict['is_cs_ko'][gene] = True
else:
result_dict['inactivate_reactions'][gene] = 0
result_dict['biomass_reaction'][gene] = result_dict['biomass_reaction']['wild_type']
result_dict['ATPM'][gene] = result_dict['ATPM']['wild_type']
if gene in model_genes:
result_dict['in_model'][gene] = True
else:
result_dict['in_model'][gene] = False
    df_results = pd.DataFrame(result_dict)
from typing import Optional, Dict, List
from pandas import DataFrame
class VespaResponse(object):
def __init__(self, json, status_code, url, operation_type):
self.json = json
self.status_code = status_code
self.url = url
self.operation_type = operation_type
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (
self.json == other.json
and self.status_code == other.status_code
and self.url == other.url
and self.operation_type == other.operation_type
)
def trec_format(
vespa_result, id_field: Optional[str] = None, qid: int = 0
) -> DataFrame:
"""
Function to format Vespa output according to TREC format.
TREC format include qid, doc_id, score and rank.
:param vespa_result: raw Vespa result from query.
:param id_field: Name of the Vespa field to use as 'doc_id' value.
:param qid: custom query id.
:return: pandas DataFrame with columns qid, doc_id, score and rank.
"""
hits = vespa_result.get("root", {}).get("children", [])
records = []
for rank, hit in enumerate(hits):
records.append(
{
"qid": qid,
"doc_id": hit["fields"][id_field]
if id_field is not None
else hit["id"],
"score": hit["relevance"],
"rank": rank,
}
)
    return DataFrame.from_records(records)
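# Minimal usage sketch (the query-result structure below is assumed, not taken
# from this file):
# raw = {"root": {"children": [{"id": "id:doc::1", "relevance": 0.87,
#                               "fields": {"doc_id": "d1"}}]}}
# trec_format(raw, id_field="doc_id", qid=3) -> DataFrame with columns
# qid, doc_id, score and rank.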
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 16:22:52 2018
@author: <NAME>
Laboratory for Atmospheric Research
Dept. of Civil and Environmental Engineering
Washington State University
<EMAIL>
"""
import re
import pandas as pd
import glob
import os
import numpy as np
import datetime
import warnings
# Change this path to the directory where the LTAR_Flux_QC.py file is located
os.chdir(r'C:\Users\Eric\Documents\GitHub\LTAR_Phenolog_Initiative_EC_Processing')
import LTAR_Pheno_QC_Functions as LLT
import Reddy_Format as REF
ds = '2018-01-01'
de = '2019-01-01'
files = glob.glob('C:\\Users\\Eric\\Desktop\\LTAR\\LTAR_National_Projects\\PhenologyInitiative\\EC Data\\Processed\\Unprocessed\\*.csv') #Directory or file name with file names here
# File with upper and lower limits for the flux values for each site based on visual inspection of each dataset
QC = pd.read_csv('C:\\Users\\Eric\\Desktop\\LTAR\\LTAR_National_Projects\\PhenologyInitiative\\QC_Limits_List.csv',header = 0, index_col = 'Site')
"""
make_figs_cleaned.py
"""
import os
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import seaborn as sns
import pandas as pd
import numpy as np
import argparse
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem.Draw import rdMolDraw2D
import scipy.stats as stats
from scipy.interpolate import interp1d
from scipy.integrate import simps
from sklearn import metrics
from tqdm import tqdm, trange
from pathos import multiprocessing # speeds up large map functions
from numpy import nan
from ast import literal_eval
METHOD_ORDER = ["evidence", "dropout", "ensemble", "sigmoid"]
METHOD_COLORS = {
method: sns.color_palette()[index]
for index, method in enumerate(METHOD_ORDER)
}
DATASET_MAPPING = {"lipo" : "Lipo",
"delaney" : "Delaney",
"freesolv" : "Freesolv",
"qm7": "QM7",
"bbbp" : "BBBP",
"sider" : "Sider",
"clintox" : "Clintox",
"hiv" : "HIV",
"tox21" : "Tox21",
"qm9" : "QM9",
"enamine" : "Enamine",
"qm9" : "QM9",
"enamine" : "Enamine",
"ppbr_az" : "PPBR",
"clearance_hepatocyte_az" : "Clearance",
"ld50_zhu" : "LD50",
}
# Datasets that we want to make ev tuning plots for
DATASETS = DATASET_MAPPING.values()
CLASSIF_SUMMARY_NAMES = ["avg_pr_entropy", "accuracy_entropy", "auc_roc_entropy",
"brier_entropy", "likelihood_entropy",
'Expected Probability', 'Predicted Probability']
REGR_SUMMARY_NAMES = ["rmse", "mae", "Predicted Probability",
"Expected Probability"]
def rename_method_df_none(df_column, rename_key):
""" Transform method names """
return [rename_key.get(i, None) for i in df_column]
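# Illustrative behaviour (made-up inputs):
# rename_method_df_none(["evidence", "foo"], {"evidence": "Evidence"})
# -> ["Evidence", None]; methods missing from the rename key map to None.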
def convert_dataset_names(dataset_series):
""" Convert the dataset series in to the desired labeling"""
ret_ar = []
for i in dataset_series:
ret_ar.append(DATASET_MAPPING.get(i, i))
return ret_ar
def convert_to_std(full_df):
""" Convert confidence to std where applicable"""
## Convert all the confidence into std's
new_confidence = full_df["stds"].values
# At all the locations that *don't* have an std value, replace new
# confidence with the value in confidence
std_na = pd.isna(new_confidence)
new_confidence[std_na] = full_df['confidence'][std_na].values
full_df["confidence"] = new_confidence
def make_cutoff_table(df, outdir = "results/figures", out_name = "cutoff_table.txt",
export_stds=True, output_metric = "rmse",
table_data_name = "D-MPNN (RMSE)",
significant_best=True,
higher_better=False):
"""make_cutoff_table.
Create the output latex results table and save in a text file.
Args:
df: Summary df of the data
outdir: Save dir
out_name: Name of outdirectory
export_stds: If true, add \pm
output_metric: Name of output metric to use. Default rmse.
table_data_name: Str name of the data table. This can be used to
distinguish this table as "Atomistic", "Classification", "High N",
"Low N", etc. Defaults to "D-MPNN (RMSE)"
significant_best: Bold best
higher_better: If true, higher is better for this metric
"""
# cutoffs = [0, 0.5, 0.75, 0.9, 0.95, 0.99][::-1]
top_k = np.array([1, 0.5, 0.25, 0.1, 0.05])[::-1]
df = df.copy()
df["Method"] = df["method_name"]
df["Data"] = convert_dataset_names(df["dataset"])
uniq_methods = set(df["Method"].values)
unique_datasets = set(df["Data"].values)
data_order = [j for j in DATASETS if j in unique_datasets]
method_order = [j for j in METHOD_ORDER if j in uniq_methods]
table_items = []
for data_group, data_df in df.groupby(["Data"]):
for data_method, data_method_df in data_df.groupby(["Method"]):
# Skip useless methods
if data_method.lower() not in [method.lower() for method in method_order]:
continue
metric_sub = data_method_df[output_metric]
# Total length of metric sub (use min for debugging)
num_tested = np.min([len(i) for i in metric_sub])
for cutoff in top_k:
num_items = int(cutoff * num_tested)
# NOTE: As above, the metric summary is already a mean over confidence
# cutoffs and we should only take a point estimate from each trial run
temp = np.reshape([j[-num_items] for j in metric_sub], -1)
metric_cutoff_mean = np.mean(temp)
metric_cutoff_std = stats.sem(temp) # standard error of the mean
table_items.append({"Data" : data_group,
"Method": data_method,
"Cutoff" : cutoff,
"METRIC_MEAN" : metric_cutoff_mean,
"METRIC_STD": metric_cutoff_std})
metric_summary = pd.DataFrame(table_items)
means_tbl = pd.pivot_table(metric_summary,
values="METRIC_MEAN",
columns=["Cutoff"],
index=["Data", "Method"])
stds_tbl = pd.pivot_table(metric_summary,
values="METRIC_STD",
columns=["Cutoff"],
index=["Data", "Method"])
# Sort columns according to the cutoff values
means_tbl = means_tbl.reindex(sorted(means_tbl.columns)[::-1], axis=1)
stds_tbl = stds_tbl.reindex(sorted(stds_tbl.columns)[::-1], axis=1)
output_tbl = means_tbl.astype(str)
for cutoff in means_tbl.keys():
cutoff_means = means_tbl[cutoff]
cutoff_stds = stds_tbl[cutoff]
for dataset in data_order:
means = cutoff_means[dataset].round(5)
stds = cutoff_stds[dataset]
str_repr = means.astype(str)
if export_stds:
str_repr += " $\\pm$ "
str_repr += stds.round(5).astype(str)
if higher_better:
if significant_best:
# significant_best finds the runs that are best by a
# statistically significant margin (ie. a standard dev)
METRIC_mins = means-stds
METRIC_maxs = means+stds
highest_metric_min = np.max(METRIC_mins)
best_methods = METRIC_maxs > highest_metric_min
else:
# else, best is just the best mean performer
best_methods = (means == means.max())
else:
if significant_best:
# significant_best finds the runs that are best by a
# statistically significant margin (ie. a standard dev)
METRIC_mins = means-stds
METRIC_maxs = means+stds
smallest_metric_max = np.min(METRIC_maxs)
best_methods = METRIC_mins < smallest_metric_max
else:
# else, best is just the best mean performer
best_methods = (means == means.min())
# Bold the items that are best
str_repr[best_methods] = "\\textbf{" + str_repr[best_methods] + "}"
output_tbl[cutoff][dataset] = str_repr
# Sort such that methods and datasets are in correct order
output_tbl = output_tbl.reindex( pd.MultiIndex.from_product([ data_order,
method_order]))
assert(isinstance(table_data_name, str))
output_tbl = output_tbl.set_index(pd.MultiIndex.from_product([[table_data_name],
data_order,
method_order]))
# Write out
with open(os.path.join(outdir, out_name), "w") as fp:
fp.write(output_tbl.to_latex(escape=False))
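# Example usage (a sketch; `summary_df` stands for the evaluation summary frame
# with "dataset", "method_name" and per-trial metric-list columns such as "rmse"):
#     make_cutoff_table(summary_df, outdir="results/figures",
#                       out_name="cutoff_table.txt", output_metric="rmse",
#                       table_data_name="D-MPNN (RMSE)")
# The LaTeX table reports mean +/- SEM of the metric when keeping the top
# 5/10/25/50/100% of predictions (by confidence ordering) for every
# dataset/method pair, bolding the best-performing method(s).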
def average_summary_df_tasks(df, avg_columns):
""" Create averages of the summary df across tasks."""
new_df = []
# Columns to have after averaging
keep_cols = ["dataset", "method_name", "trial_number"]
subsetted = df.groupby(keep_cols)
for subset_indices, subset_df in subsetted:
return_dict = {}
return_dict.update(dict(zip(keep_cols, subset_indices)))
for column in avg_columns:
task_values = subset_df[column].values
min_length = min([len(i) for i in task_values])
new_task_values = []
for j in task_values:
j = np.array(j)
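                # Task traces can differ in length; longer traces are
                # downsampled by taking evenly spaced indices so all tasks can
                # be averaged element-wise below.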
if len(j) > min_length:
percentiles = np.linspace(0, len(j) - 1, min_length).astype(int)
new_task_values.append(j[percentiles])
else:
new_task_values.append(j)
avg_task = np.mean(np.array(new_task_values), axis=0).tolist()
return_dict[column] = avg_task
new_df.append(return_dict)
return pd.DataFrame(new_df)
def evidence_tuning_plots(df, x_input = "Mean Predicted Avg",
y_input = "Empirical Probability",
x_name="Mean Predicted",
y_name="Empirical Probability"):
""" Plot the tuning plot at different evidence values """
def lineplot(x, y, trials, methods, **kwargs):
"""method_lineplot.
Args:
y:
methods:
kwargs:
"""
uniq_methods = set(methods.values)
method_order = sorted(uniq_methods)
method_new_names = [f"$\lambda={i:0.4f}$" for i in method_order]
method_df = []
for method_idx, (method, method_new_name) in enumerate(zip(method_order,
method_new_names)):
            lines_y = y[methods == method]
            lines_x = x[methods == method]
            trials_method = trials[methods == method]
            for index, (xx, yy, trial) in enumerate(zip(lines_x, lines_y, trials_method)):
to_append = [{x_name : x,
y_name: y,
"Method": method_new_name,
"Trial" : trial}
for i, (x,y) in enumerate(zip(xx,yy))]
method_df.extend(to_append)
method_df = pd.DataFrame(method_df)
x = np.linspace(0,1,100)
plt.plot(x, x, linestyle='--', color="black")
sns.lineplot(x=x_name, y=y_name, hue="Method",
alpha=0.8,
hue_order=method_new_names, data=method_df,)
# estimator=None, units = "Trial")
df = df.copy()
# Query methods that have evidence_new_reg_2.0
df = df[["evidence" in i for i in
df['method_name']]].reset_index()
# Get the regularizer and reset coeff
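    # e.g. a method_name of "evidence_new_reg_0.2000" becomes the float 0.2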
coeff = [float(i.split("evidence_new_reg_")[1]) for i in df['method_name']]
df["method_name"] = coeff
df["Data"] = convert_dataset_names(df["dataset"])
df["Method"] = df["method_name"]
g = sns.FacetGrid(df, col="Data", height=6, sharex = False, sharey = False)
g.map(lineplot, x_input, y_input, "trial_number",
methods=df["Method"]).add_legend()
def plot_spearman_r(full_df, std=True):
""" Plot spearman R summary stats """
if std:
convert_to_std(full_df)
full_df["Data"] = convert_dataset_names(full_df["dataset"])
grouped_df = full_df.groupby(["dataset", "method_name", "trial_number", "task_name"])
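    # Spearman rank correlation between the confidence estimate and the absolute
    # prediction error, computed per dataset/method/trial/task group.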
spearman_r = grouped_df.apply(lambda x : stats.spearmanr(x['confidence'].values, np.abs(x['error'].values )).correlation)
new_df = spearman_r.reset_index().rename({0: "Spearman Rho" },
axis=1)
method_order = [i for i in METHOD_ORDER
if i in pd.unique(new_df['method_name'])]
new_df['Method'] = new_df['method_name']
new_df['Dataset'] = new_df['dataset']
plot_width = 2.6 * len(pd.unique(new_df['Dataset']))
plt.figure(figsize=(plot_width, 5))
sns.barplot(data=new_df , x="Dataset", y="Spearman Rho",
hue="Method", hue_order = method_order)
spearman_r_summary = new_df.groupby(["dataset", "method_name"]).describe()['Spearman Rho'].reset_index()
return spearman_r_summary
def make_tuning_plot_rmse(df, error_col_name="rmse",
error_title = "Top 10% RMSE",
cutoff = 0.10):
""" Create the tuning plot for different lambda evidence parameters, but
plot 10% RMSE instead of calibration. """
df = df.copy()
# Get the regularizer and reset coeff
coeff = [float(i.split("evidence_new_reg_")[1]) if "evidence" in i else i for i in df['method_name']]
df["method_name"] = coeff
df["Data"] = convert_dataset_names(df["dataset"])
df["Method"] = df["method_name"]
# Get appropriate datasets
trials = 'trial_number'
methods = 'Method'
# Make area plot
uniq_methods = set(df["Method"].values)
method_order = sorted(uniq_methods,
key=lambda x : x if isinstance(x, float) else -1)
method_df = []
datasets = set()
for data, sub_df in df.groupby("Data"):
# Add datasets
datasets.add(data)
rmse_sub = sub_df[error_col_name]
methods_sub = sub_df["Method"]
trials_sub= sub_df['trial_number']
for method_idx, method in enumerate(method_order):
# Now summarize these lines
bool_select = (methods_sub == method)
rmse_method = rmse_sub[bool_select]
trials_temp = trials_sub[bool_select]
areas = []
# create area!
            for trial, rmse_trial in zip(trials_temp, rmse_method):
num_tested = len(rmse_trial)
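                # The per-trial metric list appears to be ordered by confidence
                # cutoff, so indexing with -cutoff_index keeps only the top
                # `cutoff` fraction of most-confident predictions.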
cutoff_index = int(cutoff * num_tested) - 1
rmse_val = rmse_trial[-cutoff_index]
to_append = {error_title: rmse_val,
"Regularizer Coeff, $\lambda$": method,
"method_name": method,
"Data": data,
"Trial" : trial}
method_df.append(to_append)
method_df = pd.DataFrame(method_df)
# Filter out dropout
method_df = method_df[[i != "dropout" for i in
method_df['method_name']]].reset_index()
# Normalize by dataset
for dataset in datasets:
# Make a divison vector of ones and change it to a different value only
# for the correct dataset of interest to set max rmse to 1
division_factor = np.ones(len(method_df))
indices = (method_df["Data"] == dataset)
# Normalize with respect to the ensemble so that this is 1
max_val = method_df[indices].query("method_name == 'ensemble'").mean()[error_title]
# Take the maximum of the AVERAGE so it's normalized to 1
division_factor[indices] = max_val
method_df[error_title] = method_df[error_title] / division_factor
method_df_evidence = method_df[[isinstance(i, float) for i in
method_df['method_name']]].reset_index()
method_df_ensemble = method_df[["ensemble" in str(i) for i in
method_df['method_name']]].reset_index()
data_colors = {
dataset : sns.color_palette()[index]
for index, dataset in enumerate(datasets)
}
min_x = np.min(method_df_evidence["Regularizer Coeff, $\lambda$"])
max_x= np.max(method_df_evidence["Regularizer Coeff, $\lambda$"])
sns.lineplot(x="Regularizer Coeff, $\lambda$", y=error_title,
hue="Data", alpha=0.8, data=method_df_evidence,
palette = data_colors)
for data, subdf in method_df_ensemble.groupby("Data"):
color = data_colors[data]
area = subdf[error_title].mean()
std = subdf[error_title].std()
plt.hlines(area, min_x, max_x, linestyle="--", color=color, alpha=0.8)
# Add ensemble baseline
ensemble_line = plt.plot([], [], color='black', linestyle="--",
label="Ensemble")
# Now make ensemble plots
plt.legend(bbox_to_anchor=(1.1, 1.05))
def make_area_plots(df, x_input = "Mean Predicted Avg",
y_input = "Empirical Probability"):
""" Make evidence tuning plots """
df = df.copy()
# Get the regularizer and reset coeff
coeff = [float(i.split("evidence_new_reg_")[1]) if "evidence" in i else i for i in df['method_name']]
df["method_name"] = coeff
df["Data"] = convert_dataset_names(df["dataset"])
df["Method"] = df["method_name"]
trials = 'trial_number'
methods = 'Method'
# Make area plot
uniq_methods = set(df["Method"].values)
method_order = sorted(uniq_methods,
key=lambda x : x if isinstance(x, float) else -1)
method_df = []
datasets = set()
for data, sub_df in df.groupby("Data"):
# Add datasets
datasets.add(data)
x_vals = sub_df[x_input]
y_vals = sub_df[y_input]
methods_sub = sub_df["Method"]
trials_sub= sub_df['trial_number']
for method_idx, method in enumerate(method_order):
# Now summarize these lines
bool_select = (methods_sub == method)
lines_y = y_vals[bool_select]
lines_x = x_vals[bool_select]
trials_temp = trials_sub[bool_select]
areas = []
# create area!
            for trial, line_x, line_y in zip(trials_temp, lines_x, lines_y):
new_y = np.abs(np.array(line_y) - np.array(line_x))
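                # Integrate the absolute calibration gap |empirical - predicted|
                # over the predicted axis (Simpson's rule) to get one
                # "area from parity" score per trial.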
area = simps(new_y, line_x)
to_append = {"Area from parity": area,
"Regularizer Coeff, $\lambda$": method,
"method_name": method,
"Data": data,
"Trial" : trial}
method_df.append(to_append)
method_df = pd.DataFrame(method_df)
method_df_evidence = method_df[[isinstance(i, float) for i in
method_df['method_name']]].reset_index()
method_df_ensemble = method_df[["ensemble" in str(i) for i in
method_df['method_name']]].reset_index()
data_colors = {
dataset : sns.color_palette()[index]
for index, dataset in enumerate(datasets)
}
min_x = np.min(method_df_evidence["Regularizer Coeff, $\lambda$"])
max_x= np.max(method_df_evidence["Regularizer Coeff, $\lambda$"])
sns.lineplot(x="Regularizer Coeff, $\lambda$", y="Area from parity",
hue="Data", alpha=0.8, data=method_df_evidence,
palette = data_colors)
for data, subdf in method_df_ensemble.groupby("Data"):
color = data_colors[data]
area = subdf["Area from parity"].mean()
std = subdf["Area from parity"].std()
plt.hlines(area, min_x, max_x, linestyle="--", color=color, alpha=0.8)
ensemble_line = plt.plot([], [], color='black', linestyle="--",
label="Ensemble")
# Now make ensemble plots
plt.legend(bbox_to_anchor=(1.1, 1.05))
def save_plot(outdir, outname):
""" Save current plot"""
plt.savefig(os.path.join(outdir, "png", outname+".png"), bbox_inches="tight")
plt.savefig(os.path.join(outdir, "pdf", outname+".pdf"), bbox_inches="tight")
plt.close()
def plot_calibration(df, x_input = "Mean Predicted Avg",
y_input = "Empirical Probability",
x_name="Mean Predicted",
y_name="Empirical Probability",
method_order = METHOD_ORDER,
avg_x = False):
""" plot_calibration.
avg_x can be used to indicate that the x axis should be averaged position
wise. That is, for classification calibration plots, we compute the
confidence in different interval bands (e.g. we compute empirically the
number of targets in the bin with predicted probability in the range of
0.5,0.6). However, this average changes and the results are therefore very
noisy. To enable averaging, we average across this.
"""
methods = df['method_name']
uniq_methods = pd.unique(methods)
method_order = [j for j in METHOD_ORDER if j in uniq_methods]
method_df = []
if avg_x:
df_copy = df.copy()
new_list = [0]
new_x_map = {}
for method in uniq_methods:
temp_vals = df[df['method_name'] == method][x_input]
new_ar = np.vstack(temp_vals)
new_ar = np.nanmean(new_ar, 0) # avg columnwise
new_x_map[method] = new_ar
df_copy[x_input] = [new_x_map[method] for method in methods]
df = df_copy
x, y = df[x_input].values, df[y_input].values
method_df = [{x_name : xx, y_name : yy, "Method" : method}
for x_i, y_i, method in zip(x, y, methods)
for xx,yy in zip(x_i,y_i)]
    method_df = pd.DataFrame(method_df)
"""
This preprocessing script is adapted from https://github.com/kathrynchapman/LA_MC2C/blob/main/process_data.py
to read the CLEF 2019 (German), Cantemist, CodiEsp, and Guttman datasets and output <train/dev/test>.json files.
"""
# TODO: clean up and add comments
# TODO: SNOMED CT CODE in labels to description mapping file/dict pickle
import argparse
import json
from sklearn.preprocessing import MultiLabelBinarizer
from collections import Counter
from pathlib import Path
import shutil
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm, trange
from argparse import Namespace
# ***IMPORTANT*** import the iterative_stratification from this repo and not the installed skmultilearn library!!!
from iterative_stratification import IterativeStratification, iterative_train_test_split
from skmultilearn.model_selection.measures import get_combination_wise_output_matrix
def cl_parser(argv=None):
"""
Parse command line arguments
:param argv:
:return:
"""
parser = argparse.ArgumentParser(description="Preprocess Data")
parser.add_argument("--label_threshold",
default=0,
type=int,
help="Exclude labels which occur <= threshold",
)
parser.add_argument("--data_dir", type=str, default="clef2019")
parser.add_argument("--partition",
type=str,
default="test",
help="test | development | training -- should match naming style in dataset")
parser.add_argument("--binarize_labels",
type=bool,
default=True)
parser.add_argument("--output_dir",
type=str,
default="preprocessed")
parser.add_argument("--plot",
type=bool,
default=True)
# use the following args for Guttman pre-processing only
parser.add_argument("--force_rerun",
type=bool,
default=False)
parser.add_argument("--random_state",
type=int,
default=35)
# only applicable to CodiEsp and/or Cantemist
parser.add_argument("--subdir",
type=str,
default="",
help="subdir if any for the dataset; dir containing test/dev/train subdirectories")
parser.add_argument("--track",
type=str,
default="D",
help="Codiesp: D (diagnostic) | P (procedure) | X (both); Cantemist: coding | ner | norm")
parser.add_argument("--lang",
type=str,
default="esp",
help="choose language esp (spanish) | en (english); use esp as default for Cantemist")
parser.add_argument("--version",
type=str,
default="",
help="Cantemist dev partition 1 or 2 for reformatting to clef2019; else an empty str")
return parser.parse_args(argv)
def write_to_json(data, path):
"""
:param data: [{id:"doc id", "doc":"doc text", "labels_id": ["label1", "label2"]}, {...}]
:param path:
:return: None
"""
with open(path, mode="w", encoding="utf-8") as out_file:
json.dump(data, out_file, indent=4, ensure_ascii=False)
def read_from_json(og_data, path):
"""
:param og_data: the original data that was written to json
:param path:
:return:
"""
with open(path, mode="r", encoding="utf-8") as in_file:
data = json.load(in_file)
assert og_data == data
def write_to_file(list_of_lines, path, delimiter="\n", overwrite=True):
"""
Write list or iterable of str to file path, with specified delimiter
:param overwrite: True for mode = w, False to append to existing path
:param delimiter: newline or other delimiter characters
:param list_of_lines: list/iterable of str
:param path: out file path
:return:
"""
with open(path, mode="w" if overwrite else "a", encoding="utf-8") as out_file:
for line in list_of_lines:
out_file.write(f"{line}{delimiter}")
def lines_from_file(path):
"""
Yield line from file path with trailing whitespaces removed
:param path: path to file
:return: each line with trailing whitespaces removed
"""
with open(path) as f:
for line in f:
yield line.rstrip()
class Preprocess:
def __init__(self, args=None):
# arguments and paths
self.args = args
self.data_root_dir = Path(__file__).resolve().parent / args.data_dir
self.data_dir = self.data_root_dir / args.partition if args.partition == "test" \
else self.data_root_dir / "train_dev"
self.ids_file_path = self.data_dir / f"ids_{args.partition}.txt"
self.annotation_file_path = self.data_dir / f"anns_{args.partition}.txt" if args.partition == "test" \
else self.data_dir / f"anns_train_dev.txt"
self.docs_dir = self.data_dir / "docs" if args.partition == "test" else self.data_dir / "docs-training"
self.outfile_dir = Path(__file__).resolve().parent / self.args.output_dir / self.args.data_dir
# data components
self.class_counter = Counter()
self.mlb = MultiLabelBinarizer()
self.doc_label_dict = dict()
self.docs_labels_list = []
self.num_docs = 0
self.unseen_labels = []
def __len__(self):
if not self.docs_labels_list:
self.make_docs_labels_list()
return len(self.docs_labels_list)
def _extract_ids(self):
yield from lines_from_file(self.ids_file_path)
def _extract_annotation(self):
"""
Read in the annotation file into dict of {"doc_id": ["label1", "label2", "label3", "..."]}.
Also count frequency of each label type and store in self.class_counter
:return:
"""
for doc_labels in lines_from_file(self.annotation_file_path):
try:
doc_id, labels = doc_labels.split("\t")
            except ValueError:
                # empty labels; split first so the doc_id in the message refers
                # to the current document rather than the previous one
                doc_id, *labels = doc_labels.split("\t")
                print(f"Empty labels in {doc_id}")
try:
labels = labels.split("|")
except AttributeError:
print(f"Empty labels cannot be split at |")
# empty labels
labels = []
self.doc_label_dict[doc_id] = labels
# if self.args.partition == "training":
self.class_counter.update(labels)
def _extract_doc(self, doc_id):
"""
:param doc_id: id of the document in the partition to extract text
:return: str text of a single doc corresponding to the doc_id; if doc has multiple lines, all lines are
concatenated into part of 1 text string
"""
doc_file_path = self.docs_dir / f"{doc_id}.txt"
return " ".join([line for line in lines_from_file(doc_file_path)])
def make_docs_labels_list(self):
"""
Create list of docs : labels dicts where each dict follows this format:
{"id": "doc_id", "doc": "doc text...", "labels_id": ["label1", "label2", "label3", ...]}
:return: None
"""
if not self.doc_label_dict:
self._extract_annotation()
for doc_id in tqdm(self._extract_ids(), desc=f"making id-doc-labels"):
a_doc_labels_dict = dict()
a_doc_labels_dict["id"] = doc_id
a_doc_labels_dict["doc"] = self._extract_doc(doc_id)
try:
a_doc_labels_dict["labels_id"] = self.doc_label_dict[doc_id]
except KeyError:
a_doc_labels_dict["labels_id"] = []
self.docs_labels_list.append(a_doc_labels_dict)
self.num_docs += 1
def create_dataset_json(self):
"""
Preprocess the dataset partition to the standard json format:
[{"id": "doc_id", "doc": "doc text...", "labels_id": ["label1", "label2", "label3", ...]},
...
]
:return: None
"""
if not self.docs_labels_list:
self.make_docs_labels_list()
try:
self.outfile_dir.mkdir(parents=True, exist_ok=False)
print(f"{self.outfile_dir} created to store pre-processed files.")
except FileExistsError:
print(f"{self.outfile_dir} already exists! File will be saved here.")
json_file_path = self.outfile_dir / f"{self.args.partition}.json"
write_to_json(self.docs_labels_list, json_file_path)
try:
read_from_json(self.docs_labels_list, json_file_path)
except AssertionError:
print(f"WARNING: json file not identical to original data!!!")
print(f"Pre-processed {self.args.data_dir} {self.args.partition} partition saved to {json_file_path}.")
def plot_label_distribution(self, save_plot=True, show_plot=False):
"""
Plot the frequency distribution of the labels in the dataset partition.
:return:
"""
if not self.class_counter:
self._extract_annotation()
labels, counts = zip(*self.class_counter.items())
labels, counts = list(labels), list(counts)
assert len(labels) == len(counts)
# without seaborn
# indexes = np.arange(len(labels))
# width = 1
# plt.bar(indexes, counts, width)
# plt.xticks(indexes + width * 0.5, labels, rotation="vertical")
# plt.show()
plt.figure(num=None, figsize=(20, 18), dpi=80, facecolor='w', edgecolor='r')
plot = sns.barplot(x=counts, y=labels, orient="h")
plot.set_title(f"Frequency Distribution of Dataset Labels in {self.args.data_dir.title()} "
f"{self.args.partition.title()} Partition")
plot.bar_label(plot.containers[0])
if save_plot:
plot_dir = self.outfile_dir / "plots"
try:
plot_dir.mkdir(parents=True, exist_ok=False)
print(f"{plot_dir} created to store pre-processed files.")
except FileExistsError:
print(f"{plot_dir} already exists! Plots will be saved here.")
outfile_path = plot_dir / f"{self.args.partition}_label_distribution.png"
plt.savefig(outfile_path)
if show_plot:
plt.show()
@classmethod
def get_another_preprocessed_partition(cls, one_args, other_args):
another_preprocessed_partition = cls(other_args)
try:
assert one_args.data_dir == other_args.data_dir
except AssertionError:
print(f"WARNING: Comparing partitions from different datasets!")
another_preprocessed_partition.make_docs_labels_list()
return another_preprocessed_partition
def compare_labels(self, other_args):
"""
:param other_args: parsed args for another partition; should contain the partition name of the partition to
which labels need to be compared
:return:
"""
another_preprocessed_partition = self.get_another_preprocessed_partition(self.args, other_args)
if not self.unseen_labels or other_args.force_rerun:
self.unseen_labels = [label for label in self.class_counter.keys() if label not in
another_preprocessed_partition.class_counter.keys()]
# print(
# f"this partition: \n{self.class_counter.keys()} vs train:\n{another_preprocessed_partition.class_counter.keys()}")
return self.unseen_labels
def write_unseen_labels(self, other_args):
"""
Write to file the list of labels in this partition not seen in the training partition
:param other_args:
:return:
"""
try:
self.outfile_dir.mkdir(parents=True, exist_ok=False)
print(f"{self.outfile_dir} created to store pre-processed files.")
except FileExistsError:
print(f"{self.outfile_dir} already exists! File will be saved here.")
outfile_path = self.outfile_dir / f"{self.args.partition}_unseen_in_{other_args.partition}.txt"
if not self.unseen_labels:
unseen = self.compare_labels(other_args)
write_to_file(self.unseen_labels, outfile_path)
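# Example usage (a sketch; argument values are illustrative):
#     args = cl_parser(["--data_dir", "clef2019", "--partition", "training"])
#     prep = Preprocess(args)
#     prep.create_dataset_json()      # writes preprocessed/clef2019/training.json
#     prep.plot_label_distribution()  # saves a label-frequency bar plot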
class PreprocessGuttman(Preprocess):
def __init__(self, args):
super(PreprocessGuttman, self).__init__(args)
# overriding Preprocess attributes
self.data_dir = self.data_root_dir
self.ids_file_path = self.data_dir / f"output.txt"
self.annotation_file_path = self.data_dir / f"output.txt"
self.docs_dir = self.data_dir / f"annotation_Conny_final_npwd.xlsm"
# Guttman specific attributes
self.num_patients = 0
self.num_missing_qualifier_docs = 0
self.id_to_concept = dict()
self.concept_to_id = dict()
self.pt_to_concept_id = dict()
self.present_concepts = Counter()
self.mentioned_absent = Counter()
self.mentioned_unknown_concepts = Counter()
self.missing_qualifier_concepts = Counter()
self.doc_id_to_doc_texts = self._extract_doc_texts()
self.partitions = dict()
self.partitions_labels = dict()
def _extract_doc_texts(self):
"""
:return: dict mapping doc_id to doc text; doc_id == patientID_noteNumber e.g. "5333328_0"
"""
        excel_file = pd.ExcelFile(self.docs_dir)
# Extra evaluation functions for additional ML tasks
import pandas as pd
import numpy as np
import pickle
import os
from pathlib import Path
from cvss import CVSS2
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_curve, auc, matthews_corrcoef
from xgboost import XGBClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from lightgbm import LGBMClassifier
from scipy.sparse import hstack, vstack, coo_matrix
# Settings
time_partition = True # Use time-based folds
# Manual features
man_features = ['STARS', 'COMMITS', 'NS', 'LA', 'LD', 'LT', 'NDEV', 'AGE', 'NUC', 'EXP', 'NRF', 'abstract', 'assert', 'boolean', 'break', 'byte', 'case', 'catch', 'char', 'class', 'continue', 'const', 'default', 'do', 'double', 'else', 'enum', 'exports', 'extends', 'false', 'final', 'finally', 'float', 'for', 'goto', 'if', 'implements', 'import', 'instanceof', 'int', 'interface', 'long', 'module', 'native', 'new', 'null', 'package', 'private', 'protected', 'public', 'requires', 'return', 'short', 'static', 'strictfp', 'super', 'switch', 'synchronized', 'this', 'throw', 'throws', 'transient', 'true', 'try', 'var', 'void', 'volatile', 'while']
# Classift CVSS score severity
def severity(score):
if 0 <= score < 4: return('LOW')
elif 4 <= score < 7: return('Medium')
elif 7 <= score <= 10: return('High')
else: return 'Eh?'
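# e.g. severity(3.5) -> 'LOW', severity(9.8) -> 'High'; the bands follow the
# usual CVSS low/medium/high split at scores of 4 and 7.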
# Get the optimal classifier from the holdout validation process. Return the hyperparameters
def get_best_classifier(problem, feature_type, feature_scope, token, alg, fold, sampling):
# Load the validation results
if problem in ['multiclass', 'binary']: results = pd.read_csv("ml_results/validate.csv")
elif problem == 'combined': results = pd.read_csv("ml_results/combined_validate.csv")
elif 'cvss' in problem: results = pd.read_csv("ml_results/cvss_validate.csv")
results = results[(results['problem'] == problem) & (results['feature_type'] == feature_type) & (results['feature_scope'] == feature_scope) & (results['token'] == token) & (results['classifier'] == alg) & (results['partition'] == int(fold)) & (results['resampling'] == sampling)].reset_index()
# Check empty
if results.empty:
return False
# Get the best model
test_result = results.iloc[results['mcc'].idxmax()]
return test_result['parameters']
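# Example (a sketch; the feature/token/resampling values are illustrative and
# must match whatever was logged in the validation CSVs):
#     params = get_best_classifier('binary', 'code', 'hunk', 'bow', 'xgb', 0, 'none')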
# Get results for the multiclass case using the best configuration for the binary case.
def transfer_folds():
# Open the results file
if not os.path.exists("ml_results/transfer.csv"):
outfile = open("ml_results/transfer.csv", 'w')
outfile.write("problem,partition,feature_type,feature_scope,token,classifier,parameters,accuracy,precision,recall,gmean,f1,mcc,f1_difference,mcc_difference,mc_mcc_variance,mc_f1_variance,binary_mcc_variance,binary_f1_variance\n")
else:
outfile = open("ml_results/transfer.csv", 'a')
# Load the validation results
all_results = pd.read_csv("ml_results/validate.csv")
for feature_type in ['code', 'ast', 'manual']:
for feature_scope in ['hunk', 'ss', 'hc']:
for token in ['<KEY>']:
for alg in ['lr', 'knn', 'svm', 'rf', 'lgbm', 'xgb', 'all']:
if feature_type == 'manual':
feature_scope, token = '-', '-'
if alg != 'all':
results = all_results[(all_results['feature_type'] == feature_type) & (all_results['feature_scope'] == feature_scope) & (all_results['token'] == token) & (all_results['classifier'] == alg) & (all_results['partition'] != 'holdout')]
else:
results = all_results[(all_results['feature_type'] == feature_type) & (all_results['feature_scope'] == feature_scope) & (all_results['token'] == token) & (all_results['partition'] != 'holdout')]
results['partition'] = results['partition'].astype(int) # Average partitions
group_features = ['problem', 'parameters', 'feature_type', 'feature_scope', 'token', 'classifier'] # Features to average the folds by
results = results.groupby(group_features).agg({i: 'mean' for i in results.columns if i not in group_features}).reset_index()
# Get best fold averages
binary_result = results.iloc[results[results['problem'] == 'binary']['mcc'].idxmax()]
mc_result = results.iloc[results[results['problem'] == 'multiclass']['mcc'].idxmax()]
# Get worst fold averages, for variance
binary_result_worst = results.iloc[results[results['problem'] == 'binary']['mcc'].idxmin()]
mc_result_worst = results.iloc[results[results['problem'] == 'multiclass']['mcc'].idxmin()]
# Get transfer result
if alg != 'all':
transfer_result = results[(results['problem'] == 'multiclass') & (results['parameters'] == binary_result['parameters'])]
else:
transfer_result = results[(results['problem'] == 'multiclass') & (results['classifier'] == binary_result['classifier']) & (results['parameters'] == binary_result['parameters'])]
# Calculate transfer difference
transfer_result['f1_diff'] = transfer_result['f1'] - mc_result['f1']
transfer_result['mcc_diff'] = transfer_result['mcc'] - mc_result['mcc']
# Calculate hyperparameter variance
transfer_result['binary_f1_variance'] = binary_result['f1'] - binary_result_worst['f1']
transfer_result['binary_mcc_variance'] = binary_result['mcc'] - binary_result_worst['mcc']
transfer_result['mc_f1_variance'] = mc_result['f1'] - mc_result_worst['f1']
transfer_result['mc_mcc_variance'] = mc_result['mcc'] - mc_result_worst['mcc']
# Fix column names
if alg == 'all': transfer_result['classifier'] = 'all_(' + transfer_result['classifier'] + ')'
transfer_result = transfer_result.iloc[0]
# Re-order and save
transfer_result = transfer_result[['problem', 'partition', 'feature_type', 'feature_scope', 'token', 'classifier', 'parameters', 'accuracy', 'precision', 'recall', 'gmean', 'f1', 'mcc', 'f1_diff', 'mcc_diff', 'mc_mcc_variance', 'mc_f1_variance', 'binary_mcc_variance', 'binary_f1_variance']]
outfile.write(','.join([str(round(x, 4)) if isinstance(x, float) else x for x in transfer_result.tolist()])+'\n')
# Break early for manual
if feature_type == 'manual':
return
# Compare the optimal classifier for each task combination (considering ML, DL and all)
def optimal_classifiers():
all_results = pd.read_csv("ml_results/vcc_results_all.csv")
# Remove 'best' columns
all_results = all_results[[c for c in all_results.columns if 'best' not in c.lower()]]
# Convert to long format
columns = all_results.columns.tolist()
id_cols = ['Task', 'Feature type', 'Feature scope', 'Token']
var_cols = [x for x in columns if x not in id_cols]
all_results = pd.melt(all_results, id_vars=id_cols, value_vars=var_cols, var_name='Classifier', value_name='MCC')
# Only keep real values
all_results = all_results[all_results['MCC'] != '-']
all_results['MCC'] = all_results['MCC'].astype(float)
# Get ML and DL models
ml_classifiers = ['LR', 'KNN', 'SVM', 'RF', 'LGBM', 'XGB']
ml_results = all_results[all_results['Classifier'].isin(ml_classifiers)].reset_index(drop=True)
# dl_classifiers = ['MLP (best)', 'MLP (David)', 'MLP (Triet)', 'Sequential CNN (best)', 'Sequential CNN', 'Sequential RNN (best)', 'Sequential RNN', 'Siamese CNN (best)', 'Siamese CNN', 'Siamese (MLP-Best)', 'Siamese (MLP)']
dl_classifiers = ['MLP (Triet)', 'Sequential CNN', 'Sequential RNN', 'Siamese CNN', 'Siamese (MLP)']
dl_results = all_results[all_results['Classifier'].isin(dl_classifiers)].reset_index(drop=True)
# Group task combinations
ml = ml_results.loc[ml_results.groupby(id_cols)['MCC'].idxmax()]
dl = dl_results.loc[dl_results.groupby(id_cols)['MCC'].idxmax()]
all = all_results.loc[all_results.groupby(id_cols)['MCC'].idxmax()].rename(columns={'Classifier': 'Classifier_all', 'MCC': 'MCC_all'})
merged = pd.merge(ml, dl, how='left', on=id_cols, suffixes=('_ml', '_dl'))
merged = merged.merge(all, how='left', on=id_cols)
# Compare optimal classifiers
merged['ml_same'] = 'No'
merged['dl_same'] = 'No'
merged['all_same'] = 'No'
for index, row in merged.iterrows():
split = 13 if index < len(merged)/2 else -13
if merged.at[index, 'Classifier_ml'] == merged.at[index+split, 'Classifier_ml']: merged.at[index, 'ml_same'] = 'Yes'
if merged.at[index, 'Classifier_dl'] == merged.at[index+split, 'Classifier_dl']: merged.at[index, 'dl_same'] = 'Yes'
if merged.at[index, 'Classifier_all'] == merged.at[index+split, 'Classifier_all']: merged.at[index, 'all_same'] = 'Yes'
merged.to_csv('ml_results/best_classifiers.csv', index=False)
# Get the transfer and variance in performance for MC, DL and ALL classifiers
result_transfer = pd.DataFrame(columns=['Feature type', 'Feature scope', 'Token', 'Classifier', 'ML Max', 'ML Transfer', 'DL Max', 'DL Transfer', 'All Max', 'All Transfer'])
for feature_type in ['Code', 'AST', 'Manual']:
for feature_scope in ['Hunk only', 'Smallest scope', 'Hunk Context']:
for token in ['BoW', 'W2V']:
if feature_type == 'Manual':
feature_scope, token = '-', '-'
# ML
ml_bin = ml_results.loc[ml_results[(ml_results['Task'] == 'Binary') & (ml_results['Feature type'] == feature_type) & (ml_results['Feature scope'] == feature_scope) & (ml_results['Token'] == token)]['MCC'].idxmax()]
ml_max = ml_results.loc[ml_results[(ml_results['Task'] == 'Multiclass') & (ml_results['Feature type'] == feature_type) & (ml_results['Feature scope'] == feature_scope) & (ml_results['Token'] == token)]['MCC'].idxmax()]
ml_transfer = ml_results[(ml_results['Task'] == 'Multiclass') & (ml_results['Feature type'] == ml_bin['Feature type']) & (ml_results['Feature scope'] == ml_bin['Feature scope']) & (ml_results['Token'] == ml_bin['Token']) & (ml_results['Classifier'] == ml_bin['Classifier'])]
# DL
dl_bin = dl_results.loc[dl_results[(dl_results['Task'] == 'Binary') & (dl_results['Feature type'] == feature_type) & (dl_results['Feature scope'] == feature_scope) & (dl_results['Token'] == token)]['MCC'].idxmax()]
dl_max = dl_results.loc[dl_results[(dl_results['Task'] == 'Multiclass') & (dl_results['Feature type'] == feature_type) & (dl_results['Feature scope'] == feature_scope) & (dl_results['Token'] == token)]['MCC'].idxmax()]
dl_transfer = dl_results[(dl_results['Task'] == 'Multiclass') & (dl_results['Feature type'] == dl_bin['Feature type']) & (dl_results['Feature scope'] == dl_bin['Feature scope']) & (dl_results['Token'] == dl_bin['Token']) & (dl_results['Classifier'] == dl_bin['Classifier'])]
# ALL
all_bin = all_results.loc[all_results[(all_results['Task'] == 'Binary') & (all_results['Feature type'] == feature_type) & (all_results['Feature scope'] == feature_scope) & (all_results['Token'] == token)]['MCC'].idxmax()]
all_max = all_results.loc[all_results[(all_results['Task'] == 'Multiclass') & (all_results['Feature type'] == feature_type) & (all_results['Feature scope'] == feature_scope) & (all_results['Token'] == token)]['MCC'].idxmax()]
all_transfer = all_results[(all_results['Task'] == 'Multiclass') & (all_results['Feature type'] == all_bin['Feature type']) & (all_results['Feature scope'] == all_bin['Feature scope']) & (all_results['Token'] == all_bin['Token']) & (all_results['Classifier'] == all_bin['Classifier'])]
# Append
result = [feature_type, feature_scope, token, ml_bin['Classifier'], ml_max['MCC'], ml_transfer['MCC'].values[0], dl_max['MCC'], dl_transfer['MCC'].values[0], all_max['MCC'], all_transfer['MCC'].values[0]]
result_transfer.loc[len(result_transfer)] = result
if feature_type == 'Manual':
break
if feature_type == 'Manual':
break
result_transfer.to_csv('ml_results/transfer_all.csv', index=False)
# Re-evaluate the one-task classifier (cvss and binary vulnerability prediction) as either a binary or multiclass classifier.
def evaluate_one_task(feature_type, feature_scope, token):
# Load the one-task test results
onetask_results = pd.read_csv("ml_results/combined_test.csv")
# Load the normal test results
norm_results = pd.read_csv("ml_results/test.csv")
compare_results = pd.DataFrame(columns=['comparison', 'partition', 'feature_type', 'feature_scope', 'token', 'classifier', 'parameters', 'f1', 'f1_diff', 'mcc', 'mcc_diff'])
# Compare to binary and multiclass scenario
for comparison in ['binary', 'multiclass']:
# Load the commit map
commits = pd.read_csv("binary_map.csv")
# Get labels
if comparison == 'binary':
commits['cwe'] = np.where(commits['cwe']=='-', 0, 1)
for fold in [str(x) for x in range(10)]:
train_commits = commits[commits['partition'] != int(fold)]
test_commits = commits[commits['partition'] == int(fold)]
if comparison == 'multiclass':
train_commits = train_commits[train_commits['cwe'] != '-']
test_commits = test_commits[test_commits['cwe'] != '-']
# Load the inferred features
if feature_type == 'manual':
                data = pd.read_parquet("inferred_features/manual.parquet")
from collections import OrderedDict
import pydoc
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
date_range,
period_range,
timedelta_range,
)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
import pandas.io.formats.printing as printing
class TestSeriesMisc:
def test_scalarop_preserve_name(self, datetime_series):
result = datetime_series * 2
assert result.name == datetime_series.name
def test_copy_name(self, datetime_series):
result = datetime_series.copy()
assert result.name == datetime_series.name
def test_copy_index_name_checking(self, datetime_series):
# don't want to be able to modify the index stored elsewhere after
# making a copy
datetime_series.index.name = None
assert datetime_series.index.name is None
assert datetime_series is datetime_series
cp = datetime_series.copy()
cp.index.name = "foo"
printing.pprint_thing(datetime_series.index.name)
assert datetime_series.index.name is None
def test_append_preserve_name(self, datetime_series):
result = datetime_series[:5].append(datetime_series[5:])
assert result.name == datetime_series.name
def test_binop_maybe_preserve_name(self, datetime_series):
# names match, preserve
result = datetime_series * datetime_series
assert result.name == datetime_series.name
result = datetime_series.mul(datetime_series)
assert result.name == datetime_series.name
result = datetime_series * datetime_series[:-2]
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "something else"
result = datetime_series + cp
assert result.name is None
result = datetime_series.add(cp)
assert result.name is None
ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
ops = ops + ["r" + op for op in ops]
for op in ops:
# names match, preserve
s = datetime_series.copy()
result = getattr(s, op)(s)
assert result.name == datetime_series.name
# names don't match, don't preserve
cp = datetime_series.copy()
cp.name = "changed"
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self, datetime_series):
result = datetime_series.combine_first(datetime_series[:5])
assert result.name == datetime_series.name
def test_getitem_preserve_name(self, datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
result = datetime_series[[0, 2, 4]]
assert result.name == datetime_series.name
result = datetime_series[5:10]
assert result.name == datetime_series.name
def test_pickle_datetimes(self, datetime_series):
unp_ts = self._pickle_roundtrip(datetime_series)
tm.assert_series_equal(unp_ts, datetime_series)
def test_pickle_strings(self, string_series):
unp_series = self._pickle_roundtrip(string_series)
tm.assert_series_equal(unp_series, string_series)
def _pickle_roundtrip(self, obj):
with tm.ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
def test_sort_index_name(self, datetime_series):
result = datetime_series.sort_index(ascending=False)
assert result.name == datetime_series.name
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d)
expected = Series(d, index=sorted(d.keys()))
tm.assert_series_equal(result, expected)
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
expected = Series(dict(data.items()))
tm.assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
series = Series(data)
expected = Series(list(data.values()), list(data.keys()))
tm.assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = Series(A(data))
tm.assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
_d = sorted(d.items())
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
)
tm.assert_series_equal(result, expected)
d["z"] = 111.0
_d.insert(0, ("z", d["z"]))
result = Series(d)
expected = Series(
[x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
)
result = result.reindex(index=expected.index)
tm.assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = Series(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
result = Series(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
pd.to_timedelta(20, unit="s"): "C",
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
tm.assert_series_equal(result, expected)
def test_sparse_accessor_updates_on_inplace(self):
s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
s.drop([0, 1], inplace=True)
assert s.sparse.density == 1.0
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
# Series of str values should have .str but not .dt/.cat in __dir__
assert "str" in dir(s)
assert "dt" not in dir(s)
assert "cat" not in dir(s)
# similarly for .dt
s = Series(date_range("1/1/2015", periods=5))
assert "dt" in dir(s)
assert "str" not in dir(s)
assert "cat" not in dir(s)
# Similarly for .cat, but with the twist that str and dt should be
# there if the categories are of that type first cat and str.
s = Series(list("abbcd"), dtype="category")
assert "cat" in dir(s)
assert "str" in dir(s) # as it is a string categorical
assert "dt" not in dir(s)
# similar to cat and str
s = Series(date_range("1/1/2015", periods=5)).astype("category")
assert "cat" in dir(s)
assert "str" not in dir(s)
assert "dt" in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = [
"categories",
"codes",
"ordered",
"set_categories",
"add_categories",
"remove_categories",
"rename_categories",
"reorder_categories",
"remove_unused_categories",
"as_ordered",
"as_unordered",
]
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith("_")]
return sorted(set(results))
s = Series(list("aabbcde")).astype("category")
results = get_dir(s)
tm.assert_almost_equal(results, sorted(set(ok_for_cat)))
@pytest.mark.parametrize(
"index",
[
tm.makeUnicodeIndex(10),
tm.makeStringIndex(10),
tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
tm.makeTimedeltaIndex(10),
tm.makeIntIndex(10),
tm.makeUIntIndex(10),
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
Index(["a{}".format(i) for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
],
)
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
s = pd.Series(index=index)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
assert not isinstance(x, str) or not x.isidentifier() or x in dir_s
else:
assert x not in dir_s
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
msg = "'Series' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(s_empty)
with pytest.raises(TypeError, match=msg):
hash(s)
def test_contains(self, datetime_series):
tm.assert_contains_all(datetime_series.index, datetime_series)
def test_iter_datetimes(self, datetime_series):
for i, val in enumerate(datetime_series):
assert val == datetime_series[i]
def test_iter_strings(self, string_series):
for i, val in enumerate(string_series):
assert val == string_series[i]
def test_keys(self, datetime_series):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = datetime_series.keys
assert getkeys() is datetime_series.index
def test_values(self, datetime_series):
tm.assert_almost_equal(
datetime_series.values, datetime_series, check_dtype=False
)
def test_iteritems_datetimes(self, datetime_series):
for idx, val in datetime_series.iteritems():
assert val == datetime_series[idx]
def test_iteritems_strings(self, string_series):
for idx, val in string_series.iteritems():
assert val == string_series[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(string_series.iteritems(), "reverse")
def test_items_datetimes(self, datetime_series):
for idx, val in datetime_series.items():
assert val == datetime_series[idx]
def test_items_strings(self, string_series):
for idx, val in string_series.items():
assert val == string_series[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(string_series.items(), "reverse")
def test_raise_on_info(self):
s = Series(np.random.randn(10))
msg = "'Series' object has no attribute 'info'"
with pytest.raises(AttributeError, match=msg):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype="float64")
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
assert np.isnan(s2[0])
assert np.isnan(s[0])
def test_copy_tzaware(self):
# GH#11794
# copy of tz-aware
expected = Series([Timestamp("2012/01/01", tz="UTC")])
expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
for deep in [None, False, True]:
s = Series([Timestamp("2012/01/01", tz="UTC")])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp("1999/01/01", tz="UTC")
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
tm.assert_series_equal(s2, expected2)
tm.assert_series_equal(s, expected)
else:
# we DID modify the original Series
tm.assert_series_equal(s2, expected2)
tm.assert_series_equal(s, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
tm.assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index"))
assert s.dropna().sum("rows") == 3
assert s._get_axis_number("rows") == 0
assert s._get_axis_name("rows") == "index"
def test_class_axis(self):
# https://github.com/pandas-dev/pandas/issues/18147
# no exception and no empty docstring
assert pydoc.getdoc(Series.index)
def test_numpy_unique(self, datetime_series):
# it works!
np.unique(datetime_series)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(
np.random.randn(1000, 3),
columns=["A", "B", "C"],
index=date_range("1/1/2000", periods=1000),
)
def f(x):
return x[x.idxmax()]
result = tsdf.apply(f)
expected = tsdf.max()
tm.assert_series_equal(result, expected)
# .item()
with tm.assert_produces_warning(FutureWarning):
s = Series([1])
result = s.item()
assert result == 1
assert s.item() == s.iloc[0]
# using an ndarray like function
s = Series(np.random.randn(10))
result = Series(np.ones_like(s))
expected = Series(1, index=range(10), dtype="float64")
tm.assert_series_equal(result, expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F"))
# compress
# GH 6658
s = Series([0, 1.0, -1], index=list("abc"))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.0], index=["b"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s < -1, s)
# result empty Index(dtype=object) as the same as original
exp = Series([], dtype="float64", index=Index([], dtype="object"))
tm.assert_series_equal(result, exp)
        s = Series([0, 1.0, -1], index=[0.1, 0.2, 0.3])
"""
Script for grabbing weather data from DarkSky API dumping to file
"""
import os
import json
import logging
from datetime import datetime, timedelta
import requests
import pandas as pd
LOG = logging.getLogger("homesweetpi.dark_sky_data_grab")
SECRET_KEY = os.getenv("DARKSKYKEY")
LOCATION = "TempelhoferFeld"
LATITUDE = '52.475752' # Tempelhofer Feld
LONGITUDE = '13.407762'
TIMEZONE = 'CET' # Central European Time
PATH = os.path.join(os.path.expanduser("~"),
"spiced", "data", "homesweetpi", "weather_data")
def get_weather_data(time_string):
'''
Query the DarkSky API for data for a given time_string
Returns a json
'''
url_stem = 'https://api.darksky.net/forecast/{}/{},{},{}?units=si'
url = url_stem.format(SECRET_KEY, LATITUDE, LONGITUDE, time_string)
LOG.debug("requesting data for url %s", url)
response = requests.get(url)
return response.json()
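# `time_string` is an ISO-8601 timestamp such as "2020-01-31T12:00:00"; appending
# a time to the forecast URL turns this into a DarkSky "Time Machine" request for
# conditions at that moment at the configured latitude/longitude.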
def dump_to_json(json_obj, time_string, location=LOCATION, path=PATH,
filename_template="DarkSky_{}_{}.json"):
"""
Dump a json object (json_obj) to file
"""
filename = filename_template.format(location, time_string.replace(':', ''))
filename = os.path.join(path, filename)
LOG.debug("dumping json for time_string %s", time_string)
with open(filename, 'w') as filepath:
json.dump(json_obj, filepath)
def convert_to_df(weather_data, level='hourly', timezone=TIMEZONE):
"""
Convert a json from the DarkSky API to a DataFrame using the specified time
level
Return a Pandas DataFrame
"""
LOG.debug("converting %s data to dataframe", level)
weather_data = pd.DataFrame(weather_data[level]['data'])
datetime_series = pd.to_datetime(weather_data['time'], unit='s')
weather_data['time'] = datetime_series.dt.tz_localize('UTC')\
.dt.tz_convert(timezone)
return weather_data
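# Example (a sketch): flatten one API response into an hourly dataframe
#     raw = get_weather_data("2020-01-31T12:00:00")
#     hourly_df = convert_to_df(raw, level="hourly")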
def dump_jsons_for_date_range(date_range,
location=LOCATION,
path=PATH,
filename_template="DarkSky_{}_{}.json"):
"""
Fetch data for a range of times and dump jsons to file
"""
time_strings = [dt.strftime('%Y-%m-%dT%H:%M:%S')
for dt in date_range]
for time_t in time_strings:
LOG.debug("requesting data for time_string %s", time_t)
weather_data = get_weather_data(time_t)
dump_to_json(weather_data, time_t, location=location, path=path,
filename_template=filename_template)
def get_dfs_for_date_range(date_range, path=PATH, level='hourly',
dump_json=True, location=LOCATION):
"""
Fetch data for a range of times and add to a pandas dataframe
"""
time_strings = [dt.strftime('%Y-%m-%dT%H:%M:%S')
for dt in date_range]
list_of_dfs = []
for time_t in time_strings:
LOG.debug("requesting data for time_string %s", time_t)
weather_data = get_weather_data(time_t)
if dump_json:
dump_to_json(weather_data, time_t, location=location, path=path,
filename_template="DarkSky_{}_{}.json")
df_t = convert_to_df(weather_data, level=level, timezone=TIMEZONE)
list_of_dfs.append(df_t)
    concatenated_data = pd.concat(list_of_dfs, sort=False)
# -*- coding: utf-8 -*-
"""
Keyframe Detector
Input: LFA-CSV
Output: KFD-CSV
Original file is located at
https://colab.research.google.com/drive/1K4faoIdDHiNNFTZhrdlGjdyT100H-az8
# Key Frame Detection
from CSV LFA 2021 to CSV Training Dataset
LFA has min,max
"""
import argparse
import logging
import sys
import pandas as pd
import numpy as np
from mlr import util, profile_box
from lfa import __version__
__author__ = "<NAME>"
__copyright__ = "Chayapol Moemeng"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def prepare_filenames(csvFilename):
import os
# csvFilename = "D:/GoogleDrive-VMS/Research/lip-reading/datasets/angsawee/avi/v01.lfa.csv"
filename, file_extension = os.path.splitext(csvFilename)
# print(f"Name:\t{filename}\nExt:\t{file_extension}")
csvFilename1kf = f"{filename}.1kf.csv"
csvFilename3kf = f"{filename}.3kf.csv"
return (csvFilename1kf, csvFilename3kf)
def appendColName(df, postfix):
columns = {}
# print(list(df.columns))
for c in list(df.columns):
columns[c] = f"{c}{postfix}"
# print(columns)
return df.rename(columns=columns)
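# e.g. appendColName(df, 'a') returns a copy of df with every column suffixed
# ('frame#' -> 'frame#a', 'sum' -> 'suma', ...); used below to tell the three
# keyframes of a viseme apart after concatenation.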
def compute_keyframes(viseme, csvFilename):
import pandas as pd
import numpy as np
"""
Compute keyframe 1 and 3
and then export to 1kf.CSV and 3kf.CSV
"""
csvFilename1kf, csvFilename3kf = prepare_filenames(csvFilename)
df = pd.read_csv(csvFilename)
df["viseme"] = viseme
# df.columns
# df[['frame#','sum','min','max','teeth_LAB','teeth_LUV']]
# df[['frame#','sum','min','max','teeth_LAB','teeth_LUV']]
df2 = df[['frame#', 'sum', 'min', 'max', 'teeth_LAB', 'teeth_LUV']]
df2[['sum', 'min', 'max', 'teeth_LAB', 'teeth_LUV']].plot()
df3 = df2.loc[df2['max'].notnull() | df2['min'].notnull()]
"""# Cleaning Up
Assume that min and max values do not appear in the same row.
"""
# Method 1: 3-Keyframe Data
# df3 contains non-null min and max
minFound = 0
maxFound = 0
maxR = minR = None
L = []
for index, row in df3.iterrows():
# if row['max'] != float('nan'):
if not np.isnan(row['max']):
print('1---', row['max'], type(row['max']), row['max']
!= float('nan'), np.isnan(row['max']))
maxFound += 1
minFound = 0
maxR = row
if maxFound == 1 and minFound == 0:
if minR is not None:
L.append(minR)
print('minR', minR['min'], minR['max'])
elif minFound == 0 and maxR['max'] < row['max']:
maxR = row
# if row['min'] != float('nan'):
if not np.isnan(row['min']):
print('2---', row['min'], type(row['min']), row['min']
!= float('nan'), np.isnan(row['min']))
minFound += 1
maxFound = 0
minR = row
if minFound == 1 and maxFound == 0:
if maxR is not None:
L.append(maxR)
print('maxR', maxR['min'], maxR['max'])
elif maxFound == 0 and minR['min'] > row['min']:
minR = row
df5 = pd.DataFrame(L)
# df5[['min','max']]
# df5.columns
# df5
"""## Use frame# to select midpoint between min-max and max-min."""
L = []
ticker = 0
firstRow = True
for index, row in df5.iterrows():
fno = int(row['frame#'])
if firstRow:
firstRow = False
# print(row)
if np.isnan(row['min']): # there could be chance that the first row is not min, skip it
print(f"Skip first row {fno}")
continue
# print(f"{fno} ticker={ticker}")
if ticker == 0:
# print(row)
if np.isnan(row['min']):
raise Exception("Assertion error: expect min")
minfno1 = fno
if ticker == 1:
if np.isnan(row['max']):
raise Exception("Assertion error: expect max")
maxfno = fno
midfno1 = int((minfno1 + maxfno) / 2)
L.append(midfno1)
L.append(maxfno)
# print(midfno1,maxfno)
if ticker == 2:
if np.isnan(row['min']):
raise Exception("Assertion error: expect min")
minfno1 = fno
minfno2 = fno
midfno2 = int((minfno2 + maxfno) / 2)
L.append(midfno2)
# print(midfno2)
ticker = 0
ticker += 1
# L
# print(L[0:3])
# print(L[3:6])
# print(L[6:9])
f1 = df[df['frame#'] == 30].drop(['Unnamed: 0'], axis=1).reset_index()
f2 = df[df['frame#'] == 38].drop(['Unnamed: 0'], axis=1).reset_index()
f3 = df[df['frame#'] == 44].drop(['Unnamed: 0'], axis=1).reset_index()
f1 = appendColName(f1, 'a')
f2 = appendColName(f2, 'b')
f3 = appendColName(f3, 'c')
# print(f1)
f = pd.concat([f1, f2, f3], axis=1)
print(len(L))
samples = int(len(L)/3)
L3 = []
for i in range(samples):
fnos = L[i*3:i*3+3]
# print(fnos)
f1 = df[df['frame#'] == fnos[0]].drop(['Unnamed: 0'], axis=1).reset_index()
f2 = df[df['frame#'] == fnos[1]].drop(['Unnamed: 0'], axis=1).reset_index()
f3 = df[df['frame#'] == fnos[2]].drop(['Unnamed: 0'], axis=1).reset_index()
f1 = appendColName(f1, 'a')
f2 = appendColName(f2, 'b')
f3 = appendColName(f3, 'c')
        f = pd.concat([f1, f2, f3], axis=1)
from sklearn.utils import shuffle
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import re
import numpy as np
import pandas as pd
from contextlib import redirect_stdout
import pickle
import os
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
import matplotlib.pyplot as plt
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
# Filter warning messages
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# Download list of NLTK Dutch stopwords
stop_words = stopwords.words('Dutch')
# Add list of new stopwords to the NLTK stopwords
with open('../results/output/dutch_stopwords/stopwords_new.txt') as f:
content = f.readlines()
# Remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
new_stopword_list = content + stop_words
def lda_document_topic_distribution():
""" Document Topic Distribution
    Using LDA, this function distinguishes 10 topic categories in the text
    and calculates the document-topic matrix.
"""
# Load and shuffle the dataset
    df = pd.read_csv(r'..\data\processed\motivation_liwc_meta_pos_lang.csv')
import pandas as pd
import numpy as np
from helper import Helper
class Statistics():
#### Statistical functions
def get_globalStats(self, data, column):
'''
calculate global statistical numbers
return: mean (float), median (float), standard deviation (float)
'''
mean = data[column].mean()
median = data[column].median()
std = data[column].std()
return round(mean, 4), round(median, 4), round(std, 4)
def get_sma(self, data, n):
'''
calculates the simple moving average of the stock price
return: dataframe
'''
sma = data.rolling(window=n).mean()
return pd.DataFrame(sma)
def get_rstd(self, data, n):
'''
calculates the rolling standard deviation
return: dataframe
'''
rstd = data.rolling(window=n).std()
return pd.DataFrame(rstd)
def get_BollingerBand(self, sma, rstd):
'''
        calculates the Bollinger bands (upper and lower: sma +/- 2 * rolling std)
return: series upper & series lower
'''
# upper bound
upper_band = sma + (rstd * 2)
# lower bound
lower_band = sma - (rstd * 2)
return upper_band, lower_band
def get_DailyReturn(self, data):
'''
        calculates the daily return of the stock price (as a fraction)
return: dataframe
'''
# p_today / p_yesterday - 1
d_ret = round((data[1:] / data[:-1].values) - 1, 4)
return pd.DataFrame(d_ret) # .bfill(axis = 0,inplace=True)
def get_CumulativeReturn(self, data):
'''
calculates the price development since the beginning
of the records
'''
# p_today / p_begin -1
d_retcom = round((data / data.iloc[0]) - 1, 4)
return pd.DataFrame(d_retcom)
def get_Tickerprice(self, price, low, high):
'''
calculates the ticker price
return: Dataframe
'''
ticker = (price + low + high) / 3
        return pd.DataFrame(ticker)
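# Illustrative usage sketch (not part of the original class): it combines the
# rolling helpers above into a Bollinger-band frame. The 'Close' column name
# and the 20-period window are assumptions made only for this example.
def _example_bollinger_usage(prices: pd.DataFrame) -> pd.DataFrame:
    stats = Statistics()
    sma = stats.get_sma(prices['Close'], 20)            # 20-period simple moving average
    rstd = stats.get_rstd(prices['Close'], 20)          # matching rolling standard deviation
    upper, lower = stats.get_BollingerBand(sma, rstd)   # sma +/- 2 * rolling std
    return pd.DataFrame({'sma': sma.squeeze(), 'upper': upper.squeeze(), 'lower': lower.squeeze()})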
"""
__author__ = <NAME>
"""
from unittest import TestCase
from pysight.nd_hist_generator.line_signal_validators.validation_tools import *
import pandas as pd
import numpy as np
class TestFrame(TestCase):
"""
Tests for the validation functions
"""
dict_of_data = dict(
PMT1=pd.DataFrame([1, 10, 20, 30], columns=["abs_time"]),
Lines=pd.DataFrame([0, 5, 10, 15, 20, 25, 30, 35], columns=["abs_time"]),
)
vlad = SignalValidator(dict_of_data)
def test_last_event_only_pmt(self):
dict_of_data = {"PMT1": pd.DataFrame([1, 5, 3], columns=["abs_time"])}
last = SignalValidator(dict_of_data)._SignalValidator__calc_last_event_time()
self.assertEqual(last, 5)
def test_last_event_two_pmts(self):
dict_of_data = {
"PMT1": pd.DataFrame([1, 5, 3], columns=["abs_time"]),
"PMT2": pd.DataFrame([1, 2, 3], columns=["abs_time"]),
}
last = SignalValidator(dict_of_data)._SignalValidator__calc_last_event_time()
self.assertEqual(last, 5)
def test_last_event_with_frames(self):
frame_data = pd.DataFrame([0, 100], columns=["abs_time"])
dict_of_data = {
"PMT1": pd.DataFrame([1, 2, 3], columns=["abs_time"]),
"Frames": frame_data,
"Lines": [1, 2, 3],
}
last = SignalValidator(dict_of_data)._SignalValidator__calc_last_event_time()
self.assertEqual(last, 200)
def test_last_event_with_single_frame(self):
frame_data = pd.DataFrame([100], columns=["abs_time"])
dict_of_data = {
"PMT1": pd.DataFrame([1, 2, 3], columns=["abs_time"]),
"Frames": frame_data,
"Lines": [1, 2, 3],
}
last = SignalValidator(dict_of_data)._SignalValidator__calc_last_event_time()
self.assertEqual(last, 200)
def test_last_event_with_lines_less_than_needed_single_frame(self):
line_data = pd.DataFrame([0, 10, 20], columns=["abs_time"])
dict_of_data = {
"PMT1": pd.DataFrame([1, 2, 3], columns=["abs_time"]),
"Lines": line_data,
}
last = SignalValidator(
dict_of_data, num_of_lines=5
)._SignalValidator__calc_last_event_time()
self.assertEqual(last, 50)
def test_last_event_with_lines_less_than_needed_more_frames(self):
        line_data = pd.DataFrame([0, 10, 20, 30, 40], columns=["abs_time"])
from datetime import datetime
import httpx
import pandas as pd
def fetch_data_from_cna():
url = "https://infographics.channelnewsasia.com/covid-19/sgsitrepv2.json"
resp = httpx.get(url)
data = resp.json()["values"]
df = pd.DataFrame(data=data[1:], columns=list(map(str.strip, data[0])))
part1 = df["Date"][1:276].map(
lambda date: datetime.strptime(date + " 2020", "%b %d %Y").date()
)
part2 = df["Date"][276:].map(
lambda date: datetime.strptime(date + " 2021", "%b %d %Y").date()
)
    df = df.assign(Date=pd.to_datetime(pd.concat([part1, part2])))
# Adapted from https://github.com/mirnylab/cooler
import simplejson as json
import six
import os
import re
from contextlib import contextmanager
from pandas.api.types import is_integer_dtype
from scipy.sparse import coo_matrix
import numpy as np
import pandas as pd
import h5py
# The 4DN data portal and hic2cool store these weight vectors in divisive form
_4DN_DIVISIVE_WEIGHTS = {"KR", "VC", "VC_SQRT"}
@contextmanager
def open_hdf5(fp, mode="r", *args, **kwargs):
"""
Context manager like ``h5py.File`` but accepts already open HDF5 file
handles which do not get closed on teardown.
Parameters
----------
fp : str or ``h5py.File`` object
If an open file object is provided, it passes through unchanged,
provided that the requested mode is compatible.
If a filepath is passed, the context manager will close the file on
tear down.
mode : str
* r Readonly, file must exist
* r+ Read/write, file must exist
* a Read/write if exists, create otherwise
* w Truncate if exists, create otherwise
* w- or x Fail if exists, create otherwise
"""
if isinstance(fp, six.string_types):
own_fh = True
fh = h5py.File(fp, mode, *args, **kwargs)
else:
own_fh = False
if mode == "r" and fp.file.mode == "r+":
# warnings.warn("File object provided is writeable but intent is read-only")
pass
elif mode in ("r+", "a") and fp.file.mode == "r":
raise ValueError("File object provided is not writeable")
elif mode == "w":
raise ValueError("Cannot truncate open file")
elif mode in ("w-", "x"):
raise ValueError("File exists")
fh = fp
try:
yield fh
finally:
if own_fh:
fh.close()
class closing_hdf5(h5py.Group):
def __init__(self, grp):
super(closing_hdf5, self).__init__(grp.id)
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.file.close()
def close(self):
self.file.close()
class TreeNode(object):
def __init__(self, obj, depth=0, level=None):
self.obj = obj
self.depth = depth
self.level = level
def get_type(self):
return type(self.obj).__name__
def get_children(self):
if hasattr(self.obj, "values"):
if self.level is None or self.depth < self.level:
depth = self.depth + 1
children = self.obj.values()
return [
self.__class__(o, depth=depth, level=self.level) for o in children
]
return []
def get_text(self):
name = self.obj.name.split("/")[-1] or "/"
if hasattr(self.obj, "shape"):
name += " {} {}".format(self.obj.shape, self.obj.dtype)
return name
MAGIC = u"HDF5::Cooler"
URL = u"https://github.com/mirnylab/cooler"
def _is_cooler(grp):
fmt = grp.attrs.get("format", None)
url = grp.attrs.get("format-url", None)
if fmt == MAGIC or url == URL:
keys = ("chroms", "bins", "pixels", "indexes")
if not all(name in grp.keys() for name in keys):
print("Cooler path {} appears to be corrupt".format(grp.name))
return True
return False
def visititems(group, func, level=None):
"""Like :py:method:`h5py.Group.visititems`, but much faster somehow.
"""
def _visititems(node, func, result=None):
children = node.get_children()
if children:
for child in children:
result[child.obj.name] = func(child.obj.name, child.obj)
_visititems(child, func, result)
return result
root = TreeNode(group, level=level)
return _visititems(root, func, {})
def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)):
return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
def natsorted(iterable):
return sorted(iterable, key=natsort_key)
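# Minimal sketch (not in the original module) of the natural ordering above:
# plain lexicographic sorting would put 'chr10' before 'chr2'; natsorted does not.
def _natsorted_example():
    assert natsorted(["chr10", "chr2", "chr1"]) == ["chr1", "chr2", "chr10"]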
def list_coolers(filepath):
"""
List group paths to all cooler data collections in a file.
Parameters
----------
filepath : str
Returns
-------
list
Cooler group paths in the file.
"""
if not h5py.is_hdf5(filepath):
raise OSError("'{}' is not an HDF5 file.".format(filepath))
listing = []
def _check_cooler(pth, grp):
if _is_cooler(grp):
listing.append("/" + pth if not pth.startswith("/") else pth)
with h5py.File(filepath, "r") as f:
_check_cooler("/", f)
visititems(f, _check_cooler)
return natsorted(listing)
def parse_cooler_uri(s):
"""
Parse a Cooler URI string
e.g. /path/to/mycoolers.cool::/path/to/cooler
"""
parts = s.split("::")
if len(parts) == 1:
file_path, group_path = parts[0], "/"
elif len(parts) == 2:
file_path, group_path = parts
if not group_path.startswith("/"):
group_path = "/" + group_path
else:
raise ValueError("Invalid Cooler URI string")
return file_path, group_path
def parse_humanized(s):
_NUMERIC_RE = re.compile("([0-9,.]+)")
_, value, unit = _NUMERIC_RE.split(s.replace(",", ""))
if not len(unit):
return int(value)
value = float(value)
unit = unit.upper().strip()
if unit in ("K", "KB"):
value *= 1000
elif unit in ("M", "MB"):
value *= 1000000
elif unit in ("G", "GB"):
value *= 1000000000
else:
raise ValueError("Unknown unit '{}'".format(unit))
return int(value)
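# Illustrative checks of parse_humanized (a sketch, not original code); they
# only exercise behaviour visible in the function above.
def _parse_humanized_examples():
    assert parse_humanized("1,000") == 1000      # comma separators, no unit
    assert parse_humanized("10kb") == 10000      # kilobase suffix
    assert parse_humanized("2.5M") == 2500000    # megabase suffix, fractional value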
def parse_region_string(s):
"""
Parse a UCSC-style genomic region string into a triple.
Parameters
----------
s : str
UCSC-style string, e.g. "chr5:10,100,000-30,000,000". Ensembl and FASTA
style sequence names are allowed. End coordinate must be greater than
or equal to start.
Returns
-------
(str, int or None, int or None)
"""
def _tokenize(s):
token_spec = [
("HYPHEN", r"-"),
("COORD", r"[0-9,]+(\.[0-9]*)?(?:[a-z]+)?"),
("OTHER", r".+"),
]
tok_regex = r"\s*" + r"|\s*".join(r"(?P<%s>%s)" % pair for pair in token_spec)
tok_regex = re.compile(tok_regex, re.IGNORECASE)
for match in tok_regex.finditer(s):
typ = match.lastgroup
yield typ, match.group(typ)
def _check_token(typ, token, expected):
if typ is None:
raise ValueError("Expected {} token missing".format(" or ".join(expected)))
else:
if typ not in expected:
raise ValueError('Unexpected token "{}"'.format(token))
def _expect(tokens):
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["COORD"])
start = parse_humanized(token)
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["HYPHEN"])
typ, token = next(tokens, (None, None))
if typ is None:
return start, None
_check_token(typ, token, ["COORD"])
end = parse_humanized(token)
if end < start:
raise ValueError("End coordinate less than start")
return start, end
parts = s.split(":")
chrom = parts[0].strip()
if not len(chrom):
raise ValueError("Chromosome name cannot be empty")
if len(parts) < 2:
return (chrom, None, None)
start, end = _expect(_tokenize(parts[1]))
return (chrom, start, end)
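# Illustrative checks of parse_region_string (a sketch, not original code):
def _parse_region_string_examples():
    assert parse_region_string("chr3") == ("chr3", None, None)
    assert parse_region_string("chr5:10,100,000-30,000,000") == ("chr5", 10100000, 30000000)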
def parse_region(reg, chromsizes=None):
"""
Genomic regions are represented as half-open intervals (0-based starts,
1-based ends) along the length coordinate of a contig/scaffold/chromosome.
Parameters
----------
reg : str or tuple
UCSC-style genomic region string, or
Triple (chrom, start, end), where ``start`` or ``end`` may be ``None``.
chromsizes : mapping, optional
Lookup table of scaffold lengths to check against ``chrom`` and the
``end`` coordinate. Required if ``end`` is not supplied.
Returns
-------
A well-formed genomic region triple (str, int, int)
"""
if isinstance(reg, six.string_types):
chrom, start, end = parse_region_string(reg)
else:
chrom, start, end = reg
start = int(start) if start is not None else start
end = int(end) if end is not None else end
try:
clen = chromsizes[chrom] if chromsizes is not None else None
except KeyError:
raise ValueError("Unknown sequence label: {}".format(chrom))
start = 0 if start is None else start
if end is None:
if clen is None: # TODO --- remove?
raise ValueError("Cannot determine end coordinate.")
end = clen
if end < start:
raise ValueError("End cannot be less than start")
if start < 0 or (clen is not None and end > clen):
raise ValueError("Genomic region out of bounds: [{}, {})".format(start, end))
return chrom, start, end
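# Illustrative check of parse_region (a sketch; the {"chr3": 100} chromsizes
# mapping is made up for the example):
def _parse_region_examples():
    assert parse_region(("chr3", 10, None), {"chr3": 100}) == ("chr3", 10, 100)
    assert parse_region("chr3:0-50", {"chr3": 100}) == ("chr3", 0, 50)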
class Cooler(object):
"""
A convenient interface to a cooler data collection.
Parameters
----------
store : str, :py:class:`h5py.File` or :py:class:`h5py.Group`
Path to a cooler file, URI string, or open handle to the root HDF5
group of a cooler data collection.
root : str, optional [deprecated]
HDF5 Group path to root of cooler group if ``store`` is a file.
This option is deprecated. Instead, use a URI string of the form
:file:`<file_path>::<group_path>`.
kwargs : optional
Options to be passed to :py:class:`h5py.File()` upon every access.
By default, the file is opened with the default driver and mode='r'.
Notes
-----
If ``store`` is a file path, the file will be opened temporarily in
when performing operations. This allows :py:class:`Cooler` objects to be
serialized for multiprocess and distributed computations.
Metadata is accessible as a dictionary through the :py:attr:`info`
property.
Table selectors, created using :py:meth:`chroms`, :py:meth:`bins`, and
:py:meth:`pixels`, perform range queries over table rows,
returning :py:class:`pd.DataFrame` and :py:class:`pd.Series`.
A matrix selector, created using :py:meth:`matrix`, performs 2D matrix
range queries, returning :py:class:`numpy.ndarray` or
:py:class:`scipy.sparse.coo_matrix`.
"""
def __init__(self, store, root=None, **kwargs):
if isinstance(store, six.string_types):
if root is None:
self.filename, self.root = parse_cooler_uri(store)
elif h5py.is_hdf5(store):
with open_hdf5(store, **kwargs) as h5:
self.filename = h5.file.filename
self.root = root
else:
raise ValueError("Not a valid path to a Cooler file")
self.uri = self.filename + "::" + self.root
self.store = self.filename
self.open_kws = kwargs
else:
# Assume an open HDF5 handle, ignore open_kws
self.filename = store.file.filename
self.root = store.name
self.uri = self.filename + "::" + self.root
self.store = store.file
self.open_kws = {}
self._refresh()
def _refresh(self):
try:
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
_ct = chroms(grp)
_ct["name"] = _ct["name"].astype(object)
self._chromsizes = _ct.set_index("name")["length"]
self._chromids = dict(zip(_ct["name"], range(len(_ct))))
self._info = info(grp)
mode = self._info.get("storage-mode", u"symmetric-upper")
self._is_symm_upper = mode == u"symmetric-upper"
except KeyError:
err_msg = "No cooler found at: {}.".format(self.store)
listing = list_coolers(self.store)
if len(listing):
err_msg += (
" Coolers found in {}. ".format(listing)
+ "Use '::' to specify a group path"
)
raise KeyError(err_msg)
def _load_dset(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return grp[path][:]
def _load_attrs(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return dict(grp[path].attrs)
def open(self, mode="r", **kwargs):
""" Open the HDF5 group containing the Cooler with :py:mod:`h5py`
Functions as a context manager. Any ``open_kws`` passed during
construction are ignored.
Parameters
----------
mode : str, optional [default: 'r']
* ``'r'`` (readonly)
* ``'r+'`` or ``'a'`` (read/write)
Notes
-----
For other parameters, see :py:class:`h5py.File`.
"""
grp = h5py.File(self.filename, mode, **kwargs)[self.root]
return closing_hdf5(grp)
@property
def storage_mode(self):
"""Indicates whether ordinary sparse matrix encoding is used
(``"square"``) or whether a symmetric matrix is encoded by storing only
the upper triangular elements (``"symmetric-upper"``).
"""
return self._info.get("storage-mode", u"symmetric-upper")
@property
def binsize(self):
""" Resolution in base pairs if uniform else None """
return self._info["bin-size"]
@property
def chromsizes(self):
""" Ordered mapping of reference sequences to their lengths in bp """
return self._chromsizes
@property
def chromnames(self):
""" List of reference sequence names """
return list(self._chromsizes.index)
def offset(self, region):
""" Bin ID containing the left end of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
int
Examples
--------
# >>> c.offset('chr3') # doctest: +SKIP
1311
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_offset(
grp, self._chromids, parse_region(region, self._chromsizes)
)
def extent(self, region):
""" Bin IDs containing the left and right ends of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
2-tuple of ints
Examples
--------
# >>> c.extent('chr3') # doctest: +SKIP
(1311, 2131)
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
@property
def info(self):
""" File information and metadata
Returns
-------
dict
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return info(grp)
@property
def shape(self):
return (self._info["nbins"],) * 2
def chroms(self, **kwargs):
""" Chromosome table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return chroms(grp, lo, hi, fields, **kwargs)
return RangeSelector1D(None, _slice, None, self._info["nchroms"])
def bins(self, **kwargs):
""" Bin table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return bins(grp, lo, hi, fields, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
return RangeSelector1D(None, _slice, _fetch, self._info["nbins"])
def pixels(self, join=False, **kwargs):
""" Pixel table selector
Parameters
----------
join : bool, optional
Whether to expand bin ID columns into chrom, start, and end
columns. Default is ``False``.
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return pixels(grp, lo, hi, fields, join, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
i0, i1 = region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
lo = grp["indexes"]["bin1_offset"][i0]
hi = grp["indexes"]["bin1_offset"][i1]
return lo, hi
return RangeSelector1D(None, _slice, _fetch, self._info["nnz"])
def matrix(
self,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=False,
ignore_index=True,
divisive_weights=None,
max_chunk=500000000,
):
""" Contact matrix selector
Parameters
----------
field : str, optional
Which column of the pixel table to fill the matrix with. By
default, the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing
the desired balancing weights. Set to False to return untransformed
counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID
columns into (chrom, start, end). Has no effect when requesting a
            rectangular matrix. Default is False.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the
pixel IDs to improve performance. Default is True.
divisive_weights : bool, optional
Force balancing weights to be interpreted as divisive (True) or
multiplicative (False). Weights are always assumed to be
multiplicative by default unless named KR, VC or SQRT_VC, in which
case they are assumed to be divisive by default.
Returns
-------
Matrix selector
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If
``as_pixels=False``, those missing non-zero elements will
automatically be filled in.
"""
if balance in _4DN_DIVISIVE_WEIGHTS and divisive_weights is None:
divisive_weights = True
def _slice(field, i0, i1, j0, j1):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return matrix(
grp,
i0,
i1,
j0,
j1,
field,
balance,
sparse,
as_pixels,
join,
ignore_index,
divisive_weights,
max_chunk,
self._is_symm_upper,
)
def _fetch(region, region2=None):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
if region2 is None:
region2 = region
region1 = parse_region(region, self._chromsizes)
region2 = parse_region(region2, self._chromsizes)
i0, i1 = region_to_extent(grp, self._chromids, region1)
j0, j1 = region_to_extent(grp, self._chromids, region2)
return i0, i1, j0, j1
return RangeSelector2D(field, _slice, _fetch, (self._info["nbins"],) * 2)
def __repr__(self):
if isinstance(self.store, six.string_types):
filename = os.path.basename(self.store)
container = "{}::{}".format(filename, self.root)
else:
container = repr(self.store)
return '<Cooler "{}">'.format(container)
def _region_to_extent(h5, chrom_ids, region, binsize):
chrom, start, end = region
cid = chrom_ids[chrom]
if binsize is not None:
chrom_offset = h5["indexes"]["chrom_offset"][cid]
yield chrom_offset + int(np.floor(start / binsize))
yield chrom_offset + int(np.ceil(end / binsize))
else:
chrom_lo = h5["indexes"]["chrom_offset"][cid]
chrom_hi = h5["indexes"]["chrom_offset"][cid + 1]
chrom_bins = h5["bins"]["start"][chrom_lo:chrom_hi]
yield chrom_lo + np.searchsorted(chrom_bins, start, "right") - 1
yield chrom_lo + np.searchsorted(chrom_bins, end, "left")
def region_to_offset(h5, chrom_ids, region, binsize=None):
return next(_region_to_extent(h5, chrom_ids, region, binsize))
def region_to_extent(h5, chrom_ids, region, binsize=None):
return tuple(_region_to_extent(h5, chrom_ids, region, binsize))
def get(grp, lo=0, hi=None, fields=None, convert_enum=True, as_dict=False):
"""
Query a range of rows from a table as a dataframe.
A table is an HDF5 group containing equal-length 1D datasets serving as
columns.
Parameters
----------
grp : ``h5py.Group`` or any dict-like of array-likes
Handle to an HDF5 group containing only 1D datasets or any similar
collection of 1D datasets or arrays
lo, hi : int, optional
Range of rows to select from the table.
fields : str or sequence of str, optional
Column or list of columns to query. Defaults to all available columns.
A single string returns a Series instead of a DataFrame.
convert_enum : bool, optional
Whether to convert HDF5 enum datasets into ``pandas.Categorical``
columns instead of plain integer columns. Default is True.
    as_dict : bool, optional
        Return the raw column arrays as a dict instead of a DataFrame.
        Default is False.
Returns
-------
DataFrame or Series
Notes
-----
HDF5 ASCII datasets are converted to Unicode.
"""
series = False
if fields is None:
fields = list(grp.keys())
elif isinstance(fields, six.string_types):
fields = [fields]
series = True
data = {}
for field in fields:
dset = grp[field]
if convert_enum:
dt = h5py.check_dtype(enum=dset.dtype)
else:
dt = None
if dt is not None:
data[field] = pd.Categorical.from_codes(
dset[lo:hi], sorted(dt, key=dt.__getitem__), ordered=True
)
elif dset.dtype.type == np.string_:
data[field] = dset[lo:hi].astype("U")
else:
data[field] = dset[lo:hi]
if as_dict:
return data
if data and lo is not None:
index = np.arange(lo, lo + len(next(iter(data.values()))))
else:
index = None
if series:
return pd.Series(data[fields[0]], index=index, name=field)
else:
return pd.DataFrame(data, columns=fields, index=index)
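# Minimal sketch (not part of the adapted cooler module): `get` accepts any
# dict-like of 1D array-likes, so a plain dict of numpy arrays is enough to
# exercise it without an HDF5 file. `_toy_table` is a name invented here.
def _get_example():
    _toy_table = {
        "start": np.array([0, 1000, 2000]),
        "count": np.array([5, 7, 9]),
    }
    df = get(_toy_table, lo=0, hi=2, fields=["start", "count"], convert_enum=False)
    assert list(df.columns) == ["start", "count"] and len(df) == 2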
def info(h5):
"""
File and user metadata dict.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
Returns
-------
dict
"""
d = {}
for k, v in h5.attrs.items():
if isinstance(v, six.string_types):
try:
v = json.loads(v)
except ValueError:
pass
d[k] = v
return d
def chroms(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the chromosomes/scaffolds/contigs used.
They appear in the same order they occur in the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["name", "length"])
.append(pd.Index(h5["chroms"].keys()))
.drop_duplicates()
)
return get(h5["chroms"], lo, hi, fields, **kwargs)
def bins(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the genomic bins that make up the axes of the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["chrom", "start", "end"])
.append(pd.Index(h5["bins"].keys()))
.drop_duplicates()
)
# If convert_enum is not explicitly set to False, chrom IDs will get
# converted to categorical chromosome names, provided the ENUM header
# exists in bins/chrom. Otherwise, they will return as integers.
out = get(h5["bins"], lo, hi, fields, **kwargs)
# Handle the case where the ENUM header doesn't exist but we want to
# convert integer chrom IDs to categorical chromosome names.
if "chrom" in fields:
convert_enum = kwargs.get("convert_enum", True)
if isinstance(fields, six.string_types):
chrom_col = out
else:
chrom_col = out["chrom"]
if is_integer_dtype(chrom_col.dtype) and convert_enum:
chromnames = chroms(h5, fields="name")
chrom_col = pd.Categorical.from_codes(chrom_col, chromnames, ordered=True)
if isinstance(fields, six.string_types):
                out = pd.Series(chrom_col, out.index)
            else:
                out["chrom"] = chrom_col
    return out
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
def discriminated_antis(all_antis):
try:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv')
except:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv', encoding='gbk')
def isanti(x):
df_抗菌药物['药品'] = x.抗菌药物
df1 = df_抗菌药物[df_抗菌药物['规则等级']==1]
if x.抗菌药物 in list(df1['匹配规则'].values):
return df1[df1['匹配规则']==x.抗菌药物].reset_index(drop=True).loc[0]['抗菌药物通用名']
else:
df2 = df_抗菌药物[df_抗菌药物['规则等级']==2]
df2['是否匹配'] = df2.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df2['匹配长度'] = df2.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df2[~df2['是否匹配'].isnull()].shape[0]==0:
df3 = df_抗菌药物[df_抗菌药物['规则等级']==3]
df3['是否匹配'] = df3.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df3['匹配长度'] = df3.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df3[~df3['是否匹配'].isnull()].shape[0]==0:
df4 = df_抗菌药物[df_抗菌药物['规则等级']==4]
df4['是否匹配'] = df4.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df4['匹配长度'] = df4.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df4[~df4['是否匹配'].isnull()].shape[0]==0:
return np.nan
else:
                        return df4[~df4['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name whose regex matched, preferring the longest pattern
else:
                    return df3[~df3['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name whose regex matched, preferring the longest pattern
else:
                return df2[~df2['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name whose regex matched, preferring the longest pattern
all_antis['抗菌药物通用名'] = all_antis.apply(isanti, axis=1)
return all_antis
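# Usage sketch (an assumption for illustration, not original code): the lookup
# file 抗菌药物字典.csv is expected to provide the columns referenced above --
# 规则等级 (rule level 1-4), 匹配规则 (match pattern) and 抗菌药物通用名 (generic name).
# Given a one-column frame of raw order names, e.g.
#     all_antis = pd.DataFrame({'抗菌药物': ['注射用头孢唑林钠']})
#     all_antis = discriminated_antis(all_antis)
# the function appends a 抗菌药物通用名 column, trying exact matches (level 1)
# first and then progressively looser regex levels (2-4), keeping the longest
# matching pattern at each level.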
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind level-1 figure 1 (antibiotics / bacteria detection / drug susceptibility)
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
    # Problem categories, problem-record counts, and totals over all data
bus_dic = {
'给药': "select '给药' as 业务类型 ,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where BEGINTIME is not null group by substr(BEGINTIME,1,7)",
'菌检出': " select '菌检出' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
'药敏': " select '药敏' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
}
for bus in bus_dic:
res_数据时间缺失及汇总 = res_数据时间缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
print('抗菌药物-菌检出-药敏一级图一',bus)
return res_数据时间缺失及汇总
# Update level-1 figure 1 (antibiotics / bacteria detection / drug susceptibility)
@app.callback(
Output('anti_bar_drug_first_level_first_fig','figure'),
Output('anti_bar_drug_first_level_first_fig_data','data'),
Input('anti_bar_drug_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(anti_bar_drug_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
if anti_bar_drug_first_level_first_fig_data is None:
anti_bar_drug_first_level_first_fig_data = {}
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data['btime'] = btime
anti_bar_drug_first_level_first_fig_data['etime'] = etime
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig_data = json.loads(anti_bar_drug_first_level_first_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_first_fig_data['hosname']:
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig = pd.read_json(anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'], orient='split')
anti_bar_drug_first_level_first_fig_data = dash.no_update
#
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig[(anti_bar_drug_first_level_first_fig['month']>=btime) & (anti_bar_drug_first_level_first_fig['month']<=etime)]
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(anti_bar_drug_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
        # Use a horizontal legend placed above the plot
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,anti_bar_drug_first_level_first_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data behind level-1 figure 2 (antibiotics / bacteria detection / drug susceptibility)
def get_first_lev_second_fig_date(engine,btime,etime):
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '科室', '科室名称', 'num'])
bus_dic = {'8种耐药菌检出': f""" select '8种耐药菌检出' as 业务类型, t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
"限制级特殊级抗菌药物使用" : f"""select '限制级特殊级抗菌药物使用' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from ANTIBIOTICS where ALEVEL in ('限制类', '特殊类')
and substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
'药敏结果为耐药': f""" select '药敏结果为耐药' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from DRUGSUSCEPTIBILITY where SUSCEPTIBILITY like '%耐药%'
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
"""
}
for bus in bus_dic:
temp = pd.read_sql(bus_dic[bus],con=engine)
temp = temp[0:8]
res_数据关键字缺失及汇总 = res_数据关键字缺失及汇总.append(temp)
return res_数据关键字缺失及汇总
# Update level-1 figure 2
@app.callback(
Output('anti_bar_drug_first_level_second_fig','figure'),
Output('anti_bar_drug_first_level_second_fig_data','data'),
# Output('rank_month_choice','min'),
# Output('rank_month_choice','max'),
# Output('rank_month_choice','value'),
# Output('rank_month_choice','marks'),
Input('anti_bar_drug_first_level_second_fig_data','data'),
# Input('rank_month_choice','value'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# Input('rank_month_choice','marks'),
# prevent_initial_call=True
)
# def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,rank_month_choice,db_con_url,count_time,marks):
def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,db_con_url,count_time):
# def unixTimeMillis(dt):
# return int(time.mktime(dt.timetuple()))
#
# def unixToDatetime(unix):
# return pd.to_datetime(unix, unit='s')
#
# def getMarks(start, end, Nth=100):
# result = {}
# for i, date in enumerate(daterange):
# result[unixTimeMillis(date)] = str(date.strftime('%Y-%m'))
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
min = dash.no_update
max = dash.no_update
value = dash.no_update
marks = dash.no_update
if anti_bar_drug_first_level_second_fig_data is None:
anti_bar_drug_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime+'-01', periods=((end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# marks = getMarks(daterange.min(), daterange.max())
else:
anti_bar_drug_first_level_second_fig_data = json.loads(anti_bar_drug_first_level_second_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps( anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime + '-01', periods=( (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# print(value)
# marks = getMarks(daterange.min(), daterange.max())
else:
if anti_bar_drug_first_level_second_fig_data['btime'] != btime or anti_bar_drug_first_level_second_fig_data['etime'] != etime:
# if rank_month_choice is not None and len(rank_month_choice)>0:
# print(rank_month_choice)
# btime1 = time.gmtime(rank_month_choice[0])
# etime1 = time.gmtime(rank_month_choice[1])
# btime = f"{btime1.tm_year}-0{btime1.tm_mon}" if btime1.tm_mon<10 else f"{btime1.tm_year}-{btime1.tm_mon}"
# etime = f"{etime1.tm_year}-0{etime1.tm_mon}" if etime1.tm_mon<10 else f"{etime1.tm_year}-{etime1.tm_mon}"
# print(btime,etime)
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
anti_bar_drug_first_level_second_fig_data = dash.no_update
# print("一级第二张图数据:")
# print(rank_month_choice)
# print(marks)
bar = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='8种耐药菌检出']
anti = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='限制级特殊级抗菌药物使用']
drug = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='药敏结果为耐药']
bar = bar.sort_values(['num'], ascending=True)
anti = anti.sort_values(['num'], ascending=True)
drug = drug.sort_values(['num'], ascending=True)
fig = make_subplots(rows=1,cols=3)
fig.add_trace(
go.Bar(x=anti['num'], y=anti['科室名称'], orientation='h', name='给药', marker_color=px.colors.qualitative.Dark24[0]),
row=1, col=1
)
fig.add_trace(
go.Bar(x=drug['num'], y=drug['科室名称'], orientation='h', name='药敏',
marker_color=px.colors.qualitative.Dark24[1]),
row=1, col=2,
)
fig.add_trace(
go.Bar(x=bar['num'],y=bar['科室名称'],orientation='h',name='菌检出', marker_color=px.colors.qualitative.Dark24[2]),
row=1,col=3
)
        # Use a horizontal legend placed above the plot
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
return fig,anti_bar_drug_first_level_second_fig_data
# return fig,anti_bar_drug_first_level_second_fig_data,min ,max ,value ,marks
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind antibiotics level-2 figure 1
def get_second_lev_first_fig_date(engine,btime,etime):
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
bus_dic = {'用药目的': f" select '用药目的缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'药物等级': f" select '药物等级缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'医嘱开始时间大于结束时间': f" select '医嘱开始时间大于结束时间' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime group by substr(BEGINTIME,1,7) ",
'医嘱时间在出入院时间之外' : f""" select '医嘱时间在出入院时间之外' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
group by substr(BEGINTIME,1,7)
""",
}
for bus in bus_dic:
res_数据科室信息缺失及汇总 = res_数据科室信息缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据科室信息缺失及汇总
# Update level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig','figure'),
Output('anti_second_level_first_fig_data','data'),
Input('anti_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(anti_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_first_fig_data is None:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data={}
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split', date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig_data = json.loads(anti_second_level_first_fig_data)
if db_con_url['hosname'] != anti_second_level_first_fig_data['hosname']:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
if anti_second_level_first_fig_data['btime'] != btime or anti_second_level_first_fig_data['etime'] != etime:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig = pd.read_json(anti_second_level_first_fig_data['anti_second_level_first_fig'], orient='split')
anti_second_level_first_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
bus_opts = anti_second_level_first_fig[['业务类型']].drop_duplicates().reset_index(drop=True)
# res_数据科室信息缺失及汇总 = anti_second_level_first_fig.sort_values(['month','业务类型'])
print(anti_second_level_first_fig)
for tem,bus in bus_opts.iterrows():
print(tem,)
print(bus,)
temp = anti_second_level_first_fig[anti_second_level_first_fig['业务类型']==bus['业务类型']]
print(temp)
temp = temp.sort_values(['month'])
if temp.shape[0]>0:
fig_概览一级_科室映射缺失.add_trace(
go.Scatter(x=temp['month'], y=temp['num'], name=bus['业务类型'] ,marker_color=px.colors.qualitative.Dark24[tem] )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="问题数量")
fig_概览一级_科室映射缺失.update_xaxes(title_text="月份")
return fig_概览一级_科室映射缺失,anti_second_level_first_fig_data
# Download the detail records behind level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig_date_detail', 'data'),
Input('anti_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'用药目的缺失': f" select * from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') ",
'药物等级缺失': f" select t1.* from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
'医嘱开始时间大于结束时间': f" select t1.* from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime ",
'医嘱时间在出入院时间之外': f""" select t1.* from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
                                and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}抗菌药物问题数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind antibiotics level-2 figure 2
def get_second_level_second_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.read_sql(f" select ANAME as 抗菌药物,count(1) as num , substr(BEGINTIME,1,7) as 月份 from antibiotics where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' group by substr(BEGINTIME,1,7),ANAME ",con=engine)
return res_业务逻辑问题数据汇总
# Update level-2 figure 2
@app.callback(
Output('anti_second_level_second_fig','figure'),
Output('anti_second_level_second_fig_data','data'),
Input('anti_second_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(anti_second_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_second_fig_data is None:
anti_second_level_second_fig_data = {}
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split', date_format='iso')
anti_second_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
anti_second_level_second_fig_data = json.loads(anti_second_level_second_fig_data)
if db_con_url['hosname'] != anti_second_level_second_fig_data['hosname']:
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split',date_format='iso')
anti_second_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
if anti_second_level_second_fig_data['btime'] != btime or anti_second_level_second_fig_data['etime'] != etime:
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split',date_format='iso')
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
anti_second_level_second_fig = pd.read_json(anti_second_level_second_fig_data['anti_second_level_second_fig'], orient='split')
anti_second_level_second_fig_data = dash.no_update
antis_dict = discriminated_antis(anti_second_level_second_fig[['抗菌药物']].drop_duplicates())
anti_second_level_second_fig = anti_second_level_second_fig.merge(antis_dict,on='抗菌药物',how='left')
anti_second_level_second_fig['抗菌药物通用名'] = np.where(anti_second_level_second_fig['抗菌药物通用名'].isnull(),anti_second_level_second_fig['抗菌药物'],anti_second_level_second_fig['抗菌药物通用名'])
anti_second_level_second_fig = anti_second_level_second_fig.sort_values(['月份'])
fig = px.bar(anti_second_level_second_fig, x="月份", y="num", color='抗菌药物通用名' ,color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig.update_yaxes(title_text="医嘱数量", )
fig.update_xaxes(title_text="月份", )
return fig,anti_second_level_second_fig_data
# ----------------------------------------------------------------------------------------------------- Level-2 figure 3 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data behind antibiotics level-2 figure 3
def get_second_level_third_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.read_sql(
f" select ALEVEL as 抗菌药物等级,count(1) as num , substr(BEGINTIME,1,7) as 月份 from antibiotics where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ALEVEL is not null group by substr(BEGINTIME,1,7),ALEVEL ",
con=engine)
return res_业务逻辑问题数据汇总
# Update level-2 figure 3
@app.callback(
Output('anti_second_level_third_fig','figure'),
Output('anti_second_level_third_fig_data', 'data'),
Input('anti_second_level_third_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(anti_second_level_third_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_third_fig_data is None:
anti_second_level_third_fig_data = {}
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json( orient='split', date_format='iso')
anti_second_level_third_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
anti_second_level_third_fig_data = json.loads(anti_second_level_third_fig_data)
if db_con_url['hosname'] != anti_second_level_third_fig_data['hosname']:
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json(orient='split', date_format='iso')
anti_second_level_third_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
if anti_second_level_third_fig_data['btime'] != btime or anti_second_level_third_fig_data['etime'] != etime:
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json(orient='split', date_format='iso')
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
anti_second_level_third_fig = pd.read_json( anti_second_level_third_fig_data['anti_second_level_third_fig'], orient='split')
anti_second_level_third_fig_data = dash.no_update
anti_second_level_third_fig = anti_second_level_third_fig.sort_values(['月份'])
fig = px.bar(anti_second_level_third_fig, x="月份", y="num", color='抗菌药物等级', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="医嘱数量", )
fig.update_xaxes(title_text="月份", )
return fig,anti_second_level_third_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data behind bacteria-detection level-3 figure 1
def get_third_level_first_fig_date(engine,btime,etime):
res = pd.read_sql(f"""select substr(REQUESTTIME,1,7) as month,BACTERIA as 菌,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}'
group by BACTERIA, substr(REQUESTTIME,1,7)
""",con=engine)
return res
# Update level-3 figure 1
@app.callback(
Output('bar_third_level_first_fig', 'figure'),
Output('bar_third_level_first_fig_data', 'data'),
Input('bar_third_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(bar_third_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if bar_third_level_first_fig_data is None:
bar_third_level_first_fig_data = {}
bar_third_level_first_fig = get_third_level_first_fig_date(engine, btime, etime)
bar_third_level_first_fig_data['bar_third_level_first_fig'] = bar_third_level_first_fig.to_json( orient='split', date_format='iso')
bar_third_level_first_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_first_fig_data['btime'] = btime
bar_third_level_first_fig_data['etime'] = etime
bar_third_level_first_fig_data = json.dumps(bar_third_level_first_fig_data)
else:
bar_third_level_first_fig_data = json.loads(bar_third_level_first_fig_data)
if db_con_url['hosname'] != bar_third_level_first_fig_data['hosname']:
bar_third_level_first_fig = get_third_level_first_fig_date(engine, btime, etime)
bar_third_level_first_fig_data['bar_third_level_first_fig'] = bar_third_level_first_fig.to_json(orient='split', date_format='iso')
bar_third_level_first_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_first_fig_data['btime'] = btime
bar_third_level_first_fig_data['etime'] = etime
bar_third_level_first_fig_data = json.dumps(bar_third_level_first_fig_data)
else:
if bar_third_level_first_fig_data['btime'] != btime or bar_third_level_first_fig_data['etime'] != etime:
bar_third_level_first_fig = get_third_level_first_fig_date(engine, btime, etime)
bar_third_level_first_fig_data['bar_third_level_first_fig'] = bar_third_level_first_fig.to_json(orient='split', date_format='iso')
bar_third_level_first_fig_data['btime'] = btime
bar_third_level_first_fig_data['etime'] = etime
bar_third_level_first_fig_data = json.dumps(bar_third_level_first_fig_data)
else:
bar_third_level_first_fig = pd.read_json( bar_third_level_first_fig_data['bar_third_level_first_fig'], orient='split')
bar_third_level_first_fig_data = dash.no_update
bar_third_level_first_fig = bar_third_level_first_fig.sort_values(['month' ])
print(bar_third_level_first_fig)
fig1 = px.line(bar_third_level_first_fig, x='month', y= 'num' , color= '菌', color_discrete_sequence=px.colors.qualitative.Dark24)
fig1.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig1.update_yaxes(title_text= '菌检出数量', )
fig1.update_xaxes(title_text= '月份', )
return fig1,bar_third_level_first_fig_data
# # ----------------------------------------------------------------------------------------------------- Third-level figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch data for the third-level second figure (bacteria detections)
def get_third_level_second_fig_date(engine,btime,etime):
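    # Build a per-month summary of data-quality issues in bacteria-detection records:
    # each bus_dic entry counts one kind of missing field or inconsistent timestamp.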
res_信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month'])
bus_dic = {
'菌检出类型': f" select '菌检出类型缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and BTYPE is null group by substr(REQUESTTIME,1,7) ",
'院内外': f" select '院内外标识缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and OUTSIDE is null group by substr(REQUESTTIME,1,7) ",
'标本缺失': f" select '标本缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SPECIMEN is null group by substr(REQUESTTIME,1,7) ",
'检验项目': f" select '检验项目缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SUBJECT is null group by substr(REQUESTTIME,1,7) ",
'申请时间大于报告时间': f" select '菌检出申请时间大于报告时间' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>REPORTTIME group by substr(REQUESTTIME,1,7) ",
'申请时间在出入院时间之外': f""" select '菌检出申请时间在出入院时间之外' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
group by substr(REQUESTTIME,1,7)
""",
}
for bus in bus_dic:
res_信息缺失及汇总 = res_信息缺失及汇总.append(pd.read_sql(bus_dic[bus], con=engine))
return res_信息缺失及汇总
# Update the third-level second figure
@app.callback(
Output('bar_third_level_second_fig', 'figure'),
Output('bar_third_level_second_fig_data', 'data'),
Input('bar_third_level_second_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_second_fig(bar_third_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if bar_third_level_second_fig_data is None:
bar_third_level_second_fig_data = {}
bar_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
bar_third_level_second_fig_data['bar_third_level_second_fig'] = bar_third_level_second_fig.to_json( orient='split', date_format='iso')
bar_third_level_second_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_second_fig_data['btime'] = btime
bar_third_level_second_fig_data['etime'] = etime
bar_third_level_second_fig_data = json.dumps(bar_third_level_second_fig_data)
else:
bar_third_level_second_fig_data = json.loads(bar_third_level_second_fig_data)
if db_con_url['hosname'] != bar_third_level_second_fig_data['hosname']:
bar_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
bar_third_level_second_fig_data['bar_third_level_second_fig'] = bar_third_level_second_fig.to_json(orient='split', date_format='iso')
bar_third_level_second_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_second_fig_data['btime'] = btime
bar_third_level_second_fig_data['etime'] = etime
bar_third_level_second_fig_data = json.dumps(bar_third_level_second_fig_data)
else:
if bar_third_level_second_fig_data['btime'] != btime or bar_third_level_second_fig_data['etime'] != etime:
bar_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
bar_third_level_second_fig_data['bar_third_level_second_fig'] = bar_third_level_second_fig.to_json(orient='split', date_format='iso')
bar_third_level_second_fig_data['btime'] = btime
bar_third_level_second_fig_data['etime'] = etime
bar_third_level_second_fig_data = json.dumps(bar_third_level_second_fig_data)
else:
bar_third_level_second_fig = pd.read_json( bar_third_level_second_fig_data['bar_third_level_second_fig'], orient='split')
bar_third_level_second_fig_data = dash.no_update
bar_third_level_second_fig = bar_third_level_second_fig.sort_values(['month' ])
fig1 = px.line(bar_third_level_second_fig, x='month', y= 'num' , color= '业务类型', color_discrete_sequence=px.colors.qualitative.Dark24)
fig1.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig1.update_yaxes(title_text= '数据缺失数量', )
fig1.update_xaxes(title_text= '月份', )
return fig1,bar_third_level_second_fig_data
# Download details for third-level figure 2
@app.callback(
Output('bar_third_level_second_fig_data_detail', 'data'),
Input('bar_third_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_third_level_second_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'菌检出类型缺失': f" select * from BACTERIA where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and BTYPE is null ",
'院内外标识缺失': f" select t1.* from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and OUTSIDE is null ",
'标本缺失': f" select t1.* from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SPECIMEN is null ",
'检验项目缺失': f" select t1.* from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SUBJECT is null ",
'申请时间大于报告时间': f" select t1.* from BACTERIA t1 where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>REPORTTIME",
'申请时间在出入院时间之外': f""" select t1.* from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
}
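            # Write each issue's detail query to its own sheet of an in-memory Excel workbook and
            # stream the bytes to the browser; if a query fails, a sheet with an error note is written instead.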
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}菌检出问题数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Fourth-level figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Fetch data for the fourth-level first figure (drug susceptibility)
def get_fourth_level_first_fig_date(engine,btime,etime):
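    # Per-month counts of data-quality issues in drug-susceptibility records (missing result,
    # missing specimen, request time after report time, request time outside the hospital stay).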
res_信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month'])
bus_dic = {
'药敏结果': f" select '药敏结果缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and SUSCEPTIBILITY is null group by substr(REQUESTTIME,1,7) ",
'标本缺失': f" select '标本缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SPECIMEN is null group by substr(REQUESTTIME,1,7) ",
'申请时间大于报告时间': f" select '药敏申请时间大于报告时间' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY t1 where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>REPORTTIME group by substr(REQUESTTIME,1,7) ",
'申请时间在出入院时间之外': f""" select '药敏申请时间在出入院时间之外' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
group by substr(REQUESTTIME,1,7)
""",
}
for bus in bus_dic:
res_信息缺失及汇总 = res_信息缺失及汇总.append(pd.read_sql(bus_dic[bus], con=engine))
return res_信息缺失及汇总
# Update the fourth-level first figure
@app.callback(
Output('drug_fourth_level_first_fig', 'figure'),
Output('drug_fourth_level_first_fig_data', 'data'),
Input('drug_fourth_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_fourth_level_first_fig(drug_fourth_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if drug_fourth_level_first_fig_data is None:
drug_fourth_level_first_fig_data = {}
drug_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
drug_fourth_level_first_fig_data['drug_fourth_level_first_fig'] = drug_fourth_level_first_fig.to_json( orient='split', date_format='iso')
drug_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
drug_fourth_level_first_fig_data['btime'] = btime
drug_fourth_level_first_fig_data['etime'] = etime
drug_fourth_level_first_fig_data = json.dumps(drug_fourth_level_first_fig_data)
else:
drug_fourth_level_first_fig_data = json.loads(drug_fourth_level_first_fig_data)
if db_con_url['hosname'] != drug_fourth_level_first_fig_data['hosname']:
drug_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
drug_fourth_level_first_fig_data['drug_fourth_level_first_fig'] = drug_fourth_level_first_fig.to_json(orient='split', date_format='iso')
drug_fourth_level_first_fig_data['hosname'] = db_con_url['hosname']
drug_fourth_level_first_fig_data['btime'] = btime
drug_fourth_level_first_fig_data['etime'] = etime
drug_fourth_level_first_fig_data = json.dumps(drug_fourth_level_first_fig_data)
else:
if drug_fourth_level_first_fig_data['btime'] != btime or drug_fourth_level_first_fig_data['etime'] != etime:
drug_fourth_level_first_fig = get_fourth_level_first_fig_date(engine, btime, etime)
drug_fourth_level_first_fig_data['drug_fourth_level_first_fig'] = drug_fourth_level_first_fig.to_json(orient='split', date_format='iso')
drug_fourth_level_first_fig_data['btime'] = btime
drug_fourth_level_first_fig_data['etime'] = etime
drug_fourth_level_first_fig_data = json.dumps(drug_fourth_level_first_fig_data)
else:
drug_fourth_level_first_fig = pd.read_json( drug_fourth_level_first_fig_data['drug_fourth_level_first_fig'], orient='split')
drug_fourth_level_first_fig_data = dash.no_update
drug_fourth_level_first_fig = drug_fourth_level_first_fig.sort_values(['month' ])
fig1 = px.line(drug_fourth_level_first_fig, x='month', y= 'num' , color= '业务类型', color_discrete_sequence=px.colors.qualitative.Dark24)
fig1.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig1.update_yaxes(title_text= '数据缺失数量', )
fig1.update_xaxes(title_text= '月份', )
return fig1,drug_fourth_level_first_fig_data
# Download details for fourth-level figure 1
@app.callback(
Output('drug_fourth_level_first_fig_data_detail', 'data'),
Input('drug_fourth_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_fourth_level_first_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'药敏结果缺失': f" select * from DRUGSUSCEPTIBILITY where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and SUSCEPTIBILITY is null ",
'标本缺失': f" select t1.* from DRUGSUSCEPTIBILITY t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SPECIMEN is null ",
'申请时间大于报告时间': f" select t1.* from DRUGSUSCEPTIBILITY t1 where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>REPORTTIME ",
'申请时间在出入院时间之外': f""" select t1.* from DRUGSUSCEPTIBILITY t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}药敏问题数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Fourth-level figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch data for the fourth-level second figure (drug susceptibility)
def get_fourth_level_second_fig_date(engine,btime,etime):
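    # Monthly count of bacteria detections with no matching drug-susceptibility record
    # (matched on caseid/testno), excluding sterile results and yeasts/fungi. Note that the
    # "(caseid,testno) not in (...)" tuple form relies on the target database supporting
    # multi-column IN (e.g. Oracle or MySQL).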
res = pd.read_sql(f"""select count(1) as num,substr(REQUESTTIME,1,7) as month from (
select * from bacteria where (caseid,testno) not in (select caseid,testno from drugsusceptibility) and bacteria !='无菌' and bacteria not like '%酵母%' and bacteria not like '%念珠%' and bacteria not like '%真菌%'
                        ) t1 where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' group by substr(REQUESTTIME,1,7)
""",con=engine)
return res
# Update the fourth-level second figure
@app.callback(
Output('drug_fourth_level_second_fig', 'figure'),
Output('drug_fourth_level_second_fig_data', 'data'),
Input('drug_fourth_level_second_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_fourth_level_second_fig(drug_fourth_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if drug_fourth_level_second_fig_data is None:
drug_fourth_level_second_fig_data = {}
            drug_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
drug_fourth_level_second_fig_data['drug_fourth_level_second_fig'] = drug_fourth_level_second_fig.to_json( orient='split', date_format='iso')
drug_fourth_level_second_fig_data['hosname'] = db_con_url['hosname']
drug_fourth_level_second_fig_data['btime'] = btime
drug_fourth_level_second_fig_data['etime'] = etime
drug_fourth_level_second_fig_data = json.dumps(drug_fourth_level_second_fig_data)
else:
drug_fourth_level_second_fig_data = json.loads(drug_fourth_level_second_fig_data)
if db_con_url['hosname'] != drug_fourth_level_second_fig_data['hosname']:
                drug_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
drug_fourth_level_second_fig_data['drug_fourth_level_second_fig'] = drug_fourth_level_second_fig.to_json(orient='split', date_format='iso')
drug_fourth_level_second_fig_data['hosname'] = db_con_url['hosname']
drug_fourth_level_second_fig_data['btime'] = btime
drug_fourth_level_second_fig_data['etime'] = etime
drug_fourth_level_second_fig_data = json.dumps(drug_fourth_level_second_fig_data)
else:
if drug_fourth_level_second_fig_data['btime'] != btime or drug_fourth_level_second_fig_data['etime'] != etime:
                    drug_fourth_level_second_fig = get_fourth_level_second_fig_date(engine, btime, etime)
drug_fourth_level_second_fig_data['drug_fourth_level_second_fig'] = drug_fourth_level_second_fig.to_json(orient='split', date_format='iso')
drug_fourth_level_second_fig_data['btime'] = btime
drug_fourth_level_second_fig_data['etime'] = etime
drug_fourth_level_second_fig_data = json.dumps(drug_fourth_level_second_fig_data)
else:
drug_fourth_level_second_fig = pd.read_json( drug_fourth_level_second_fig_data['drug_fourth_level_second_fig'], orient='split')
drug_fourth_level_second_fig_data = dash.no_update
drug_fourth_level_second_fig = drug_fourth_level_second_fig.sort_values(['month'])
fig1 = px.line(drug_fourth_level_second_fig, x='month', y= 'num' , color_discrete_sequence=px.colors.qualitative.Dark24)
fig1.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig1.update_yaxes(title_text= '有菌检出无药敏数据量', )
fig1.update_xaxes(title_text= '月份', )
return fig1,drug_fourth_level_second_fig_data
# Download details for fourth-level figure 2
@app.callback(
Output('drug_fourth_level_second_fig_data_detail', 'data'),
Input('drug_fourth_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_fourth_level_second_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'有菌检出结果无药敏结果数据': f""" select t1.* from (
select * from bacteria where (caseid,testno) not in (select caseid,testno from drugsusceptibility) and bacteria !='无菌' and bacteria not like '%酵母%' and bacteria not like '%念珠%' and bacteria not like '%真菌%'
) t1 where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}'
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}有菌检出结果无药敏结果数据明细.xlsx')
else:
return dash.no_update
#
# # ----------------------------------------------------------------------------------------------------- Download all ----------------------------------------------------------------------------------------------------------------------
# Download the statistics shown on this page
@app.callback(
Output("down-anti-bar-drug", "data"),
Input("anti-all-count-data-down", "n_clicks"),
Input("anti_bar_drug_first_level_first_fig_data", "data"),
Input("anti_bar_drug_first_level_second_fig_data", "data"),
Input("anti_second_level_first_fig_data", "data"),
Input("anti_second_level_second_fig_data", "data"),
Input("anti_second_level_third_fig_data", "data"),
Input("bar_third_level_first_fig_data", "data"),
Input("bar_third_level_second_fig_data", "data"),
Input("drug_fourth_level_first_fig_data", "data"),
Input("drug_fourth_level_second_fig_data", "data"),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def get_all_count_data(n_clicks, anti_bar_drug_first_level_first_fig_data,
anti_bar_drug_first_level_second_fig_data,
anti_second_level_first_fig_data,
anti_second_level_second_fig_data,
anti_second_level_third_fig_data,
bar_third_level_first_fig_data,
bar_third_level_second_fig_data,
drug_fourth_level_first_fig_data,
drug_fourth_level_second_fig_data,
db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
hosName = db_con_url['hosname']
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
now_time = str(datetime.now())[0:19].replace(' ', '_').replace(':', '_')
if anti_bar_drug_first_level_first_fig_data is not None and anti_bar_drug_first_level_second_fig_data is not None and anti_second_level_first_fig_data is not None and \
anti_second_level_second_fig_data is not None and anti_second_level_third_fig_data is not None and bar_third_level_first_fig_data is not None and \
bar_third_level_second_fig_data is not None and drug_fourth_level_first_fig_data is not None and drug_fourth_level_second_fig_data is not None :
anti_bar_drug_first_level_first_fig_data = json.loads(anti_bar_drug_first_level_first_fig_data )
anti_bar_drug_first_level_second_fig_data = json.loads(anti_bar_drug_first_level_second_fig_data )
anti_second_level_first_fig_data = json.loads(anti_second_level_first_fig_data )
anti_second_level_second_fig_data = json.loads(anti_second_level_second_fig_data )
anti_second_level_third_fig_data = json.loads(anti_second_level_third_fig_data )
bar_third_level_first_fig_data = json.loads(bar_third_level_first_fig_data )
bar_third_level_second_fig_data = json.loads(bar_third_level_second_fig_data )
drug_fourth_level_first_fig_data = json.loads(drug_fourth_level_first_fig_data )
drug_fourth_level_second_fig_data = json.loads(drug_fourth_level_second_fig_data )
if anti_bar_drug_first_level_first_fig_data['hosname'] == hosName and anti_bar_drug_first_level_first_fig_data['btime'] == btime and anti_bar_drug_first_level_first_fig_data['etime'] == etime and \
anti_bar_drug_first_level_second_fig_data['hosname'] == hosName and anti_bar_drug_first_level_second_fig_data['btime'] == btime and anti_bar_drug_first_level_second_fig_data['etime'] == etime and \
anti_second_level_first_fig_data['hosname'] == hosName and anti_second_level_first_fig_data['btime'] == btime and anti_second_level_first_fig_data['etime'] == etime and \
anti_second_level_second_fig_data['hosname'] == hosName and anti_second_level_second_fig_data['btime'] == btime and anti_second_level_second_fig_data['etime'] == etime and \
anti_second_level_third_fig_data['hosname'] == hosName and anti_second_level_third_fig_data['btime'] == btime and anti_second_level_third_fig_data['etime'] == etime and \
bar_third_level_first_fig_data['hosname'] == hosName and bar_third_level_first_fig_data['btime'] == btime and bar_third_level_first_fig_data['etime'] == etime and \
bar_third_level_second_fig_data['hosname'] == hosName and bar_third_level_second_fig_data['btime'] == btime and bar_third_level_second_fig_data['etime'] == etime and \
drug_fourth_level_first_fig_data['hosname'] == hosName and drug_fourth_level_first_fig_data['btime'] == btime and drug_fourth_level_first_fig_data['etime'] == etime and \
                    drug_fourth_level_second_fig_data['hosname'] == hosName and drug_fourth_level_second_fig_data['btime'] == btime and drug_fourth_level_second_fig_data['etime'] == etime :
anti_bar_drug_first_level_first_fig = pd.read_json(anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'], orient='split')
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig[(anti_bar_drug_first_level_first_fig['month'] >= btime) & (anti_bar_drug_first_level_first_fig['month'] <= etime)]
                    anti_bar_drug_first_level_second_fig = pd.read_json( anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
import pandas as pd
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from scipy import stats
import scikit_posthocs as sp
from decimal import Decimal
from math import log10, floor
pd.options.mode.chained_assignment = None # default='warn'
class ExperimentResult(object):
#inputs
df_x: pd.DataFrame
df_eff_in = [pd.DataFrame()] * 4 #change this number based on number of plates
df_vero_in = [pd.DataFrame()] * 4
# temporary storage
df_combined_ctrl: pd.DataFrame
df_combined_ctrl_2: pd.DataFrame
df_temp = [pd.DataFrame()] * 4
# output
df_eff: pd.DataFrame
df_vero: pd.DataFrame
df_all: pd.DataFrame
df_tab = [pd.DataFrame()] * 7
def __init__(self, file_name):
        xls = pd.ExcelFile(file_name)
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 18:03:13 2017
@author: lfiorito
"""
import pdb
import os
import logging
from collections import Counter
from functools import reduce
import numpy as np
import pandas as pd
from sandy.formats.records import read_cont
from sandy.formats import (mf1,
mf3,
mf4,
mf5,
mf8,
mf33,
mf34,
mf35,
)
from sandy.formats.utils import (
Xs,
Lpc,
Fy,
XsCov,
EdistrCov,
LpcCov,
triu_matrix,
corr2cov,
)
from sandy.settings import SandyError
from sandy.functions import find_nearest
__author__ = "<NAME>"
__all__ = ["Endf6", "Errorr", "Gendf"]
#def split_endf(text):
# """
# Read ENDF-6 formatted file and split it into columns based on field widths:
# C1 C2 L1 L2 N1 N2 MAT MF MT
# 11 11 11 11 11 11 4 2 3.
# Store list in dataframe.
# """
# from io import StringIO
# def read_float(x):
# try:
# return float(x[0] + x[1:].replace('+', 'E+').replace('-', 'E-'))
# except:
# return x
# widths = [11,11,11,11,11,11,4,2,3]
# columns = ["C1", "C2", "L1", "L2", "N1", "N2","MAT", "MF", "MT"]
# converters = dict(zip(columns[:6],[read_float]*6))
# frame = pd.read_fwf(StringIO(text), widths=widths, names=columns, converters=converters)
# return frame.query("MAT>0 & MF>0 & MT>0")
#
#
class _BaseFile(pd.DataFrame):
"""This class is to be inherited by all classes that parse and analyze
nuclear data evaluated files in ENDF-6 or derived (ERRORR) formats.
**Index**:
- MAT : (`int`) MAT number to identify the isotope
- MF : (`int`) MF number to identify the data type
- MT : (`int`) MT number to identify the reaction
**Columns**:
- TEXT : (`string`) MAT/MF/MT section reported as a single string
Attributes
----------
labels : `list` of `str`
        index labels MAT, MF and MT
Methods
-------
add_sections
Collapse two tapes into a single one
delete_sections
Delete sections from the dataframe
filter_by
Filter dataframe based on MAT, MF, MT lists
from_file
Create dataframe by reading a endf6 file
from_text
Create dataframe from endf6 text in string
Raises
------
`SandyError`
if the tape is empty
`SandyError`
if the same combination MAT/MF/MT is found more than once
"""
labels = ['MAT', 'MF', 'MT']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.empty:
raise SandyError("tape is empty")
self.index.names = self.labels
self.columns = ["TEXT"]
self.sort_index(level=self.labels, inplace=True)
if self.index.duplicated().any():
raise SandyError("found duplicate MAT/MF/MT")
@classmethod
def from_file(cls, file):
"""Create dataframe by reading a file.
Parameters
----------
file : `str`
file name
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Dataframe containing ENDF6 data grouped by MAT/MF/MT
"""
with open(file) as f:
text = f.read()
return cls.from_text(text)
@classmethod
def from_text(cls, text):
"""Create dataframe from endf6 text in string.
Parameters
----------
text : `str`
string containing the evaluated data
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Dataframe containing ENDF6 data grouped by MAT/MF/MT
"""
from io import StringIO
tape = pd.read_fwf(
StringIO(text),
widths = [66, 4, 2, 3],
names = ["TEXT", "MAT", "MF", "MT"],
            converters = {"MAT" : int, "MF" : int, "MT" : int},
usecols = cls.labels
)
tape["TEXT"] = text.splitlines(True)
tape = tape.loc[(tape.MAT>0) & (tape.MF>0) & (tape.MT>0)]. \
groupby(cls.labels). \
apply(lambda x: "".join(x.TEXT.values)). \
to_frame()
return cls(tape)
def add_sections(self, tape):
"""Collapse two tapes into a single one.
If MAT/MF/MT index is present in both tapes, take it from the second.
Parameters
----------
tape : `sandy.formats.endf6.BaseFile` or derived instance
dataframe for ENDF-6 formatted file
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
dataframe with merged content
"""
outdf = pd.concat([pd.DataFrame(self), tape]). \
reset_index(). \
drop_duplicates(self.labels, keep='last'). \
set_index(self.labels)
return self.__class__(outdf)
def delete_sections(self, *tuples):
"""Given a sequence of tuples (MAT,MF,MT), delete the corresponding sections
from the dataframe.
Parameters
----------
tuples : sequence of `tuple`
each tuple should have the format (MAT, MF, MT)
        To delete, say, a given MF independently from the MAT and MT, assign `None`
to the MAT and MT position in the tuple.
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
dataframe without given sections
"""
queries = []
for mat,mf,mt in tuples:
conditions = []
if mat is not None:
conditions.append("MAT == {}".format(mat))
if mf is not None:
conditions.append("MF == {}".format(mf))
if mt is not None:
conditions.append("MT == {}".format(mt))
if not conditions:
continue
queries.append("not (" + " & ".join(conditions) + ")")
if not queries:
logging.warn("given MAT/MF/MT sections were not found")
return self
else:
query = " & ".join(queries)
newdf = self.query(query)
return self.__class__(newdf)
def filter_by(self, listmat=None, listmf=None, listmt=None):
"""Filter dataframe based on MAT, MF, MT lists.
Parameters
----------
listmat : `list` or `None`
list of requested MAT values (default is `None`: use all MAT)
listmf : `list` or `None`
list of requested MF values (default is `None`: use all MF)
listmt : `list` or `None`
list of requested MT values (default is `None`: use all MT)
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Copy of the original instance with filtered MAT, MF and MT sections
"""
_listmat = range(1,10000) if listmat is None else listmat
_listmf = range(1,10000) if listmf is None else listmf
_listmt = range(1,10000) if listmt is None else listmt
cond_mat = self.index.get_level_values("MAT").isin(_listmat)
cond_mf = self.index.get_level_values("MF").isin(_listmf)
cond_mt = self.index.get_level_values("MT").isin(_listmt)
df = self.loc[cond_mat & cond_mf & cond_mt]
return self.__class__(df)
@property
def mat(self):
return sorted(self.index.get_level_values("MAT").unique())
@property
def mf(self):
return sorted(self.index.get_level_values("MF").unique())
@property
def mt(self):
return sorted(self.index.get_level_values("MT").unique())
def get_file_format(self):
"""Determine ENDF-6 format type by reading flags "NLIB" and "LRP" of first MAT in file:
* `NLIB = -11 | NLIB = -12` : errorr
* `NLIB = -1` : gendf
* `LRP = 2` : pendf
* `LRP != 2` : endf6
Returns
-------
`str`
type of ENDF-6 format
"""
lines = self.TEXT.loc[self.mat[0], 1, 451].splitlines()
C, i = read_cont(lines, 0)
if C.N1 == -11 or C.N1 == -12:
ftype = "errorr"
elif C.N1 == -1:
ftype = "gendf"
else:
if C.L1 == 2:
ftype = "pendf"
else:
ftype = "endf6"
return ftype
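# A minimal usage sketch (the file name below is hypothetical):
#   tape = Endf6.from_file("n-092_U_235.endf")
#   xs = tape.get_xs(listmt=[102])   # MT=102 is radiative capture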
class Endf6(_BaseFile):
"""Class to contain the content of ENDF-6 files, grouped by MAT/MF/MT.
**Index**:
- MAT : (`int`) MAT number to identify the isotope
- MF : (`int`) MF number to identify the data type
- MT : (`int`) MT number to identify the reaction
**Columns**:
- TEXT : (`string`) MAT/MF/MT section reported as a single string
Methods
-------
"""
def get_nsub(self):
"""Determine ENDF-6 sub-library type by reading flag "NSUB" of first MAT in file:
* `NSUB = 10` : Incident-Neutron Data
* `NSUB = 11` : Neutron-Induced Fission Product Yields
Returns
-------
`int`
NSUB value
"""
return self.read_section(self.mat[0], 1, 451)["NSUB"]
def read_section(self, mat, mf, mt):
"""Parse MAT/MF/MT section.
"""
if mf == 1:
foo = mf1.read
elif mf == 3:
foo = mf3.read
elif mf == 4:
foo = mf4.read
elif mf == 5:
foo = mf5.read
elif mf == 8:
foo = mf8.read
elif mf == 33 or mf == 31:
foo = mf33.read
elif mf == 34:
foo = mf34.read
elif mf == 35:
foo = mf35.read
else:
raise SandyError("SANDY cannot parse section MAT{}/MF{}/MT{}".format(mat,mf,mt))
if (mat,mf,mt) not in self.index:
raise SandyError("section MAT{}/MF{}/MT{} is not in tape".format(mat,mf,mt))
return foo(self.loc[mat,mf,mt].TEXT)
def write_string(self, title=" "*66, skip_title=False, skip_fend=False):
"""Collect all rows in `Endf6` and write them into string.
Parameters
----------
title : `str`
title of the file
skip_title : `bool`
do not write the title
skip_fend : `bool`
do not write the last FEND line
Returns
-------
`str`
"""
from .records import write_cont
tape = self.copy()
string = ""
if not skip_title:
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(title, 1, 0, 0, 0)
for mat,dfmat in tape.groupby('MAT', sort=True):
for mf,dfmf in dfmat.groupby('MF', sort=True):
for mt,dfmt in dfmf.groupby('MT', sort=True):
for text in dfmt.TEXT:
string += text.encode('ascii', 'replace').decode('ascii')
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), int(mat), int(mf), 0, 99999)
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), int(mat), 0, 0, 0)
string += "{:<66}{:4}{:2}{:3}{:5}\n".format(*write_cont(*[0]*6), 0, 0, 0, 0)
if not skip_fend:
string += "{:<66}{:4}{:2}{:3}{:5}".format(*write_cont(*[0]*6), -1, 0, 0, 0)
return string
def get_xs(self, listmat=None, listmt=None):
""" Extract selected cross sections (xs).
xs are linearized on unique grid.
Missing points are linearly interpolated (use zero when out of domain).
Conditions:
- Interpolation law must be lin-lin
- No duplicate points on energy grid
"""
condition = self.index.get_level_values("MF") == 3
tape = self[condition]
if listmat is not None:
conditions = [tape.index.get_level_values("MAT") == x for x in listmat]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
if listmt is not None:
conditions = [tape.index.get_level_values("MT") == x for x in listmt]
condition = reduce(lambda x,y: np.logical_or(x, y), conditions)
tape = tape[condition]
ListXs = []
for ix,text in tape.TEXT.iteritems():
X = self.read_section(*ix)
            xs = pd.Series(X["XS"], index=X["E"], name=(X["MAT"],X["MT"]))
import pandas as pd
class Evaluation(object):
def __init__(self, trueResultFileDir, preResultFileDir):
self.dfTrueResult = pd.DataFrame()
self.dfPreResult = pd.DataFrame()
self.acc = 0
        # preprocess scholar_final_truth.txt, which contains 5367 scholars in total
with open(trueResultFileDir, 'r', encoding = 'utf-8') as f:
trueResultList = list(f.read().split('\n')[:-1])
# 提取最终true数据中task2的部分
flag = 0
trueData = []
for item in trueResultList:
if item == '<task2>':
flag = 1
if item == '</task2>':
break
if flag == 1:
trueData.append(item)
trueDataList = trueData[2:]
        # store the task2 section of the ground-truth data as a DataFrame
        self.dfTrueResult = pd.DataFrame(columns=['author', 'Interest1', 'Interest2', 'Interest3'])
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
def read_hydrophobicity_scale(seq, isNew=False):
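    # Map each residue of `seq` to its whole-residue hydrophobicity (octanol) free energy by
    # merging on the one-letter code; `isNew` only changes how histidine is coded (HIS+ vs HIS0).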
seq_dataFrame = pd.DataFrame({"oneLetterCode":list(seq)})
HFscales = pd.read_table("~/opt/small_script/Whole_residue_HFscales.txt")
if not isNew:
# Octanol Scale
# new and old difference is at HIS.
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS+" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
else:
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS0" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
HFscales_with_oneLetterCode = HFscales.assign(oneLetterCode=HFscales.AA.str.upper().map(code)).dropna()
data = seq_dataFrame.merge(HFscales_with_oneLetterCode, on="oneLetterCode", how="left")
return data
def create_zim(seqFile, isNew=False):
a = seqFile
seq = getFromTerminal("cat " + a).rstrip()
data = read_hydrophobicity_scale(seq, isNew=isNew)
z = data["DGwoct"].values
np.savetxt("zim", z, fmt="%.2f")
def expand_grid(dictionary):
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
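# Example: expand_grid({"x": [1, 2], "y": ["a", "b"]}) gives a 4-row DataFrame holding every
# (x, y) combination, i.e. the Cartesian product of the input lists.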
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
tmp[21] = new_chain
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
# add 40 to the x
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
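    # A pair (i, j) counts as a native contact if their distance is below DISTANCE_CUTOFF and
    # the residues are at least MAX_OFFSET apart in sequence (the near-diagonal band is masked out).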
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
constacts = dis < DISTANCE_CUTOFF
constacts = constacts*native_contacts # remove non native contacts
return np.sum(constacts, axis=1).astype("float")
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
dis = []
all_res = []
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
def readPMF_basic(pre):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
print(line_number)
print(line_number/552)
    # 543 atoms + 9 header lines = 552 lines per dump frame
    n = 552
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
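    # Residue ranges use 2xov numbering and are shifted by 91 to index into each dump frame;
    # for every helix the cosine of the angle between its end-to-end vector and the membrane
    # normal (z axis) is written out per frame.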
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
b = np.array([0, 0, 1])
angle = a[2]/length(a) # in form of cos theta
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def structure_prediction_run(protein):
print(protein)
protocol_list = ["awsemer", "frag", "er"]
do = os.system
cd = os.chdir
cd(protein)
# run = "frag"
for protocol in protocol_list:
do("rm -r " + protocol)
do("mkdir -p " + protocol)
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
# do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
fileName = protein + "_multi.in"
backboneFile = "fix_backbone_coeff_" + protocol
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line.replace("fix_backbone_coeff_er", backboneFile)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
cd("..")
# do("")
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = set()
for atom in one:
residue, *_ = atom.split()
# print(residue)
all_residues.add(int(residue))
for test in range(int(i), int(i)+int(n)):
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def read_complete_temper_2(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False, disReal=False, dis_h56=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False):
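    # Merge the per-replica outputs of a temper (replica-exchange) run: lipid, rg, energy,
    # added-force and wham files are read for each of the n replicas and joined column-wise;
    # the optional flags pull in Q values, z coordinates, real distances, helix distances, etc.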
all_data_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if dis_h56:
tmp = pd.read_csv(location+f"distance_h56_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp1 = pd.read_csv(location+f"distance_h12_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp2 = pd.read_csv(location+f"distance_h34_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
tmp1.columns = tmp1.columns.str.strip()
tmp2.columns = tmp2.columns.str.strip()
wham = pd.concat([wham, tmp, tmp1, tmp2],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete_{i}.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
            wham = pd.concat([wham, z],axis=1)
"""
accounting.py
Accounting and Financial functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import datetime
import numpy as np
import pandas as pd
from pf.constants import DAYS_IN_YEAR
from pf.util import get_age
################################################################################################################################
# Financial Statements
################################################################################################################################
def calc_balance(accounts=None, category_dict=None):
"""
Calculate daily balances of grouped assets/liabilities based on `category_dict`s from `accounts`, returns a DataFrame.
Balance sheet is split into these sections:
Assets
Current
Cash
...
Long Term
Investments
Property
...
Liabilities
Current
Credit Card
...
Long Term
Loans
...
categories = {
'Assets' : {
'Current': {
# User category keys and account DataFrame columns list for values
'Cash & Cash Equivalents': [
('Cash', 'BofA Checking'),
('Cash', 'BofA Savings'),
...
],
'User Category': [...]
...
},
'Long Term': {...}
},
'Liabilities' : {
'Current': {...},
'Long Term': {...}
}
}
"""
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
balance_dict = {
(k0, k1, k2): accounts[v2].sum(axis=1) if v2 else pd.Series(0, index=accounts.index)
for k0, v0 in category_dict.iteritems()
for k1, v1 in v0.iteritems()
for k2, v2 in v1.iteritems()
}
# Convert to DataFrame
balance = pd.DataFrame(balance_dict)
return balance.fillna(0.0)
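# calc_balance returns one daily-balance column per (Category, Type, Item) tuple;
# balance_sheet() below turns that frame into a period-end statement with totals and percentages.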
def balance_sheet(balance=None, period=datetime.datetime.now().year):
"""
Calculate and return a balance sheet.
Balance will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
    All levels may be user defined by the category dictionary. The value of the last level must contain valid pandas DataFrame
column selectors, e.g. `Account Type` for single index column / level 0 access or `('Cash', 'Account Name')` for
multilevel indexing.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
balance = calc_balance(accounts, category_dict=categories)
balancesheet = balance_sheet(balance, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
balance_sheets = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_balance = pd.DataFrame(balance[p].iloc[-1])
p_balance.columns = ['$']
p_balance.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_balance[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
balance_df = pd.concat([p_balance, net])
# Calculate percentages of level 0
balance_df['%'] = 100.0 * balance_df.div(balance_df.sum(level=0), level=0)
        # Calculate hierarchical totals
l1_totals = balance_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = balance_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
balance_df = balance_df.combine_first(l1_totals)
balance_df = balance_df.combine_first(l0_totals)
# Update columns with period
balance_df.columns = pd.MultiIndex.from_product([[p], balance_df.columns])
# Add to main list
balance_sheets.append(balance_df)
# Concatenate all the periods together
balance_sheets_df = pd.concat(balance_sheets, 1)
return balance_sheets_df
def calc_income(paychecks=None, transactions=None, category_dict=None, tax_type=None):
"""
Calculate daily income of grouped revenue/expenses/taxes based on `category_dict`s from `paychecks` and `transactions`,
returns a DataFrame.
Income Statement is split into these sections:
Revenue
Operating
Technical Services
...
Non-Operating
Interest Income
Dividend & Capital Gains
...
Expenses
Operating
Medical
...
Non-Operating
...
Taxes
Operating
Federal
State
...
    All levels may be user defined by the category dictionary. However the last level must contain a dictionary
with at least a `category` key and set of categories for the value along with optional parameters.
```
'Revenue': {
'Operating': {
# Paychecks
'Technical Services': {
'source': 'paycheck', # Optional string to select data source, defaults to 'transactions'
'categories': {'Paycheck', ...}, # Required set of categories
'labels': set(), # Optional set of labels, defaults to set() if not passed in
'logic': '', # Optional 'not' string to set inverse of 'labels', defaults to ''
                'tax_type': '' # Optional string for tax ('realized' or 'unrealized'), defaults to 'realized'
},
'User Category': {...}
},
'Non-Operating': {
'User Category': {
'categories': {...}
}
}
},
'Expenses': {
'Operating': {...},
'Non-Operating': {..}
},
'Taxes': {
'Operating': {...},
'Non-Operating': {..}
}
```
"""
# Clean category
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if not v2.has_key('source'):
category_dict[k0][k1][k2]['source'] = 'transactions'
if not v2.has_key('labels'):
category_dict[k0][k1][k2]['labels'] = set()
if not v2.has_key('logic'):
category_dict[k0][k1][k2]['logic'] = ''
if not v2.has_key('agg'):
category_dict[k0][k1][k2]['agg'] = np.ones(len(category_dict[k0][k1][k2]['categories']))
if not v2.has_key('tax_type'):
category_dict[k0][k1][k2]['tax_type'] = 'realized'
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
income_dict = {}
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if v2['source'] == 'transactions':
income_dict[(k0, k1, k2)] = transactions[
(
# If it is in the category
transactions['Category'].isin(v2['categories'])
& transactions['Account Name'].isin(tax_type[v2['tax_type']])
) & (
# And if is has the correct label
(transactions['Labels'].apply(
lambda x: x.isdisjoint(v2['labels']) if v2['logic'] else not x.isdisjoint(v2['labels'])
)) |
# Or it does not have any labels
(transactions['Labels'].apply(lambda x: v2['labels'] == set()))
)
]['Amount']
else:
income_dict[(k0, k1, k2)] = (v2['agg'] * paychecks[list(v2['categories'])]).sum(axis=1)
# Convert to DataFrame
    cats = sorted(income_dict.keys())
income = pd.DataFrame(
data=[],
columns=pd.MultiIndex.from_tuples(cats),
index=pd.date_range(transactions.index[-1], transactions.index[0])
)
for cat in income_dict:
cat_df = pd.DataFrame(income_dict[cat].values, index=income_dict[cat].index, columns=pd.MultiIndex.from_tuples([cat]))
income[cat] = cat_df.groupby(lambda x: x.date()).sum()
return income.fillna(0.0)
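# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A minimal, self-contained call to `calc_income`, inferred from the function body
# above: `transactions` needs 'Category', 'Account Name', 'Labels' (sets) and
# 'Amount' columns on a newest-first DatetimeIndex, `paychecks` needs one column per
# paycheck category, and `tax_type` maps 'realized'/'unrealized' to sets of account
# names. The sample data, accounts and category names here are hypothetical.
def _example_calc_income():
    dates = pd.to_datetime(['2016-01-31', '2016-01-20', '2016-01-10', '2016-01-01'])  # newest first
    transactions = pd.DataFrame({
        'Category': ['Dividend & Cap Gains', 'Medical', 'Federal Tax', 'Dividend & Cap Gains'],
        'Account Name': ['Brokerage', 'Checking', 'Checking', 'Brokerage'],
        'Labels': [set(), set(), set(), set()],
        'Amount': [25.0, -80.0, -300.0, 30.0],
    }, index=dates)
    paychecks = pd.DataFrame({'Paycheck': [2000.0, 2000.0]},
                             index=pd.to_datetime(['2016-01-15', '2016-01-29']))
    categories = {
        'Revenue': {
            'Operating': {'Technical Services': {'source': 'paycheck',
                                                 'categories': {'Paycheck'}}},
            'Non-Operating': {'Investments': {'categories': {'Dividend & Cap Gains'}}},
        },
        'Expenses': {'Operating': {'Medical': {'categories': {'Medical'}}},
                     'Non-Operating': {}},
        'Taxes': {'Operating': {'Federal': {'categories': {'Federal Tax'}}},
                  'Non-Operating': {}},
    }
    tax_type = {'realized': {'Checking', 'Brokerage'}, 'unrealized': set()}
    # Returns a daily DataFrame with (Category, Type, Item) MultiIndex columns
    return calc_income(paychecks=paychecks, transactions=transactions,
                       category_dict=categories, tax_type=tax_type)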
def income_statement(income=None, period=datetime.datetime.now().year, nettax=None):
"""
Calculate and return an Income Statement.
    Income is summed over the given `period` time period, which defaults to the current year.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
    income = calc_income(paychecks=paychecks, transactions=transactions, category_dict=categories, tax_type=tax_type)
incomestatement = income_statement(income, period=2016)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
income_statements = []
for p in period:
# Force period to string and set default nettax
p = str(p)
nettax = nettax if nettax else {'Taxes'}
# Convert to DataFrame
p_income = pd.DataFrame(income[p].sum(), columns=['$'])
p_income.index.names = ['Category', 'Type', 'Item']
# Calculate percentages of level 0
p_income['%'] = 100.0 * p_income.div(p_income.sum(level=0), level=0)
        # Calculate hierarchical totals
l1_totals = p_income.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = p_income.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
p_income = p_income.combine_first(l1_totals)
p_income = p_income.combine_first(l0_totals)
# Calculate Net
before = [(x, 'Total', ' ') for x in set(p_income.index.levels[0]).difference(nettax)]
after = [(x, 'Total', ' ') for x in set(p_income.index.levels[0])]
net = pd.DataFrame({
'$': [
p_income.loc[before]['$'].sum(),
p_income.loc[after]['$'].sum(),
p_income.loc[after]['$'].sum()
]
}, index=pd.MultiIndex.from_tuples([
('Net', 'Net Income', 'Before Taxes'),
('Net', 'Net Income', 'After Taxes'),
('Net', 'Total', ' ')
]))
# Add Net
income_df = pd.concat([p_income, net])
# Update columns with period
income_df.columns = pd.MultiIndex.from_product([[p], income_df.columns])
# Add to main list
income_statements.append(income_df)
# Concatenate all the periods together
    income_statement_df = pd.concat(income_statements, axis=1)
return income_statement_df
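# Note (editor sketch): since `period` may also be a sequence, a multi-year comparison
# is presumably obtained with e.g. `income_statement(income, period=[2015, 2016])`,
# which yields MultiIndex columns of the form (year, '$') and (year, '%').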
def calc_cashflow(transactions=None, category_dict=None, tax_type=None):
"""
    Calculate daily cashflow, grouped into inflow/outflow according to `category_dict`,
    from `transactions`; returns a DataFrame.
Cashflow is split into these sections:
Inflow
Operating
Technical Services
...
Non-Operating
Interest Income
Dividend & Capital Gains
...
Outflow
Operating
Rent
Food
...
Non-Operating
Interest Payments
...
    All of the first 3 levels may be user defined by the category dictionary. However, the last level must contain a dictionary
    with at least a `categories` key whose value is a set of categories, along with optional parameters.
```
categories = {
'Inflow': {
'Operating': {
# Paychecks
'Technical Services': {
'categories': {'Paycheck', }, # Required set of categories
'labels': set(), # Optional set of labels, defaults to set() if not passed in
                    'logic': '',  # Optional 'not' string to set inverse of 'labels', defaults to ''
                    'tax_type': ''  # Optional string for tax ('realized' or 'unrealized'), defaults to 'realized'
},
'User Category': {...}
},
'Non-Operating': {
'User Category': {
'categories': {...}
}
}
},
'Outflow': {
'Operating': {...},
'Non-Operating': {..}
}
}
```
"""
    # Fill in defaults for optional leaf keys ('labels', 'logic', 'tax_type')
    # if they were not passed in with the category definition
    for k0, v0 in category_dict.items():
        for k1, v1 in v0.items():
            for k2, v2 in v1.items():
                if 'labels' not in v2:
                    category_dict[k0][k1][k2]['labels'] = set()
                if 'logic' not in v2:
                    category_dict[k0][k1][k2]['logic'] = ''
                if 'tax_type' not in v2:
                    category_dict[k0][k1][k2]['tax_type'] = 'realized'
# Aggregate transactions based on category definition, via 3 level dictionary comprehension
#pylint: disable=cell-var-from-loop
cashflow_dict = {
(k0, k1, k2): transactions[
# If it is in the category & in the tax type
(transactions['Category'].isin(v2['categories']) & transactions['Account Name'].isin(tax_type[v2['tax_type']])) &
(
            # And if it has the correct label
(transactions['Labels'].apply(
lambda x: x.isdisjoint(v2['labels']) if v2['logic'] else not x.isdisjoint(v2['labels'])
)) |
# Or it does not have any labels
(transactions['Labels'].apply(lambda x: v2['labels'] == set()))
)
]['Amount']
        for k0, v0 in category_dict.items()
        for k1, v1 in v0.items()
        for k2, v2 in v1.items()
}
# Convert to DataFrame
    cols = sorted(cashflow_dict.keys())
cashflow = pd.DataFrame(
data=[],
columns=pd.MultiIndex.from_tuples(cols),
index=pd.date_range(transactions.index[-1], transactions.index[0])
)
for cat in cashflow_dict:
c = pd.DataFrame(cashflow_dict[cat].values, index=cashflow_dict[cat].index, columns=pd.MultiIndex.from_tuples([cat]))
cashflow[cat] = c.groupby(lambda x: x.date()).sum()
return cashflow.fillna(0.0)
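# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A minimal, self-contained call to `calc_cashflow`, assuming the same transaction
# layout as above ('Category', 'Account Name', 'Labels', 'Amount' on a newest-first
# DatetimeIndex). The accounts and categories are hypothetical.
def _example_calc_cashflow():
    dates = pd.to_datetime(['2016-01-31', '2016-01-20', '2016-01-10', '2016-01-01'])  # newest first
    transactions = pd.DataFrame({
        'Category': ['Paycheck', 'Rent', 'Food', 'Paycheck'],
        'Account Name': ['Checking'] * 4,
        'Labels': [set(), set(), set(), set()],
        'Amount': [2000.0, -900.0, -50.0, 2000.0],
    }, index=dates)
    categories = {
        'Inflow': {'Operating': {'Technical Services': {'categories': {'Paycheck'}}},
                   'Non-Operating': {}},
        'Outflow': {'Operating': {'Living': {'categories': {'Rent', 'Food'}}},
                    'Non-Operating': {}},
    }
    tax_type = {'realized': {'Checking'}, 'unrealized': set()}
    # Returns a daily DataFrame with (Category, Type, Item) MultiIndex columns
    return calc_cashflow(transactions, category_dict=categories, tax_type=tax_type)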
def cashflow_statement(cashflow=None, period=datetime.datetime.now().year):
"""
Return a Cashflow Statement for a period from cashflow DataFrame.
    Cashflow is summed over the given `period` time period, which defaults to the current year.
    A Net section is calculated automatically.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
    cashflow = calc_cashflow(transactions, category_dict=categories, tax_type=tax_type)
cashflowstatement = cashflow_statement(cashflow, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
cashflow_statements = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_cashflow = pd.DataFrame(cashflow[p].sum(), columns=['$'])
p_cashflow.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_cashflow[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
cashflow_df = pd.concat([p_cashflow, net])
# Calculate percentages of level 0
cashflow_df['%'] = 100.0 * cashflow_df.div(cashflow_df.sum(level=0), level=0)
        # Calculate hierarchical totals
l1_totals = cashflow_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = cashflow_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
cashflow_df = cashflow_df.combine_first(l1_totals)
cashflow_df = cashflow_df.combine_first(l0_totals)
# Update columns with period
        cashflow_df.columns = pd.MultiIndex.from_product([[p], cashflow_df.columns])
        # Add to main list
        cashflow_statements.append(cashflow_df)
    # Concatenate all the periods together
    cashflow_statement_df = pd.concat(cashflow_statements, axis=1)
    return cashflow_statement_df
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
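        # Compare getattr(obj, method1)[key1] against getattr(obj, method2)[key2]
        # across the requested object kinds (series/frame/panel), index types and
        # axes; `fails`, when given, is the exception type that is expected instead
        # of a matching result.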
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
                # if we are in fails, then it's ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # dups indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raise a KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)  # api: pandas.util.testing.assert_frame_equal
import numpy as np
import pandas as pd
import subprocess as sp
import os
from matplotlib import pyplot as plt
from scipy.stats import pearsonr
from matplotlib.gridspec import GridSpec
import matplotlib.cm as cm
import seaborn as sns
from IPython.core.pylabtools import figsize
from pylab import *
import matplotlib.colors as mcolors
import matplotlib as mpl
mpl.rcParams["mathtext.fontset"] = "stix"
def plot_pca(pca_tab, psi_tab, mrna_tab, reads_tab, cj, event, mrna_min, plot_dir, cmap='viridis',
filter_cells=True, print_pseudotime=False, vmin=0, vmax=1, alpha_missing=0.2, width = 8.5):
# figsize(10,8)
#pass_cells = psi_tab.columns[(mrna_tab.loc[event] > mrna_min) & (rpm_tab.loc[event] >= rpm_tab.mean(axis=1).quantile(0.1))]
if filter_cells:
read_min = mrna_min*cj*(1+psi_tab.loc[event])
pass_cells = psi_tab.columns[(mrna_tab.loc[event] > mrna_min) & (reads_tab.loc[event] >= read_min)]
else:
pass_cells = psi_tab.columns[(mrna_tab.loc[event] > -1) & (reads_tab.loc[event] >= -1)]
figsize(width, 5)
fig = plt.figure()
ax = plt.subplot(1,1,1)
ax.scatter(pca_tab.PC1, pca_tab.PC2, c='gray', s=75, edgecolors='none', alpha=alpha_missing)
sc = ax.scatter(pca_tab.loc[pass_cells, 'PC1'], pca_tab.loc[pass_cells, 'PC2'],
c=psi_tab.loc[event, pass_cells], s=100, edgecolors='none', vmin=vmin, vmax=vmax, cmap=cmap)
ax.tick_params(labelsize=28, length=5)
cb = plt.colorbar(sc, shrink = 0.8)
cb.set_label(label='$\Psi$',size=28)
cb.ax.tick_params(labelsize=28, length=5)
cb.outline.set_visible(False)
#plt.plot(chen_pca.line_1, -chen_pca.line_2, c='red',linewidth=3, label='lineage')
ax.set_xlim([-65, 102])
ax.set_ylim([-55, 63])
# cb = plt.colorbar(sc, shrink = 0.8)
# cb.set_label(label='$\Psi$',size=28)
# cb.ax.tick_params(labelsize=28, length=5)
# cb.outline.set_visible(False)
#plt.legend(frameon=False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.tick_params(labelsize=28, length=5)
plt.ylabel('PC2', fontsize=28)
plt.xlabel('PC1', fontsize=28)
plt.title(event.split('_')[0], fontsize=28)
if filter_cells:
plt.savefig(plot_dir+event+'.pca.svg', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pca.pdf', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pca.png', dpi=300, bbox_inches='tight', transparent=True)
else:
plt.savefig(plot_dir+event+'.pca.no_filter.svg', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pca.no_filter.pdf', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pca.no_filter.png', dpi=300, bbox_inches='tight', transparent=True)
#plt.show()
if print_pseudotime:
####figsize(6, 4)
figsize(6, 4)
fig = plt.figure()
ax = plt.subplot(1,1,1)
ax.scatter(pca_tab.pseudotime, psi_tab.loc[event], c='skyblue', s=75, edgecolors='none', alpha=0.5)
ax.scatter(pca_tab.loc[pass_cells, 'pseudotime'], psi_tab.loc[event, pass_cells],
c='navy', s=150, edgecolors='none')
ax.set_ylim((-0.05,1.05))
#plt.plot(chen_pca.line_1, -chen_pca.line_2, c='red',linewidth=3, label='lineage')
#plt.legend(frameon=False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.tick_params(labelsize=28, length=5)
plt.ylabel('$\Psi$', fontsize=28)
plt.xlabel('pseudotime', fontsize=28)
plt.title(event.split('_')[0], fontsize=28)
if filter_cells:
plt.savefig(plot_dir+event+'.pseudotime.svg', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pseudotime.pdf', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pseudotime.png', dpi=300, bbox_inches='tight', transparent=True)
else:
plt.savefig(plot_dir+event+'.pseudotime.no_filter.svg', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pseudotime.no_filter.pdf', bbox_inches='tight', transparent=True)
plt.savefig(plot_dir+event+'.pseudotime.no_filter.png', dpi=300, bbox_inches='tight', transparent=True)
plt.show()
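# Illustrative usage sketch (added for documentation; not called anywhere in this script). The file
# names, the event id and the capture-efficiency value `cj` below are assumptions for demonstration
# only -- substitute your own tables, which must share the same cell columns and event index.
def _example_plot_pca_usage():
    pca_tab = pd.read_csv("pca_coordinates.tab", sep="\t", index_col=0)      # needs PC1, PC2 (and pseudotime) columns
    psi_tab = pd.read_csv("psi.tab", sep="\t", index_col=0)                  # events x cells
    mrna_tab = pd.read_csv("mrna_counts.tab", sep="\t", index_col=0)         # events x cells
    reads_tab = pd.read_csv("informative_reads.tab", sep="\t", index_col=0)  # events x cells
    plot_pca(pca_tab, psi_tab, mrna_tab, reads_tab,
             cj=0.1, event="Mbnl1_ex5", mrna_min=10, plot_dir="plots/",
             cmap="viridis", filter_cells=True, print_pseudotime=True)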
def get_bins_table(PSI_table, total_counts, nbins = 25, int_min = 0.2):
'''
Build a binned summary of the PSI distribution per event, sorted by mean expression, ready to plot as a heatmap.
'''
steps = 1/nbins
expression_sort = total_counts.mean(axis=1).sort_values().index
bins_data = pd.DataFrame()  # api: pandas.DataFrame
import secrets
import time
import random
import pandas as pd
from typing import Dict, Callable, Any
from cadCAD.configuration import Configuration
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from .specs import (
Deposit, DepositData, BeaconState,
SECONDS_PER_SLOT, SLOTS_PER_EPOCH,
initialize_beacon_state_from_eth1,
)
from .network import (
Network,
update_network,
disseminate_attestations,
disseminate_block,
disseminate_chunk_responses,
disseminate_bit_challenges,
knowledge_set,
)
from .utils.cadCADsupSUP import (
get_observed_psubs,
get_observed_initial_conditions,
add_loop_ic,
add_loop_psubs,
)
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import Bitlist, uint64
from eth2spec.utils.hash_function import hash
from .utils.eth2 import eth_to_gwei
## Initialisation
def get_initial_deposits(validators):
"""Produce deposits
Args:
validators (Sequence[BRValidator]): Validators of the simulation
Returns:
List[Deposit]: The list of deposits
"""
return [Deposit(
data=DepositData(
amount=eth_to_gwei(32),
pubkey=v.pubkey)
) for v in validators]
def get_genesis_state(validators, seed="hello"):
block_hash = hash(seed.encode("utf-8"))
eth1_timestamp = 1578009600
return initialize_beacon_state_from_eth1(
block_hash, eth1_timestamp, get_initial_deposits(validators)
)
def skip_genesis_block(validators):
for validator in validators:
validator.forward_by(SECONDS_PER_SLOT)
## State transitions
def tick(_params, step, sL, s, _input):
# Move the simulation by one step
frequency = _params[0]["frequency"]
network_update_rate = _params[0]["network_update_rate"]
# Probably overkill
assert frequency >= network_update_rate
network = s["network"]
update_prob = float(network_update_rate) / float(frequency)
# If we draw a success, based on `update_prob`, update the network
if random.random() < update_prob:
update_network(network)
# Push validators' clocks by one step
for validator in network.validators:
validator.update_time(frequency)
if s["timestep"] % 100 == 0:
print("timestep", s["timestep"], "of run", s["run"])
return ("network", network)
def update_attestations(_params, step, sL, s, _input):
# Get the attestations and disseminate them on-the-wire
network = s["network"]
disseminate_attestations(network, _input["attestations"])
return ('network', network)
def update_blocks(_params, step, sL, s, _input):
# Get the blocks proposed and disseminate them on-the-wire
network = s["network"]
for block in _input["blocks"]:
disseminate_block(network, block.message.proposer_index, block)
return ('network', network)
def update_chunk_responses(_params, step, sL, s, _input):
# Get the chunk responses and disseminate them on-the-wire
network = s["network"]
disseminate_chunk_responses(network, _input["chunk_responses"])
return ('network', network)
def update_bit_challenges(_params, step, sL, s, _input):
network = s["network"]
disseminate_bit_challenges(network, _input["bit_challenges"])
return ('network', network)
## Policies
### Attestations
def attest_policy(_params, step, sL, s):
# Pinging validators to check if anyone wants to attest
network = s['network']
produced_attestations = []
for validator_index, validator in enumerate(network.validators):
known_items = knowledge_set(network, validator_index)
attestation = validator.attest(validator, known_items)
if attestation is not None:
produced_attestations.append([validator_index, attestation])
return ({ 'attestations': produced_attestations })
### Block proposal
def propose_policy(_params, step, sL, s):
# Pinging validators to check if anyone wants to propose a block
network = s['network']
produced_blocks = []
for validator_index, validator in enumerate(network.validators):
known_items = knowledge_set(network, validator_index)
block = validator.propose(validator, known_items)
if block is not None:
produced_blocks.append(block)
return ({ 'blocks': produced_blocks })
### Chunk Challenge Response
def chunk_response_policy(_params, step, sL, s):
# Pinging validators to check if anyone wants to respond to a chunk challenge.
network = s['network']
responses = []
for validator_index, validator in enumerate(network.validators):
known_items = knowledge_set(network, validator_index)
chunk_response = validator.chunk_response(validator, known_items)
if chunk_response is not None:
responses.append([validator_index, chunk_response])
return ({ 'chunk_responses': responses })
# Bit Challenge
def bit_challenge_policy(_params, step, sL, s):
# Pinging validators to check if anyone wants to issue a bit challenge
network = s['network']
bit_challenges = []
for validator_index, validator in enumerate(network.validators):
known_items = knowledge_set(network, validator_index)
bit_challenge = validator.bit_challenge(validator, known_items)
if bit_challenge is not None:
bit_challenges.append([validator_index, bit_challenge])
return ({ 'bit_challenges': bit_challenges})
### Simulator shell
class SimulationParameters:
num_epochs: uint64
num_run: uint64
frequency: uint64
network_update_rate: float
def __init__(self, obj):
self.num_epochs = obj["num_epochs"]
self.num_run = obj["num_run"]
self.frequency = obj["frequency"]
self.network_update_rate = obj["network_update_rate"]
def simulate(network: Network, parameters: SimulationParameters,
observers: Dict[str, Callable[[BeaconState], Any]] = {}) -> pd.DataFrame:
"""
Args:
network (Network): Network of :py:class:`beaconrunner.validatorlib.BRValidator`
parameters (SimulationParameters): Simulation parameters
Returns:
pandas.DataFrame: Results of the simulation contained in a pandas data frame
"""
initial_conditions = {
'network': network
}
psubs = [
{
'policies': {
'action': attest_policy # step 1
},
'variables': {
'network': update_attestations # step 2
}
},
{
'policies': {
'action': chunk_response_policy # step 3
},
'variables': {
'network': update_chunk_responses # step 4
}
},
{
'policies': {
'action': bit_challenge_policy # step 5
},
'variables': {
'network': update_bit_challenges # step 6
}
},
{
'policies': {
'action': propose_policy # step 7
},
'variables': {
'network': update_blocks # step 8
}
},
{
'policies': {
},
'variables': {
'network': tick # step 9
}
},
]
# Determine how many steps the simulation is running for
num_slots = parameters.num_epochs * SLOTS_PER_EPOCH
steps = num_slots * SECONDS_PER_SLOT * parameters.frequency
simulation_parameters = {
'T': range(steps),
'N': 1,
'M': {
"frequency": [parameters.frequency],
"network_update_rate": [parameters.network_update_rate],
}
}
print("will simulate", parameters.num_epochs, "epochs (", num_slots, "slots ) at frequency", parameters.frequency, "moves/second")
print("total", steps, "simulation steps")
# Add our observers to the simulation
observed_ic = add_loop_ic(get_observed_initial_conditions(initial_conditions, observers))
observed_psubs = add_loop_psubs(get_observed_psubs(psubs, observers))
# Final simulation parameters and execution
configs = []
for sim_param in config_sim(simulation_parameters):
config = Configuration(sim_param,
initial_state=observed_ic,
partial_state_update_blocks=observed_psubs)
configs.append(config)
exec_mode = ExecutionMode()
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=configs)
raw_result, tensor_field = run.execute()
return pd.DataFrame(raw_result)  # api: pandas.DataFrame
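# Illustrative usage sketch (added for documentation; defining it has no effect at import time). It only
# shows how SimulationParameters and simulate() above fit together; building `network` is left to the
# caller because the Network/validator constructors live in other modules, and the observer shown is an
# assumption -- any callable taking a BeaconState works.
def _example_simulation(network: Network) -> pd.DataFrame:
    params = SimulationParameters({
        "num_epochs": 2,               # assumed small value for a quick run
        "num_run": 1,
        "frequency": 1,                # one simulation step per second
        "network_update_rate": 1.0,
    })
    observers = {
        "current_slot": lambda state: state.slot,   # assumed observer: record the current slot
    }
    return simulate(network, params, observers)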
import numpy as np
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype
import pandas as pd
import pandas._testing as tm
from .base import BaseExtensionTests
class BaseInterfaceTests(BaseExtensionTests):
"""Tests that the basic interface is satisfied."""
# ------------------------------------------------------------------------
# Interface
# ------------------------------------------------------------------------
def test_len(self, data):
assert len(data) == 100
def test_size(self, data):
assert data.size == 100
def test_ndim(self, data):
assert data.ndim == 1
def test_can_hold_na_valid(self, data):
# GH-20761
assert data._can_hold_na is True
def test_contains(self, data, data_missing):
# GH-37867
# Tests for membership checks. Membership checks for nan-likes is tricky and
# the settled on rule is: `nan_like in arr` is True if nan_like is
# arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.
na_value = data.dtype.na_value
# ensure data without missing values
data = data[~data.isna()]
# first elements are non-missing
assert data[0] in data
assert data_missing[0] in data_missing
# check the presence of na_value
assert na_value in data_missing
assert na_value not in data
# the data can never contain other nan-likes than na_value
for na_value_obj in tm.NULL_OBJECTS:
if na_value_obj is na_value:
continue
assert na_value_obj not in data
assert na_value_obj not in data_missing
def test_memory_usage(self, data):
s = pd.Series(data)
result = s.memory_usage(index=False)
assert result == s.nbytes
def test_array_interface(self, data):
result = np.array(data)
assert result[0] == data[0]
result = np.array(data, dtype=object)
expected = np.array(list(data), dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_is_extension_array_dtype(self, data):
assert is_extension_array_dtype(data)
assert is_extension_array_dtype(data.dtype)
assert is_extension_array_dtype(pd.Series(data))
assert isinstance(data.dtype, ExtensionDtype)
def test_no_values_attribute(self, data):
# GH-20735: EA's with .values attribute give problems with internal
# code, disallowing this for now until solved
assert not hasattr(data, "values")
assert not hasattr(data, "_values")
def test_is_numeric_honored(self, data):
result = pd.Series(data)
assert result._mgr.blocks[0].is_numeric is data.dtype._is_numeric
def test_isna_extension_array(self, data_missing):
# If your `isna` returns an ExtensionArray, you must also implement
# _reduce. At the *very* least, you must implement any and all
na = data_missing.isna()
if is_extension_array_dtype(na):  # api: pandas.core.dtypes.common.is_extension_array_dtype
"""
Script to evaluate the UCBOG and OG of multiple experiments
"""
import os
import os.path as osp
import pandas as pd
import pyrado
from matplotlib import pyplot as plt
from pyrado.logger.experiment import load_dict_from_yaml
from pyrado.plotting.curve import render_mean_std
from pyrado.sampling.sequences import *
from pyrado.utils.experiments import filter_los_by_lok
if __name__ == '__main__':
save_name = 'FILL_IN'
# Add every experiment with (partially) matching key
filter_key = ['FILL_IN']
# Get the experiments' directories to load from
ex_dirs = []
ex_dirs.extend([tmp[0] for tmp in os.walk(osp.join(pyrado.EXP_DIR, 'FILL_IN', 'FILL_IN'))][1:])
ex_dirs = filter_los_by_lok(ex_dirs, filter_key)
print(f'Number of loaded experiments: {len(ex_dirs)}')
dfs = []
for ex_dir in ex_dirs:
dfs.append(pd.read_csv(osp.join(ex_dir, 'OG_log.csv')))
df = pd.concat(dfs, axis=0)  # api: pandas.concat
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path):  # api: pandas.tests.io.pytables.common.ensure_clean_path
import pandas as pd
import openpyxl
import numpy as np
import os
import string
import glob
''' This program compiles all (individual) saved Excel files to compare different models in one environment.
'''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
path_core = __location__+ "/Results/Train/"
print("OK")
# SELECT THE ENVIRONMENTS
# env_path_list = ["Env_1",
# "Env_2",
# "Env_3",
# "Env_8",
# "Env_9",
# "Env_10",
# "Env_11"]
env_path_list = ["Env_1",
"Env_2",
"Env_3",
"Env_4"]
env_path_list = ["Env_1"]
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', 'AO', 'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY', 'AZ']
list_sheets = ["Run_Conf", "Score", "Percent", "Loss", "Time"]
for env_path in env_path_list:
file_path_list = []
path = path_core + env_path + "/Train_Env_1_DQN*.xlsx"
for fname in sorted(glob.glob(path)):
file_path_list.append(fname)
print("LEN(FILE_PATH_LIST):", len(file_path_list))
load_path = __location__+ "/Results/Train/Compare_Models.xlsx"
excel_data_base = pd.ExcelFile(load_path)
load_path_new = __location__+ "/Results/Train/" + env_path + "/Compare_Models_new_" + env_path + ".xlsx"
excel_writer_to_append = pd.ExcelWriter(load_path_new)
workbook = excel_writer_to_append.book
excel_data_base_col = pd.read_excel(excel_data_base, sheetname="Run_Conf")
df_Run_Conf_list = pd.DataFrame()
df_Score_list = pd.DataFrame()
df_Percent_list = pd.DataFrame()
df_Loss_list = pd.DataFrame()
df_Time_list = pd.DataFrame()
for i in range(len(file_path_list)):
print("File:", i)
excel_file = pd.ExcelFile(file_path_list[i])
# print("excel_file ", excel_file )
df_Run_Conf = pd.read_excel(excel_file, sheetname=list_sheets[0], converters={'A': str})
df_Run_Conf = df_Run_Conf.set_index(list_sheets[0])
df_Score = pd.read_excel(excel_file, sheetname=list_sheets[1], parse_cols="A:B")
df_Score = df_Score.set_index(list_sheets[1])
df_Percent = pd.read_excel(excel_file, sheetname=list_sheets[2], parse_cols="A:B")
df_Percent = df_Percent.set_index(list_sheets[2])
df_Loss = pd.read_excel(excel_file, sheetname=list_sheets[3], parse_cols="A:B")
df_Loss = df_Loss.set_index(list_sheets[3])
df_Time = pd.read_excel(excel_file, sheetname=list_sheets[4], parse_cols="A:B")
df_Time = df_Time.set_index(list_sheets[4])
df_Run_Conf_list = pd.concat([df_Run_Conf_list, df_Run_Conf], axis=1, join="outer")
df_Score_list = pd.concat([df_Score_list, df_Score], axis=1, join="outer")  # api: pandas.concat
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only: input all files (.bam and .fa), output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
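# complete=True (heterogeneity estimation): require at least 2**w fully observed reads in the window.
# complete=False (imputation): require at least 2**(w-2) fully observed reads plus at least one read
# missing exactly one CpG.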
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
def impute(window,w):
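# Imputation strategy for reads missing exactly one CpG: if every read observed at that position carries
# the same value, copy it; otherwise copy the value from a randomly chosen fully observed read that
# matches the other w-1 positions, and if no such read exists, sample from the values observed at that
# position.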
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
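# Pattern distances used by PattoDis above:
# Ham_d -- Hamming distance: number of CpG positions at which the two patterns differ.
# WDK_d -- weighted-degree-kernel style distance: for every sub-pattern length k=i+1 and start j,
# add weight (w-k) whenever the two patterns disagree anywhere within that sub-window.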
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
# input a window of w CpGs and output the frequency of each methylation pattern, together with the starting genomic location and the genomic distance spanned by the window
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            # np.append accepts only one extra array (plus an optional axis), so build the branch lengths in a single concatenate
            phylotree=np.concatenate(([0],np.repeat(3,16),np.repeat(1.5,6),[3.2,0.8],np.repeat(2,3),np.repeat(1.5,2)))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
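    # Worked toy example (illustrative): w=2, m=4 reads, pattern counts [2,0,0,2] for '00','10','01','11'
    #   Abundance (MeH=1): 1/(0.5**2+0.5**2) = 2
    #   Entropy   (MeH=4): -(0.5*log2(0.5)+0.5*log2(0.5))/2 = 0.5
    #   Epipoly   (MeH=5): 1-(0.5**2+0.5**2) = 0.5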
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
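# Minimal usage sketch for MeHperwindow (illustrative only; the toy values are made up and
# PattoDis refers to the helper defined earlier in this script):
#   pats = pd.DataFrame([[1,1,0,0],[1,1,0,0],[0,0,1,1],[1,0,1,0]])   # 4 reads x 4 CpG sites
#   all_pos4 = np.zeros((16,4))
#   for i in range(4):
#       all_pos4[:,i] = np.linspace(0,15,16)%(2**(i+1))//(2**i)
#   D4 = PattoDis(pd.DataFrame(all_pos4), dist=1)                    # Hamming-based pattern distances
#   MeHperwindow(pats, start=100, dis=60, chrom='chr1', D=D4, w=4, optional=False)
#   # -> 1-row DataFrame with columns chrom, pos, MeH, dis, strand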
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
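# Note on impute (assumed behaviour, for illustration): a read missing exactly one call is completed
# from reads that agree with it at the other w-1 sites, e.g. for w=3,
#   [[1,1,0],[1,1,0],[1,1,nan]] -> the nan in the third read is filled with 0.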
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
    print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CG'
#samfile.close()
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
    return sample, coverage, cov_context, 'CHG'
def split_bam(samplenames,Folder):
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
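        # start a new chunk once the current output file exceeds ~337 MB and the input has not yet been fully written out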
if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
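# Example use of split_bam (illustrative): split_bam('sample1', 'MeHdata/') writes MeHdata/sample1_0.bam,
# MeHdata/sample1_1.bam, ... together with their .bai indexes.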
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("-m", "--MeH",type=int, default=2, help='Methylation heterogeneity score 1:Abundance 2:PW 3:Phylogeny')
parser.add_argument("-d", "--dist",type=int, default=1, help='Distance between methylation patterns 1:Hamming 2:WDK')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("--opt", default=False, action='store_true', help='Outputs compositions of methylation patterns')
parser.add_argument('--mlv', default=False, action='store_true', help='Outputs methylation levels')
parser.add_argument('--imp', default=True, action='store_false', help='Implement BSImp (impute if valid)')
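# NB: because --imp is declared with default=True and action='store_false', passing --imp on the command line turns imputation OFF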
args = parser.parse_args()
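# Example invocation (illustrative; the script name is an assumption):
#   python MeHscr.py -w 4 -c 8 --CG --mlv
# It expects a MeHdata/ folder containing one .fa reference and the per-sample .bam files.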
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
open_log('MeHscreening.log')
logm("Call genome screening.")
#start = time.time()
Folder = 'MeHdata/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
#if 'cores' in args:
# num_cores = args.cores
#else:
# num_cores = 4
Parallel(n_jobs=args.cores)(delayed(split_bam)(bamfile,Folder=Folder) for bamfile in bam_list)
spbam_list = []
tempfiles = os.listdir(Folder)
for file in tempfiles:
filename, file_extension = os.path.splitext(file)
if file_extension=='.bam' and filename not in bam_list:
spbam_list.append(filename)
#print(spbam_list)
topp = pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
#CG = []
#start=t.time()
if args.CG:
con='CG'
CG=Parallel(n_jobs=args.cores)(delayed(CGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CG.")
# merge MeH within sample
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
        # pattern-composition (opt) output is merged per window; it is not aggregated into 400 bp bins
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False, header = True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
#os.chdir('../')
#os.chdir(outputFolder)
logm("Merging ML within samples for CG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
logm("Merging ML between samples for CG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("Merging MeH between samples for CG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result.dropna(axis = 0, thresh=4, inplace = True)
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
Result.to_csv(Folder + con + '_' +'Results.csv' ,index = False,header=True)
print("All done.",len(bam_list),"bam files processed and merged for CG.")
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
if args.CHG:
con='CHG'
CG=Parallel(n_jobs=args.cores)(delayed(CHGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHG.")
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = | pd.read_csv(toapp_dir) | pandas.read_csv |
import numpy as np
import pandas as pd
import json
import random
from matplotlib import pyplot as plt
from ai4netmon.Analysis.bias import bias_utils as bu
from ai4netmon.Analysis.bias import radar_chart
### Example 1 - simple example
print('####### Example 1 - simple example of calculating bias')
v1 = np.array([1,2,3,4,5,6,7,8,9])
v2 = np.array([1,3,9])
print('Target data (groundtruth):', list(v1))
print('Sample data:', list(v2))
print('Bias score (KL - numerical): {}'.format(bu.bias_score(v1, v2, method='kl_divergence', **{'data_type':'numerical', 'bins':3, 'alpha':0.01})))
print('Bias score (KL - categorical): {}'.format(bu.bias_score(v1, v2, method='kl_divergence', **{'data_type':'categorical', 'alpha':0.01})))
print('Bias score (KS-test): {}'.format(bu.bias_score(v1,v2, method='ks_test')))
print()
### Example 2 - bias in RIPE monitors
print('####### Example 2 - bias in RIPE monitors')
## datasets
AGGREGATE_DATA_FNAME = '../data/aggregate_data/asn_aggregate_data_20211201.csv'
RIPE_RIS_FNAME = '../data/misc/RIPE_RIS_peers_ip2asn.json'
## features
CATEGORICAL_FEATURES = ['AS_rank_source', 'AS_rank_iso', 'AS_rank_continent', 'is_personal_AS', 'peeringDB_info_ratio',
'peeringDB_info_traffic', 'peeringDB_info_scope', 'peeringDB_info_type', 'peeringDB_policy_general']
NUMERICAL_FEATURES = ['AS_rank_numberAsns', 'AS_rank_numberPrefixes', 'AS_rank_numberAddresses', 'AS_rank_total',
'AS_rank_customer', 'AS_rank_peer', 'AS_rank_provider', 'peeringDB_info_prefixes4', 'peeringDB_info_prefixes6',
'peeringDB_ix_count', 'peeringDB_fac_count', 'AS_hegemony']
FEATURES = CATEGORICAL_FEATURES+NUMERICAL_FEATURES
## useful methods
def get_feature_type(feature):
if feature in CATEGORICAL_FEATURES:
data_type = 'categorical'
elif feature in NUMERICAL_FEATURES:
data_type = 'numerical'
else:
raise ValueError
return data_type
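# e.g. get_feature_type('AS_rank_iso') -> 'categorical'; get_feature_type('AS_hegemony') -> 'numerical'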
## load data
df = | pd.read_csv(AGGREGATE_DATA_FNAME, header=0, index_col=0) | pandas.read_csv |
#!/usr/bin/env python
""" MultiQC module to parse output from scChIPseq pipeline """
from __future__ import print_function
from collections import OrderedDict
import logging
import os
import re
import pandas as pd
import subprocess
import pyBigWig as pyBW
from multiqc import config
from multiqc.plots import bargraph
from multiqc.plots import linegraph
from multiqc.modules.base_module import BaseMultiqcModule
from itertools import chain
from multiqc.plots import linegraph
import math
# Initialise the logger
log = logging.getLogger(__name__)
# Initialise your class and so on
class MultiqcModule(BaseMultiqcModule):
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(name='scChIPseq', anchor='scChIPseq',
href="https://gitlab.curie.fr/data-analysis/ChIP-seq_single-cell_LBC",
info="is a DNA alignment pipeline dedicated to single-cell ChIP-seq experiments")
# Find and load any scChIPseq reports
self.scChIPseq_data = dict()
for f in self.find_log_files('scChIPseq/all_logs'):
log.info('Found the all_logs!')
parsed_data = self.parse_scChIPseq_report(f['f'])
if parsed_data is not None:
s_name = f['s_name']
if s_name == '':
s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
if s_name in self.scChIPseq_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, section='SummaryLog')
self.scChIPseq_data[s_name] = parsed_data
# Read in flagged_count
self.scChIPseq_flagged_count = dict()
for f in self.find_log_files('scChIPseq/flagged_count'):
log.info('Found the flagged_count !')
colnames = ['count', 'barcode']
if not f['root']:
log.info("is empty")
count = pd.read_csv("./" + f['fn'], delim_whitespace=True, names=colnames)
else:
log.info("is not empty")
count = | pd.read_csv(f['root'] +"/" + f['fn'], delim_whitespace=True, names=colnames) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read data
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: kept to check the details (includes the human-readable name of each code, the 'name' column)
g_dpc_r_1=dpc.loc[:, ['ID','code','name']]
# Dummy encoding of the 'code' column (one indicator column per code)
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features= dpc_Remove_data_leak.groupby("ID").sum()
total_patient_features = total_patient_features.reset_index()
# Load a new file with ID and treatment availability
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
ID_and_polyp_pn = pd.read_table(file, delimiter=",")
ID_and_polyp_pn_data= ID_and_polyp_pn[['ID', 'target']]
# Combine the new file containing ID and treatment status with the dummy-encoded features
ID_treatment_medical_statement=pd.merge(ID_and_polyp_pn_data,total_patient_features,on=["ID"],how='outer')
ID_treatment_medical_statement_o= ID_treatment_medical_statement.fillna(0)
ID_treatment_medical_statement_p=ID_treatment_medical_statement_o.drop("ID", axis=1)
ID_treatment_medical_statement_rename= ID_treatment_medical_statement_p.rename(columns={'code':"Receipt type code"})
merge_data= ID_treatment_medical_statement_rename
# Split the data into an 80% training/validation set and a 20% test set, keeping the proportion of cases with lesions constant
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2,random_state=1)
# Create a function to divide data
def data_split(X,y):
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
X_train = pd.DataFrame(X_train, columns=columns_name)
X_test = pd.DataFrame(X_test, columns=columns_name)
return X_train, y_train, X_test, y_test
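# With n_splits=1 in StratifiedShuffleSplit, the loop above runs exactly once,
# so data_split returns a single stratified train/test partition.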
# Separate into training, validation, and test set
X_train, y_train, X_test, y_test = data_split(X, y)
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
# Make test set into pandas
X_test_df = pd.DataFrame(X_test)
y_test_df = pd.DataFrame(y_test)
# Set the test set aside as test_df for the final evaluation
test_dfp = pd.concat([y_test_df,X_test_df], axis=1)
test_df=test_dfp.rename(columns={0:"target"})
# Make training/validation sets into pandas
y_trainp = pd.DataFrame(y_train)
X_trainp = pd.DataFrame(X_train)
train= | pd.concat([y_trainp, X_trainp], axis=1) | pandas.concat |
# Locate the project root directory
# import sys
# from os.path import abspath, join, dirname
# sys.path.insert(0, join(abspath(dirname(__file__)), '\..\..'))
# Load environment files
from package.env import *
from package.sql_connect import *
# Load utility packages
import pandas as pd
import time
import pendulum
# Custom functions
# Main body
l_hours = [i for i in range(0, 24)]
l_minutes = [i for i in range(0, 60)]
l_seconds = [i for i in range(0, 60)]
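# The Cartesian product below yields 24 * 60 * 60 = 86,400 index rows,
# i.e. one row for every second of a day.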
index = pd.MultiIndex.from_product([l_hours, l_minutes, l_seconds], names = ["hours24", "minutes", "seconds"])
df_data = | pd.DataFrame(index=index) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
| tm.assert_index_equal(result, exp) | pandas.util.testing.assert_index_equal |
from conceptnet5.util import get_support_data_filename
from conceptnet5.vectors import standardized_uri, get_vector, cosine_similarity
from conceptnet5.vectors.query import VectorSpaceWrapper
from scipy.stats import spearmanr, pearsonr, tmean, hmean
from itertools import combinations
import numpy as np
import pandas as pd
SAMPLE_SIZES = {
'ws353': 353,
'ws353-es': 353,
'ws353-ro': 353,
'men3000': 3000,
'mturk': 771,
'rw': 2034,
'gur350-de': 350,
'zg222-de': 222,
'simlex': 999,
'scws': 2003,
'pku500-zh': 500,
'tmu-rw-ja': 4431,
'semeval-2a-en': 500,
'semeval-2a-de': 500,
'semeval-2a-es': 500,
'semeval-2a-it': 500,
'semeval-2a-fa': 500,
'semeval17-2a': 2000,
'semeval-2b-de-es': 956,
'semeval-2b-de-fa': 888,
'semeval-2b-de-it': 912,
'semeval-2b-en-de': 914,
'semeval-2b-en-es': 978,
'semeval-2b-en-fa': 952,
'semeval-2b-en-it': 970,
'semeval-2b-es-fa': 914,
'semeval-2b-es-it': 967,
'semeval-2b-it-fa': 916,
'semeval17-2b': 5697,
}
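# SAMPLE_SIZES records the number of word pairs in each evaluation set; it is used
# by make_comparison_table()/confidence_interval() below to compute confidence intervals.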
# A mapping from short group names to more formal citations
GROUPS = {
'Luminoso': 'Speer and Chin (2016)',
'Bar-Ilan': 'Levy et al. (2015)',
'Google': 'Mikolov et al. (2013)',
'Facebook': 'Joulin et al. (2016)',
'Stanford': 'Pennington et al. (2014)',
'UFRGS': 'Salle et al. (2016)',
'Google+HL': 'Soricut and Och (2015)',
'Oxford': 'Botha and Blunsom (2014)'
}
def confidence_interval(rho, N):
"""
Give a 95% confidence interval for a Spearman correlation score, given
the correlation and the number of cases.
"""
z = np.arctanh(rho)
interval = 1.96 / np.sqrt(N - 3)
low = z - interval
high = z + interval
return pd.Series(
[rho, np.tanh(low), np.tanh(high)],
index=['acc', 'low', 'high']
)
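# The interval above follows the Fisher z-transformation: z = arctanh(rho) is
# approximately normal with standard error 1 / sqrt(N - 3), so a 95% interval in
# z-space is z +/- 1.96 / sqrt(N - 3), mapped back to correlation space with tanh.
# Illustrative check (not part of the original module):
#   confidence_interval(0.7, 353)  # -> acc 0.70, low ~0.64, high ~0.75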
def empty_comparison_table():
return pd.DataFrame(
columns=['acc', 'low', 'high']
)
def make_comparison_table(scores):
evals = sorted(scores)
table = pd.DataFrame(index=evals, columns=['acc', 'low', 'high'])
for evalname, score in scores.items():
table.loc[evalname] = confidence_interval(score, SAMPLE_SIZES[evalname])
return table
COMPARISONS = {}
# Here are all the existing evaluation results we know about, classified by
# what institution produced the results and which method is implemented.
# TODO: Update COMPARISONS and fill in the Semeval results once the group affiliations become
# available
# Levy et al., 2015
COMPARISONS['Bar-Ilan', 'PPMI'] = make_comparison_table({
'men3000': .745,
'mturk': .686,
'rw': .462,
'simlex': .393,
'ws353': .721 # estimate
})
COMPARISONS['Bar-Ilan', 'SVD'] = make_comparison_table({
'men3000': .778,
'mturk': .666,
'rw': .514,
'simlex': .432,
'ws353': .733 # estimate
})
COMPARISONS['Bar-Ilan', 'SGNS'] = make_comparison_table({
'men3000': .774,
'mturk': .693,
'rw': .470,
'simlex': .438,
'ws353': .729 # estimate
})
COMPARISONS['Bar-Ilan', 'GloVe'] = make_comparison_table({
'men3000': .729,
'mturk': .632,
'rw': .403,
'simlex': .398,
'ws353': .654 # estimate
})
COMPARISONS['Google', 'word2vec SGNS'] = make_comparison_table({
'men3000': .732,
'rw': .385,
'ws353': .624,
'scws': .574
})
# Speer and Chin, 2016 - arXiv:1604.01692v1
COMPARISONS['Luminoso', 'GloVe'] = make_comparison_table({
'rw': .528,
'men3000': .840,
'ws353': .798
})
COMPARISONS['Luminoso', 'word2vec SGNS'] = make_comparison_table({
'rw': .476,
'men3000': .778,
'ws353': .731
})
COMPARISONS['Luminoso', 'Numberbatch 2016.04'] = make_comparison_table({
'rw': .596,
'men3000': .859,
'ws353': .821
})
COMPARISONS['Luminoso', 'PPMI'] = make_comparison_table({
'rw': .420,
'men3000': .764,
'ws353': .651,
'scws': .608
})
# Pennington et al., 2014
COMPARISONS['Stanford', 'GloVe'] = make_comparison_table({
'rw': .477,
'men3000': .816,
'ws353': .759
})
# Joulin et al., 2016 - "Bag of Tricks"
# Rounded-off numbers from the blog post at https://research.facebook.com/blog/fasttext/
COMPARISONS['Facebook', 'fastText'] = make_comparison_table({
'rw': .46,
'ws353': .73,
'gur350-de': .69,
'zg222-de': .37,
})
# Salle et al., 2016 - LexVec
# https://github.com/alexandres/lexvec
COMPARISONS['UFRGS', 'LexVec'] = make_comparison_table({
'rw': .489,
'simlex': .384,
'scws': .652,
'ws353': .661,
'men3000': .759,
'mturk': .655
})
COMPARISONS['Google+HL', 'SG+Morph'] = make_comparison_table({
'rw': .418,
'ws353': .712,
'gur350-de': .641,
'zg222-de': .215,
'ws353-es': .473,
})
COMPARISONS['Oxford', 'BB2014'] = make_comparison_table({
'rw': .300,
'ws353': .400,
'gur350-de': .560,
'zg222-de': .250
})
# Comparisons from SemEval results
COMPARISONS['SemEval2017', 'Luminoso'] = make_comparison_table({
'semeval-2a-en': .789,
'semeval-2a-de': .700,
'semeval-2a-es': .743,
'semeval-2a-it': .741,
'semeval-2a-fa': .503,
'semeval-2b-en-de': .763,
'semeval-2b-en-es': .761,
'semeval-2b-en-it': .776,
'semeval-2b-en-fa': .598,
'semeval-2b-de-es': .728,
'semeval-2b-de-it': .741,
'semeval-2b-de-fa': .598,
'semeval-2b-es-it': .753,
'semeval-2b-es-fa': .627,
'semeval-2b-it-fa': .604,
})
COMPARISONS['SemEval2017', 'Nasari'] = make_comparison_table({
# This is the baseline system, by Uniroma
'semeval-2a-en': .682,
'semeval-2a-de': .514,
'semeval-2a-es': .600,
'semeval-2a-it': .596,
'semeval-2a-fa': .405,
'semeval-2b-en-de': .598,
'semeval-2b-en-es': .633,
'semeval-2b-en-it': .648,
'semeval-2b-en-fa': .505,
'semeval-2b-de-es': .549,
'semeval-2b-de-it': .561,
'semeval-2b-de-fa': .458,
'semeval-2b-es-it': .595,
'semeval-2b-es-fa': .479,
'semeval-2b-it-fa': .486,
})
COMPARISONS['SemEval2017', 'QLUT'] = make_comparison_table({
'semeval-2a-en': .778,
})
COMPARISONS['SemEval2017', 'HCCL'] = make_comparison_table({
'semeval-2a-en': .687,
'semeval-2a-de': .594,
'semeval-2a-es': .701,
'semeval-2a-it': .651,
'semeval-2a-fa': .436,
'semeval-2b-en-de': .307,
'semeval-2b-en-es': .087,
'semeval-2b-en-it': .055,
'semeval-2b-en-fa': .012,
'semeval-2b-de-es': .045,
'semeval-2b-de-it': .037,
'semeval-2b-de-fa': .023,
'semeval-2b-es-it': .064,
'semeval-2b-es-fa': .048,
'semeval-2b-it-fa': .000,
})
COMPARISONS['SemEval2017', 'Mahtab'] = make_comparison_table({
'semeval-2a-fa': .715,
})
COMPARISONS['SemEval2017', 'hhu'] = make_comparison_table({
'semeval-2a-en': .704,
'semeval-2a-fa': .604,
'semeval-2b-en-fa': .513,
})
COMPARISONS['SemEval2017', 'OoO'] = make_comparison_table({
'semeval-2b-en-de': .570,
'semeval-2b-en-es': .584,
'semeval-2b-en-it': .584,
'semeval-2b-de-es': .549,
'semeval-2b-de-it': .548,
'semeval-2b-es-it': .570,
})
COMPARISONS['SemEval2017', 'SEW'] = make_comparison_table({
'semeval-2a-en': .464,
'semeval-2a-de': .449,
'semeval-2a-es': .616,
'semeval-2a-it': .569,
'semeval-2a-fa': .393,
'semeval-2b-en-de': .464,
'semeval-2b-en-es': .505,
'semeval-2b-en-it': .526,
'semeval-2b-en-fa': .420,
'semeval-2b-de-es': .530,
'semeval-2b-de-it': .520,
'semeval-2b-de-fa': .428,
'semeval-2b-es-it': .595,
'semeval-2b-es-fa': .515,
'semeval-2b-it-fa': .489,
})
COMPARISONS['SemEval2017', 'RUFINO'] = make_comparison_table({
'semeval-2a-en': .656,
'semeval-2a-de': .539,
'semeval-2a-es': .549,
'semeval-2a-it': .476,
'semeval-2a-fa': .360,
'semeval-2b-en-de': .330,
'semeval-2b-en-es': .340,
'semeval-2b-en-it': .342,
'semeval-2b-en-fa': .373,
'semeval-2b-de-es': .318,
'semeval-2b-de-it': .327,
'semeval-2b-de-fa': .267,
'semeval-2b-es-it': .356,
'semeval-2b-es-fa': .300,
'semeval-2b-it-fa': .249,
})
COMPARISONS['SemEval2017', 'Citius'] = make_comparison_table({
'semeval-2a-en': .651,
'semeval-2a-es': .523,
'semeval-2b-en-es': .577,
})
COMPARISONS['SemEval2017', 'l2f'] = make_comparison_table({
'semeval-2a-en': .649,
})
COMPARISONS['SemEval2017', 'gpv8'] = make_comparison_table({
'semeval-2a-en': .555,
'semeval-2a-de': .347,
'semeval-2a-it': .499,
})
COMPARISONS['SemEval2017', 'MERALI'] = make_comparison_table({
'semeval-2a-en': .594,
})
COMPARISONS['SemEval2017', 'Amateur'] = make_comparison_table({
'semeval-2a-en': .589,
})
COMPARISONS['SemEval2017', 'Wild Devs'] = make_comparison_table({
'semeval-2a-en': .468,
})
# Hypothetical SemEval runs of existing systems
COMPARISONS['SemEval2017', 'fastText'] = make_comparison_table({
'semeval-2a-en': .468,
'semeval-2a-de': .507,
'semeval-2a-es': .417,
'semeval-2a-it': .344,
'semeval-2a-fa': .334,
})
# Hypothetical SemEval runs of existing systems
COMPARISONS['SemEval2017', 'Luminoso, no OOV'] = make_comparison_table({
'semeval-2a-en': .747,
'semeval-2a-de': .599,
'semeval-2a-es': .611,
'semeval-2a-it': .606,
'semeval-2a-fa': .363,
'semeval-2b-en-de': .696,
'semeval-2b-en-es': .675,
'semeval-2b-en-it': .677,
'semeval-2b-en-fa': .502,
'semeval-2b-de-es': .620,
'semeval-2b-de-it': .612,
'semeval-2b-de-fa': .501,
'semeval-2b-es-it': .613,
'semeval-2b-es-fa': .482,
'semeval-2b-it-fa': .474,
})
COMPARISONS['SemEval2017', 'word2vec'] = make_comparison_table({
'semeval-2a-en': .575,
})
def read_ws353():
"""
Parses the word-similarity 353 test collection (ws353). ws353 is a
collection of 353 English word pairs, each with a relatedness rating between
0 (totally unrelated) and 10 (very related or identical). The relatedness
of a pair of words was determined by the average score of either 13
or 16 native English speakers.
"""
lang1, lang2 = 'en', 'en'
with open(get_support_data_filename('wordsim-353/combined.csv')) as file:
for line in file:
if line.startswith('Word 1'): # Skip the header
continue
term1, term2, sscore = line.split(',')
gold_score = float(sscore)
yield term1, term2, gold_score, lang1, lang2
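# read_ws353() and most of the other read_* helpers below yield rows of the form
# (term1, term2, gold_score, lang1, lang2), the shape consumed by measure_correlation()
# further down (read_mc() is the exception and omits the language codes).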
def read_ws353_multilingual(language):
lang1, lang2 = language, language
if language == 'es':
language = 'es.fixed'
filename = 'wordsim-353/{}.tab'.format(language)
with open(get_support_data_filename(filename)) as file:
for line in file:
term1, term2, sscore = line.split('\t')
gold_score = float(sscore)
yield term1, term2, gold_score, lang1, lang2
def read_gurevych(setname):
# The 'setname' here is a number indicating the number of word pairs
# in the set.
lang1, lang2 = 'de', 'de'
filename = 'gurevych/wortpaare{}.gold.pos.txt'.format(setname)
with open(get_support_data_filename(filename)) as file:
for line in file:
if line.startswith('#'):
continue
term1, term2, sscore, _pos1, _pos2 = line.rstrip().split(':')
gold_score = float(sscore)
yield term1, term2, gold_score, lang1, lang2
def read_mturk():
lang1, lang2 = 'en', 'en'
with open(get_support_data_filename('mturk/MTURK-771.csv')) as file:
for line in file:
term1, term2, sscore = line.split(',')
gold_score = float(sscore)
yield term1, term2, gold_score, lang1, lang2
def read_pku500():
lang1, lang2 = 'zh', 'zh'
filename = 'pku-500/pku-500.csv'
with open(get_support_data_filename(filename)) as file:
for line in file:
if line.startswith('#'):
continue
term1, term2, sscore = line.split('\t')
gold_score = float(sscore)
yield term1, term2, gold_score, lang1, lang2
def read_men3000(subset='dev'):
"""
Parses the MEN test collection. MEN is a collection of 3000 English word
pairs, each with a relatedness rating between 0 and 50. The relatedness of
a pair of words was determined by the number of times the pair was selected
as more related compared to another randomly chosen pair.
"""
lang1, lang2 = 'en', 'en'
filename = get_support_data_filename('mensim/MEN_dataset_lemma_form.{}'.format(subset))
with open(filename) as file:
for line in file:
parts = line.rstrip().split()
term1 = parts[0].split('-')[0] # remove part of speech
term2 = parts[1].split('-')[0]
gold_score = float(parts[2])
yield term1, term2, gold_score, lang1, lang2
def read_rg65():
"""
Parses the Rubenstein and Goodenough word similarity test collection.
"""
lang1, lang2 = 'en', 'en'
filename = get_support_data_filename('rg65/EN-RG-65.txt')
with open(filename) as file:
for line in file:
parts = line.split()
yield parts[0], parts[1], float(parts[2]), lang1, lang2
def read_rw(subset='dev'):
"""
Parses the rare word similarity test collection.
"""
lang1, lang2 = 'en', 'en'
filename = get_support_data_filename('rw/rw-{}.csv'.format(subset))
with open(filename) as file:
for line in file:
parts = line.split()
yield parts[0], parts[1], float(parts[2]), lang1, lang2
def read_tmu():
"""
Read the Japanese rare-words dataset from Tokyo Metropolitan University.
"""
lang1, lang2 = 'ja', 'ja'
for pos in ('noun', 'verb', 'adj', 'adv'):
filename = get_support_data_filename('tmu-rw/score_{}.csv'.format(pos))
with open(filename, encoding='utf-8') as file:
for line in file:
if line.startswith('word1'):
continue
parts = line.split(',')
yield parts[0].strip(), parts[1].strip(), float(parts[2]), lang1, lang2
def read_mc():
"""
Parses the Miller and Charles word similarity test collection.
"""
filename = get_support_data_filename('mc/EN-MC-30.txt')
with open(filename) as file:
for line in file:
parts = line.split()
yield parts[0], parts[1], float(parts[2])
def read_semeval_monolingual(lang, subset='test'):
"""
Parses Semeval2017-Task2 monolingual word similarity (subtask 1) test collection.
"""
lang1, lang2 = lang, lang
filename = get_support_data_filename('semeval17-2/{}.{}.txt'.format(lang, subset))
with open(filename) as file:
for line in file:
parts = line.split('\t')
yield parts[0], parts[1], float(parts[2]), lang1, lang2
def read_semeval_crosslingual(lang1, lang2, subset='test'):
"""
Parses Semeval2017-Task2 crosslingual word similarity (Subtask2) test collection.
"""
filename = get_support_data_filename('semeval17-2/{}-{}.{}.txt'.format(lang1, lang2, subset))
with open(filename) as file:
for line in file:
parts = line.split('\t')
yield parts[0], parts[1], float(parts[2]), lang1, lang2
def compute_semeval_score(pearson_score, spearman_score):
"""
Return NaN if a dataset can't be evaluated on a given frame. Return 0 if at least one similarity
measure was 0 or negative. Otherwise, take a harmonic mean of a Pearson correlation coefficient
and a Spearman correlation coefficient.
"""
intervals = ['acc', 'low', 'high']
scores = []
for interval in intervals:
if any(np.isnan(x) for x in [spearman_score[interval], pearson_score[interval]]):
scores.append(float('NaN'))
elif any(x <= 0 for x in [spearman_score[interval], pearson_score[interval]]):
scores.append(0)
else:
scores.append(hmean([spearman_score[interval], pearson_score[interval]]))
return pd.Series(
scores,
index=intervals
)
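# The harmonic mean above penalises disagreement between the two coefficients more
# strongly than an arithmetic mean would, e.g. hmean([0.6, 0.8]) ~= 0.686 versus an
# arithmetic mean of 0.7.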
def evaluate_semeval_monolingual(vectors, lang):
"""
Get a semeval score for a single monolingual test set.
"""
spearman_score = measure_correlation(spearmanr, vectors, read_semeval_monolingual(lang))
pearson_score = measure_correlation(pearsonr, vectors, read_semeval_monolingual(lang))
score = compute_semeval_score(spearman_score, pearson_score)
return score
def evaluate_semeval_crosslingual(vectors, lang1, lang2):
"""
Get a semeval score for a single crosslingual test set
"""
spearman_score = measure_correlation(spearmanr, vectors, read_semeval_crosslingual(lang1, lang2))
pearson_score = measure_correlation(pearsonr, vectors, read_semeval_crosslingual(lang1, lang2))
score = compute_semeval_score(spearman_score, pearson_score)
return score
def evaluate_semeval_monolingual_global(vectors):
"""
According to Semeval2017-Subtask2 rules, the global score for a system is the average of the
final individual scores on the four languages on which the system performed best. If fewer than
four scores are supplied, the global score is NaN.
"""
scores = []
for lang in ['en', 'de', 'es', 'it', 'fa']:
score = evaluate_semeval_monolingual(vectors, lang)
scores.append(score)
top_scores = sorted(scores, key=lambda x: x['acc'] if not np.isnan(x['acc']) else 0)[-4:]
acc_average = tmean([score['acc'] for score in top_scores])
low_average = tmean([score['low'] for score in top_scores])
high_average = tmean([score['high'] for score in top_scores])
return pd.Series(
[acc_average, low_average, high_average],
index=['acc', 'low', 'high']
)
def evaluate_semeval_crosslingual_global(vectors):
"""
According to Semeval2017-Subtask2 rules, the global score is the average of the individual
scores on the six cross-lingual datasets on which the system performs best. If fewer than six
scores are supplied, the global score is NaN.
"""
scores = []
languages = ['en', 'de', 'es', 'it', 'fa']
for lang1, lang2 in combinations(languages, 2):
score = evaluate_semeval_crosslingual(vectors, lang1, lang2)
scores.append(score)
top_scores = sorted(scores, key=lambda x: x['acc'] if not np.isnan(x['acc']) else 0)[-6:]
acc_average = tmean([score['acc'] for score in top_scores])
low_average = tmean([score['low'] for score in top_scores])
high_average = tmean([score['high'] for score in top_scores])
return pd.Series(
[acc_average, low_average, high_average],
index=['acc', 'low', 'high']
)
def measure_correlation(correlation_function, vectors, standard, verbose=0):
"""
Test a vector space's ability to recognize word relatedness. This function
computes the correlation (using the supplied correlation_function) between the
vector space's reported word similarity and the expected relatedness according to 'standard'.
"""
gold_scores = []
our_scores = []
for term1, term2, gold_score, lang1, lang2 in standard:
if isinstance(vectors, VectorSpaceWrapper):
uri1 = standardized_uri(lang1, term1)
uri2 = standardized_uri(lang2, term2)
our_score = vectors.get_similarity(uri1, uri2)
else:
our_score = cosine_similarity(get_vector(vectors, term1, lang1),
get_vector(vectors, term2, lang2))
if verbose > 1:
print('%s\t%s\t%3.3f\t%3.3f' % (term1, term2, gold_score, our_score))
gold_scores.append(gold_score)
our_scores.append(our_score)
correlation = correlation_function(np.array(gold_scores), np.array(our_scores))[0]
if verbose:
print("Correlation: %s" % (correlation,))
return confidence_interval(correlation, len(gold_scores))
def evaluate(frame, subset='dev', semeval_scope='global'):
"""
Evaluate a DataFrame containing term vectors on its ability to predict term
relatedness, according to MEN-3000, RW, MTurk-771, WordSim-353, and Semeval2017-Task2. Use a
VectorSpaceWrapper to fill missing vocabulary from ConceptNet.
Return a Series containing these labeled results.
"""
if subset == 'all':
men_subset = 'test'
else:
men_subset = subset
vectors = VectorSpaceWrapper(frame=frame)
men_score = measure_correlation(spearmanr, vectors, read_men3000(men_subset))
rw_score = measure_correlation(spearmanr, vectors, read_rw(subset))
mturk_score = measure_correlation(spearmanr, vectors, read_mturk())
gur350_score = measure_correlation(spearmanr, vectors, read_gurevych('350'))
zg222_score = measure_correlation(spearmanr, vectors, read_gurevych('222'))
ws_score = measure_correlation(spearmanr, vectors, read_ws353())
ws_es_score = measure_correlation(spearmanr, vectors, read_ws353_multilingual('es'))
ws_ro_score = measure_correlation(spearmanr, vectors, read_ws353_multilingual('ro'))
pku500_score = measure_correlation(spearmanr, vectors, read_pku500())
tmu_score = measure_correlation(spearmanr, vectors, read_tmu())
results = empty_comparison_table()
results.loc['men3000'] = men_score
results.loc['rw'] = rw_score
results.loc['mturk'] = mturk_score
results.loc['gur350-de'] = gur350_score
results.loc['zg222-de'] = zg222_score
results.loc['ws353'] = ws_score
results.loc['ws353-es'] = ws_es_score
results.loc['ws353-ro'] = ws_ro_score
results.loc['pku500-zh'] = pku500_score
results.loc['tmu-rw-ja'] = tmu_score
if semeval_scope == 'global':
results.loc['semeval17-2a'] = evaluate_semeval_monolingual_global(vectors)
results.loc['semeval17-2b'] = evaluate_semeval_crosslingual_global(vectors)
else:
languages = ['en', 'de', 'es', 'it', 'fa']
for lang in languages:
results.loc['semeval-2a-{}'.format(lang)] = evaluate_semeval_monolingual(vectors, lang)
for lang1, lang2 in combinations(languages, 2):
results.loc['semeval-2b-{}-{}'.format(lang1, lang2)] = evaluate_semeval_crosslingual(
vectors, lang1, lang2)
return results
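# evaluate() returns a DataFrame indexed by evaluation name with 'acc', 'low' and
# 'high' columns, following the layout created by empty_comparison_table().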
def evaluate_raw(frame, subset='dev', semeval_scope='global'):
"""
Evaluate a DataFrame containing term vectors on its ability to predict term
relatedness, according to MEN-3000, RW, MTurk-771, WordSim-353, and Semeval2017-Task2. Return
a Series containing these labeled results.
"""
frame = frame.astype(np.float32)
men_score = measure_correlation(spearmanr, frame, read_men3000(subset))
rw_score = measure_correlation(spearmanr, frame, read_rw(subset))
mturk_score = measure_correlation(spearmanr, frame, read_mturk())
gur350_score = measure_correlation(spearmanr, frame, read_gurevych('350'))
zg222_score = measure_correlation(spearmanr, frame, read_gurevych('222'))
ws_score = measure_correlation(spearmanr, frame, read_ws353())
ws_es_score = measure_correlation(spearmanr, frame, read_ws353_multilingual('es'))
ws_ro_score = measure_correlation(spearmanr, frame, read_ws353_multilingual('ro'))
pku500_score = measure_correlation(spearmanr, frame, read_pku500())
tmu_score = measure_correlation(spearmanr, frame, read_tmu())
results = empty_comparison_table()
results.loc['men3000'] = men_score
results.loc['rw'] = rw_score
results.loc['mturk'] = mturk_score
results.loc['gur350-de'] = gur350_score
results.loc['zg222-de'] = zg222_score
results.loc['ws353'] = ws_score
results.loc['ws353-es'] = ws_es_score
results.loc['ws353-ro'] = ws_ro_score
results.loc['pku500-zh'] = pku500_score
results.loc['tmu-rw-ja'] = tmu_score
if semeval_scope == 'global':
results.loc['semeval17-2a'] = evaluate_semeval_monolingual_global(frame)
results.loc['semeval17-2b'] = evaluate_semeval_crosslingual_global(frame)
else:
languages = ['en', 'de', 'es', 'it', 'fa']
for lang in languages:
results.loc['semeval-2a-{}'.format(lang)] = evaluate_semeval_monolingual(frame, lang)
for lang1, lang2 in combinations(languages, 2):
results.loc['semeval-2b-{}-{}'.format(lang1, lang2)] = evaluate_semeval_crosslingual(
frame, lang1, lang2)
return results
def comparison_table():
comparisons = dict(COMPARISONS)
comparison_list = sorted(comparisons)
big_frame = pd.concat([comparisons[key] for key in comparison_list],
keys= | pd.MultiIndex.from_tuples(comparison_list) | pandas.MultiIndex.from_tuples |
import pandas
from google.cloud import bigquery
from google_pandas_load import LoadConfig
from tests.context.loaders import gpl1, gpl2, gpl3, gpl4, gpl5
from tests.context.resources import project_id, bq_client, \
dataset_ref, dataset_name
from tests.utils import BaseClassTest, populate_dataset, \
populate, populate_bucket, populate_local_folder
class DataDeliveryTest(BaseClassTest):
def test_query_to_bq(self):
l0 = [2, 3]
populate_dataset()
gpl3.load(
source='query',
destination='bq',
data_name='a0',
query='select 3 as x union all select 2 as x')
table_ref = dataset_ref.table(table_id='a0')
table = bq_client.get_table(table_ref)
df1 = bq_client.list_rows(table=table).to_dataframe()
l1 = sorted(list(df1.x))
self.assertEqual(l0, l1)
def test_bq_to_dataframe(self):
df0 = pandas.DataFrame(data={'x': ['data_a10_bq']})
populate()
df1 = gpl4.load(
source='bq',
destination='dataframe',
data_name='a10_bq')
self.assertTrue(gpl4.exist_in_bq('a10_bq'))
self.assertTrue(df0.equals(df1))
def test_gs_to_local(self):
populate_bucket()
gpl2.load(
source='gs',
destination='local',
data_name='a7')
self.assertEqual(len(gpl2.list_blob_uris('a7')), 1)
self.assertEqual(len(gpl2.list_local_file_paths('a7')), 1)
def test_local_to_dataframe(self):
l0 = ['data_a{}_local'.format(i) for i in range(10, 14)]
populate_local_folder()
df1 = gpl5.load(
source='local',
destination='dataframe',
data_name='a1')
l1 = sorted(list(df1.x))
self.assertEqual(l0, l1)
def test_query_to_dataframe(self):
df0 = | pandas.DataFrame(data={'x': [1, 1]}) | pandas.DataFrame |
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
import plotly.graph_objects as go
from datetime import datetime
from datetime import timedelta
import glob
import requests
from app import db
from app.models import *
from app.plots import bp
import pandas as pd
import io
from app.api import vis
from sqlalchemy import sql
import numpy as np
from app.tools.curvefit.core.model import CurveModel
from app.tools.curvefit.core.functions import gaussian_cdf, gaussian_pdf
PHU = {'the_district_of_algoma':'The District of Algoma Health Unit',
'brant_county':'Brant County Health Unit',
'durham_regional':'Durham Regional Health Unit',
'grey_bruce':'Grey Bruce Health Unit',
'haldimand_norfolk':'Haldimand-Norfolk Health Unit',
'haliburton_kawartha_pine_ridge_district':'Haliburton, Kawartha, Pine Ridge District Health Unit',
'halton_regional':'Halton Regional Health Unit',
'city_of_hamilton':'City of Hamilton Health Unit',
'hastings_and_prince_edward_counties':'Hastings and Prince Edward Counties Health Unit',
'huron_county':'Huron County Health Unit',
'chatham_kent':'Chatham-Kent Health Unit',
'kingston_frontenac_and_lennox_and_addington':'Kingston, Frontenac, and Lennox and Addington Health Unit',
'lambton':'Lambton Health Unit',
'leeds_grenville_and_lanark_district':'Leeds, Grenville and Lanark District Health Unit',
'middlesex_london':'Middlesex-London Health Unit',
'niagara_regional_area':'Niagara Regional Area Health Unit',
'north_bay_parry_sound_district':'North Bay Parry Sound District Health Unit',
'northwestern':'Northwestern Health Unit',
'city_of_ottawa':'City of Ottawa Health Unit',
'peel_regional':'Peel Regional Health Unit',
'perth_district':'Perth District Health Unit',
'peterborough_county_city':'Peterborough County–City Health Unit',
'porcupine':'Porcupine Health Unit',
'renfrew_county_and_district':'Renfrew County and District Health Unit',
'the_eastern_ontario':'The Eastern Ontario Health Unit',
'simcoe_muskoka_district':'Simcoe Muskoka District Health Unit',
'sudbury_and_district':'Sudbury and District Health Unit',
'thunder_bay_district':'Thunder Bay District Health Unit',
'timiskaming':'Timiskaming Health Unit',
'waterloo':'Waterloo Health Unit',
'wellington_dufferin_guelph':'Wellington-Dufferin-Guelph Health Unit',
'windsor_essex_county':'Windsor-Essex County Health Unit',
'york_regional':'York Regional Health Unit',
'southwestern':'Southwestern Public Health Unit',
'city_of_toronto':'City of Toronto Health Unit',
'huron_perth_county':'Huron Perth Public Health Unit'}
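# PHU maps machine-friendly slugs to the official Ontario Public Health Unit names
# used in the underlying data sources.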
def get_dir(data, today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + data['stage'] + '/'
load_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_name = data['table_name'] + '_' + today + '.' + data['type']
file_path = load_dir + '/' + file_name
return load_dir, file_path
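# Illustrative example with hypothetical input (not taken from the original code):
#   get_dir({'classification': 'processed', 'stage': 'final', 'source_name': 'phu',
#            'table_name': 'cases', 'type': 'csv'})
#   -> ('data/processed/final/phu/cases',
#       'data/processed/final/phu/cases/cases_<today>.csv')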
def get_file(data):
load_dir, file_path = get_dir(data)
files = glob.glob(load_dir + "/*." + data['type'])
files = [file.split('_')[-1] for file in files]
files = [file.split('.csv')[0] for file in files]
dates = [datetime.strptime(file, '%Y-%m-%d') for file in files]
max_date = max(dates).strftime('%Y-%m-%d')
load_dir, file_path = get_dir(data, max_date)
return file_path
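# get_file() lists the existing exports in load_dir, parses the date suffix of each
# file name and returns the path of the most recently dated file for the table.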
## Tests
def new_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New tests'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New tests'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New tests'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New tests'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New tests'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Tests<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="new tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Total tested'].tail(1).values[0],
number = {'font': {'size': 60}},
))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Total tested'],line=dict(color='#5E5AA1',dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['Total tested'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Total tested'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Tested<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def tested_positve_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
    temp = df.loc[df['New Positive pct'].notna() & (df['New Positive pct'] > 0)]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New Positive pct'].tail(1).values[0]*100,
number = {'font': {'size': 60}}
))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New Positive pct'],line=dict(color='#FFF', dash='dot'),visible=True, opacity=0.5, name="Value"))
    fig.add_trace(go.Scatter(x=temp.Date,y=temp['New Positive pct'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New Positive pct'].iloc[-2]*100,
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text': f"Percent Positivity<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tested positive").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def under_investigation_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
    temp = df.loc[df['Under Investigation'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Under Investigation'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Under Investigation'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
    fig.add_trace(go.Scatter(x=temp.Date,y=temp['Under Investigation'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Under Investigation'].iloc[-2],
'increasing': {'color':'grey'},
'decreasing': {'color':'grey'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Under Investigation<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="under investigation").first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Hospital
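# The hospital/ICU/ventilator plots take an optional `region`: 'ontario' uses the
# province-wide test results, while any other key of PHU reads the per-PHU ICU
# capacity table and stores a SQL NULL when that unit has no rows yet.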
def in_hospital_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Hospitalized'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Hospitalized'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Hospitalized'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Hospitalized'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In Hospital<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="in hospital", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def in_icu_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['ICU'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['ICU'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['ICU'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'title' : {"text": f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>"},
'mode' : "number+delta+gauge",
'delta' : {'reference': df['confirmed_positive'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':"",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def on_ventilator_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':False},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive_ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive_ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['confirmed_positive_ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Cases
def new_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
import pandas as pd
import os
# Input path for image frames location
folder = '/Users/anshulbansal/Downloads/MPI-Sintel-complete/training'
flow_path = '/Users/anshulbansal/Downloads/MPI-Sintel-complete/training'
data1 = []
files1 = [f for f in os.listdir(folder + '/final/alley_1/')] # Change folder name for creating df
files1.sort()
files2 = [f for f in os.listdir(flow_path + '/flow/alley_1/')] # Change folder name for creating df
files2.sort()
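# Each row groups three consecutive frames with the optical-flow file of the
# first frame, matching the (img1, img2, img3, flow) columns used below.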
for i in range(len(files1) - 2):
curr = ["alley1/" + files1[i], "alley1/" + files1[i + 1], "alley1/" + files1[i + 2], "alley1/" + files2[i]]
data1.append(curr)
df1 = | pd.DataFrame(data1, columns=["img1", "img2", "img3", "flow"]) | pandas.DataFrame |
import pandas as pd
import sys
import os
import holoviews as hv
from holoviews import opts, dim, Palette
import configparser
# Initializes the figures path in webpage for the diagram output
class InteractivePlots:
def __init__(self, path, ping_file_path, speed_test_file_path):
self.path = path
self.ping_file_path = ping_file_path
self.speed_test_file_name = speed_test_file_path
# Define default layout of graphs
hv.extension('bokeh')
opts.defaults(
opts.Bars(xrotation=45, tools=['hover']),
opts.BoxWhisker(width=700, xrotation=30, box_fill_color=Palette('Category20')),
opts.Curve(width=700, tools=['hover']),
opts.GridSpace(shared_yaxis=True),
opts.Scatter(width=700, height=500, color=Palette('Category20'), size=dim('growth')+5, tools=['hover'],alpha=0.5, cmap='Set1'),
opts.NdOverlay(legend_position='left'))
if os.path.isdir(os.path.join(self.path, "webpage","figures")) is False:
os.mkdir(os.path.join(self.path, "webpage","figures"))
print("Path 'figures' created successfully")
else:
print("Path 'figures' initialized")
# Load basic configurations
config = configparser.ConfigParser()
try:
config.read('./modules/config_a.ini')
# Get values from configuration file
self.upper_acceptable_ping_bound = float(config['DEFAULT']['upper_acceptable_ping_bound'])
self.upper_ping_issue_bound = float(config['DEFAULT']['upper_ping_issue_bound'])
self.acceptable_network_speed = float(config['DEFAULT']['acceptable_network_speed'])
except:
            # In case no config file is found or another reading error occurred
print("Configuration file not found/readable.")
print("Creating a new configuration file.")
# Creating new file with standard values
config['DEFAULT'] = {'upper_acceptable_ping_bound': '10',
'upper_ping_issue_bound': '99999',
'acceptable_network_speed': '16'}
with open('config_a.ini', 'w') as configfile:
config.write(configfile)
print("New configuration file was created. Running on default parameters, please restart for changes.")
#set default values to continue with program
self.upper_acceptable_ping_bound = float(config['DEFAULT']['upper_acceptable_ping_bound'])
self.upper_ping_issue_bound = float(config['DEFAULT']['upper_ping_issue_bound'])
self.acceptable_network_speed = float(config['DEFAULT']['acceptable_network_speed'])
def updateTestVariables(self, path, ping_file_path, speed_test_file_path):
self.path = path
self.ping_file_path = ping_file_path
self.speed_test_file_name = speed_test_file_path
def read_csv(self):
# try:
self.df_ping = | pd.read_csv(self.ping_file_path, index_col=0) | pandas.read_csv |
"""
A more involved example showing what data processing is possible, see: https://arxiv.org/pdf/2010.13625.pdf for the original transform
The script is possible to run as a python live script for example in Visual Studio Code.
"""
# %% Imports
from matplotlib import projections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %% Load data
df_2018 = pd.read_fwf("C:/Users/Adrian/Documents/NVI/IcrfTool/data/icrf3_src_posn", skiprows = 23, header = None)
columns_2018 = ["ICRF", "ICRF_Designation", "IERS_Destignation", "Defining_source", "Right_Ascension_h", "Right_Ascension_m", "Right_Ascension_s", "Declination_o", "Declination_prime", "Declination_bis", "Right_Ascention_Uncertainty_s", "Declination_Uncertainty_bis", "Correlation", "Mean_MJD", "First_MJD", "Last_MJD", "Nb_sess", "Nb_del", "Nb_rat"]
df_2018.columns = columns_2018
#df_2018 = df_2018[~df_2018.Defining_source.isna()]
columns_2009 = ["ICRF", "ICRF_Designation", "IERS_Destignation", "Defining_source", "Right_Ascension_h", "Right_Ascension_m", "Right_Ascension_s", "Declination_o", "Declination_prime", "Declination_bis", "Right_Ascention_Uncertainty_s", "Declination_Uncertainty_bis", "Correlation", "Mean_MJD", "First_MJD", "Last_MJD", "Nb_sess", "Nb_del"]
df_2009 = pd.read_fwf("C:/Users/Adrian/Documents/NVI/IcrfTool/data/icrf2_src_posn", skiprows = 23, header = None)
df_2009.columns = columns_2009
#df_2009 = df_2009[~df_2009.Defining_source.isna()]
# %% Transform data
def transform(df):
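    """Convert the sexagesimal RA/Dec columns to radians, the quoted
    uncertainties to degrees, and index the result by IERS designation."""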
    new_df = pd.DataFrame(
        data = {"alpha" : df.Right_Ascension_h * 2*np.pi/24 + df.Right_Ascension_m * 2*np.pi/(24*60) + df.Right_Ascension_s * 2*np.pi/(24*60*60),  # radians
                "alpha_sigma" : df.Right_Ascention_Uncertainty_s * 360/(24*60*60),  # degrees
                "delta" : df.Declination_o * 2*np.pi/360 + df.Declination_prime * 2*np.pi/(360*60) + df.Declination_bis * 2*np.pi/(360*60*60),  # radians
                "delta_sigma" : df.Declination_Uncertainty_bis * 1/(60*60),  # degrees
                "Correlation" : df.Correlation})
new_df.index = df.IERS_Destignation
return new_df
df_2009_transformed = transform(df_2009)
df_2018_transformed = transform(df_2018)
df = | pd.merge(df_2009_transformed, df_2018_transformed, how = "left", left_index=True, right_index=True, suffixes = ("_2009", "_2018")) | pandas.merge |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import progressbar
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso, BayesianRidge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import Imputer, StandardScaler, OneHotEncoder
from .encoders import EncodeCategorical
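# Decorator that runs the wrapped method once per configured algorithm, passing
# the algorithm name through the `algorithm` keyword and advancing a progress bar.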
def all_algorithms(func):
def wrapper(*args, **kwargs):
with progressbar.ProgressBar(max_value=len(args[0].algorithms)) as pbar:
for i, algorithm in enumerate(args[0].algorithms):
kwargs['algorithm'] = algorithm
func(*args, **kwargs)
pbar.update(i+1)
return wrapper
class AutoLearn(object):
def __init__(self, encode_categoricals=False, onehot=False, impute=False, standardize=False, decompose=False,
impute_strategy='mean', missing_values='NaN', target=None, id_col=None, error_metric='rmse',
algorithms={'linear', 'ridge', 'lasso', 'bayes', 'bayes_ridge', 'boost', 'forest'}):
impute_strategy_types = {'mean', 'median', 'most_frequent'}
assert impute_strategy in impute_strategy_types,\
'Strategy must be one of the following: {} {} {}'.format('mean', 'median', 'most_frequent')
self.encode_categoricals = encode_categoricals
self.onehot = onehot
self.impute = impute
self.impute_strategy = impute_strategy
self.missing_values = missing_values
self.standardize = standardize
self.decompose = decompose
self.target = target
self.id_col = id_col
self.error_metric = error_metric
self.model = {}
self.algorithms = algorithms
self.encoder_label = None
self.imputer = None
self.encoder_onehot = None
self.scaler = None
self.pca = None
for i, algorithm in enumerate(self.algorithms):
self.model[algorithm] = {}
def process_training_data(self, filename):
training_data = | pd.read_csv(filename, sep=',') | pandas.read_csv |
from glob import glob
import datetime, os, pickle, shutil
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from cdips.plotting import vetting_pdf as vp
from cdips.vetting import (
centroid_analysis as cdva,
initialize_neighborhood_information as ini
)
from numpy import array as nparr
from astropy.io import fits
from datetime import datetime
from astropy.coordinates import SkyCoord
from astropy import units as u
from astroquery.vizier import Vizier
DEBUG = 0
def make_vetting_multipg_pdf(tfa_sr_path, lcpath, outpath, mdf, sourceid,
supprow, suppfulldf, pfdf, pfrow, toidf, sector,
k13_notes_df, mask_orbit_edges=True, nworkers=40,
show_rvs=False):
"""
args:
tfa_sr_path: path to signal-reconstructed TFA lightcurve.
lcpath: path to "main" lightcurve.
outpath: where we're saving the vetting pdf, by default (if
isobviouslynottransit is False)
mdf: single row dataframe from CDIPS source catalog
supprow: dataframe with LC statistics, Gaia xmatch info, CDIPS xmatch
info. Cut to match the sourceid of whatever object is getting the PDF
made. Columns include:
'lcobj', 'cat_mag', 'med_rm1', 'mad_rm1', 'mean_rm1', 'stdev_rm1',
'ndet_rm1', 'med_sc_rm1', 'mad_sc_rm1', 'mean_sc_rm1',
...
'#Gaia-ID[1]', 'RA[deg][2]', 'Dec[deg][3]', 'RAError[mas][4]',
'DecError[mas][5]', 'Parallax[mas][6]', 'Parallax_error[mas][7]',
'PM_RA[mas/yr][8]', 'PM_Dec[mas/year][9]', 'PMRA_error[mas/yr][10]',
'PMDec_error[mas/yr][11]', 'Ref_Epoch[yr][12]', 'phot_g_mean_mag[20]',
'phot_bp_mean_mag[25]', 'phot_rp_mean_mag[30]', 'radial_velocity[32]',
'radial_velocity_error[33]', 'teff_val[35]',
'teff_percentile_lower[36]', 'teff_percentile_upper[37]', 'a_g_val[38]',
'a_g_percentile_lower[39]', 'a_g_percentile_upper[40]',
'e_bp_min_rp_val[41]', 'e_bp_min_rp_percentile_lower[42]',
'e_bp_min_rp_percentile_upper[43]', 'radius_val[44]',
'radius_percentile_lower[45]', 'radius_percentile_upper[46]',
'lum_val[47]', 'lum_percentile_lower[48]', 'lum_percentile_upper[49]'
...
'cluster', 'ext_catalog_name', 'reference', 'source_id'
suppfulldf: as above, but the whole dataframe for all CDIPS sources
that got lightcurves for this sector. Useful for broader assessment of
the LC within the sample of cluster lightcurves.
pfdf: dataframe with period finding results for everything from this
sector. good to check on matching ephemerides.
toidf: dataframe with alerted TOI results
"""
hdul_sr = fits.open(tfa_sr_path)
hdul = fits.open(lcpath)
lc_sr = hdul_sr[1].data
lc, hdr = hdul[1].data, hdul[0].header
# define "detrended mag". by default, this is the TFASR signal. however,
# if residual stellar variability was found after TFA detrending, then,
# this is defined as the RAW LC + penalized spline detrending.
# NOTE: a possible hack to force detrending from the raw LC can be set
# below. By default, it's not.
# pfrow['pspline_detrended'].iloc[0] = True
is_pspline_dtr = bool(pfrow['pspline_detrended'].iloc[0])
# Create the PdfPages object to which we will save the pages...
with PdfPages(outpath) as pdf:
##########
# page 1
##########
fluxap = 'IRM2' if is_pspline_dtr else 'TFASR2'
tfaap = 'TFA2' if is_pspline_dtr else 'TFASR2'
try:
fig, tlsp, _ = vp.two_periodogram_checkplot(
lc_sr, hdr, supprow, pfrow, mask_orbit_edges=mask_orbit_edges,
fluxap=fluxap, nworkers=nworkers)
except Exception as e:
# NOTE: if this is raised, probably a detrending singularity issue.
# perhaps fine-tune the cutoff further.
raise(e)
return
pdf.savefig(fig)
plt.close()
if pd.isnull(tlsp):
return
##########
# page 2
##########
ap_index=2
time, rawmag, tfasrmag, bkgdval, tfatime = (
lc['TMID_BJD'],
lc['IRM2'],
lc_sr[tfaap],
lc['BGV'],
lc_sr['TMID_BJD']
)
t0, per = tlsp['tlsresult'].T0, tlsp['tlsresult'].period
midtimes = np.array([t0 + ix*per for ix in range(-100,100)])
obsd_midtimes = midtimes[ (midtimes > np.nanmin(time)) &
(midtimes < np.nanmax(time)) ]
tmag = hdr['TESSMAG']
customstr = '\nT = {:.1f}'.format(float(tmag))
fig = vp.plot_raw_tfa_bkgd(time, rawmag, tfasrmag, bkgdval, ap_index,
supprow, pfrow,
obsd_midtimes=obsd_midtimes,
xlabel='BJDTDB', customstr=customstr,
tfatime=tfatime, is_tfasr=True,
figsize=(30,20))
pdf.savefig(fig)
plt.close()
##########
# page 3 -- it's a QLP ripoff
##########
fig, infodict = vp.transitcheckdetails(
rawmag, time, tlsp, mdf, hdr, supprow, pfrow,
obsd_midtimes=obsd_midtimes, tfamag=tfasrmag, tfatime=tfatime,
figsize=(30,20)
)
pdf.savefig(fig)
plt.close()
##########
# page 4
##########
fig, apdict = vp.scatter_increasing_ap_size(
lc_sr, pfrow, infodict=infodict, obsd_midtimes=obsd_midtimes,
customstr=customstr, xlabel='BJDTDB', figsize=(30,20),
auto_depth_vs_apsize=True
)
pdf.savefig(fig)
plt.close()
##########
# page 5
##########
fig, mmbr_dict = vp.cluster_membership_check(hdr, supprow, infodict,
suppfulldf, mdf,
k13_notes_df,
figsize=(30,16))
pdf.savefig(fig)
plt.close()
##########
# page 6
##########
ra_obj, dec_obj = hdr['RA_OBJ'], hdr['DEC_OBJ']
c_obj = SkyCoord(ra_obj, dec_obj, unit=(u.deg), frame='icrs')
t0,per,dur,sourceid = (float(pfrow['tls_t0']),
float(pfrow['tls_period']),
float(pfrow['tls_duration']),
int(pfrow['source_id'].iloc[0]) )
outdir = ("/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_cutouts/"
"sector-{}_TFA_SR_pkl".format(sector))
if not os.path.exists(outdir):
os.mkdir(outdir)
cd = cdva.measure_centroid(t0,per,dur,sector,sourceid,c_obj,outdir)
catalog_to_gaussian_sep_arcsec = None
if isinstance(cd, dict):
#
# check whether the measured ephemeris matches other TCEs. cutoffs
# below came by looking at distribution of errors on the QLP quoted
# parameters.
#
tls_period, tls_t0 = nparr(pfdf['tls_period']), nparr(pfdf['tls_t0'])
ras, decs = nparr(pfdf['ra_x']), nparr(pfdf['dec_x'])
coords = SkyCoord(ras, decs, unit=(u.deg), frame='icrs')
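            # TESS pixels are ~21 arcsec on a side, so divide to express separations in pixels.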
seps_px = c_obj.separation(coords).to(u.arcsec).value/21
period_cutoff = 2e-3 # about 3 minutes
t0_cutoff = 5e-3 # 7 minutes
close_per = np.abs(tls_period - per) < period_cutoff
close_t0 = np.abs(tls_t0 - t0) < t0_cutoff
is_close = close_per & close_t0
if len(seps_px[is_close]) > 1:
_pfdf = pfdf.loc[is_close]
_pfdf['seps_px'] = seps_px[is_close]
_pfdf = _pfdf[_pfdf['source_id'] != sourceid]
else:
_pfdf = None
fig, catalog_to_gaussian_sep_arcsec = (
vp.centroid_plots(c_obj, cd, hdr, _pfdf, toidf, figsize=(30,24))
)
if fig is not None:
pdf.savefig(fig)
plt.close()
if not isinstance(catalog_to_gaussian_sep_arcsec, float):
catalog_to_gaussian_sep_arcsec = 0
infodict['catalog_to_gaussian_sep_arcsec'] = (
catalog_to_gaussian_sep_arcsec
)
##########
# page 7
##########
info = (
ini.get_group_and_neighborhood_information(
sourceid, mmbr_dict=mmbr_dict, k13_notes_df=k13_notes_df,
overwrite=0)
)
if isinstance(info, tuple):
if DEBUG:
picklepath = 'nbhd_info_{}.pkl'.format(sourceid)
with open(picklepath , 'wb') as f:
pickle.dump(info, f)
print('made {}'.format(picklepath))
(targetname, groupname, group_df_dr2, target_df, nbhd_df,
cutoff_probability, pmdec_min, pmdec_max, pmra_min, pmra_max,
group_in_k13, group_in_cg18, group_in_kc19, group_in_k18
) = info
fig = vp.plot_group_neighborhood(
targetname, groupname, group_df_dr2, target_df, nbhd_df,
cutoff_probability, pmdec_min=pmdec_min, pmdec_max=pmdec_max,
pmra_min=pmra_min, pmra_max=pmra_max,
group_in_k13=group_in_k13, group_in_cg18=group_in_cg18,
group_in_kc19=group_in_kc19, group_in_k18=group_in_k18,
source_id=sourceid, figsize=(30,20), show_rvs=show_rvs
)
pdf.savefig(fig)
plt.close()
elif info is None:
info = ini.get_neighborhood_information(sourceid, overwrite=0,
min_n_nbhrs=1000)
if info is not None:
(targetname, groupname, target_df, nbhd_df, pmdec_min,
pmdec_max, pmra_min, pmra_max) = info
fig = vp.plot_neighborhood_only(
targetname, groupname, target_df, nbhd_df,
pmdec_min=pmdec_min, pmdec_max=pmdec_max,
pmra_min=pmra_min, pmra_max=pmra_max,
source_id=sourceid, figsize=(30,20),
)
pdf.savefig(fig)
plt.close()
else:
raise NotImplementedError
##########
# set the file's metadata via the PdfPages object:
##########
d = pdf.infodict()
d['Title'] = 'CDIPS vetting report for GAIADR2-{}'.format(sourceid)
d['Author'] = '<NAME>'
d['Keywords'] = 'stars | planets'
d['CreationDate'] = datetime.today()
d['ModDate'] = datetime.today()
picklepath = outpath.replace('pdfs','pkls').replace('.pdf','.pkl')
with open(picklepath,'wb') as f:
pickle.dump(infodict, f)
print('made {}'.format(picklepath))
##########
        # check if the candidate is obviously not a transit. this will be the case if:
# * ndet_tf2 < 100
# * depth < 0.85
# * rp > 6 R_jup = 67.25 R_earth. Based on 2x the limit given at 1Myr
# in Burrows+2001, figure 3.
# * rp > 3 R_jup and age > 1Gyr
# * SNR from TLS is < 8 (these will be unbelievable no matter what.
# they might exist b/c TFA SR could have lowered overall SDE/SNR)
# * primary/secondary depth ratios from gaussian fitting in range of
# ~1.2-5, accounting for 1-sigma formal uncertainty in measurement
# * the OOT - intra image gaussian fit centroid is > 2 pixels off the
# catalog position.
# * Kharchenko+2013 gave a cluster parallax, and the GaiaDR2 measured
# parallax is >5 sigma away from it. (these are 99% of the time
        # background stars).
##########
time_key = 'logt' if 'logt' in list(supprow.columns) else 'k13_logt'
logt = str(supprow[time_key].iloc[0])
if not | pd.isnull(logt) | pandas.isnull |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
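# Factory helpers: build DataSet subclasses parameterized by how many
# announcements out the loader should look (via `num_announcements`).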
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
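# Example of the `tuples` argument consumed above (values are hypothetical):
#   [(0, 100.0, pd.Timestamp('2015-01-09')),
#    (10, 110.0, pd.Timestamp('2015-01-12'))]
# i.e. one (sid, estimate, knowledge_date) triple per piece of new information.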
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
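# Minimal single-row events frame used by the loader-misconfiguration tests
# below; only the column layout matters, not the values.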
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
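        # Base case: subclasses override this; by default report no expected estimate.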
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import geopandas as gpd
import pandas as pd
import numpy as np
from .grids import GPS_to_grid
from .preprocess import id_reindex, clean_same
def plot_activity(data, col=['stime', 'etime', 'LONCOL', 'LATCOL'],
figsize=(10, 5), dpi=250):
'''
Plot the activity timeline of an individual
Parameters
----------------
data : DataFrame
activity information of one person
col : List
The column names of the activities, in the order [starttime, endtime, LONCOL, LATCOL]
'''
stime, etime, LONCOL, LATCOL = col
activity = data.copy()
activity['date'] = activity[stime].dt.date
dates = list(activity['date'].astype(str).drop_duplicates())
dates_all = []
minday = min(dates)
maxday = max(dates)
import datetime
thisdate = minday
while thisdate != maxday:
dates_all.append(thisdate)
thisdate = str((pd.to_datetime(thisdate+' 00:00:00') +
datetime.timedelta(days=1)).date())
dates = dates_all
import matplotlib.pyplot as plt
import numpy as np
activity['duration'] = (activity[etime]-activity[stime]).dt.total_seconds()
activity = activity[~activity['duration'].isnull()]
import time
activity['ststmp'] = activity[stime].astype(str).apply(
lambda x: time.mktime(
time.strptime(x, '%Y-%m-%d %H:%M:%S'))).astype('int64')
activity['etstmp'] = activity[etime].astype(str).apply(
lambda x: time.mktime(
time.strptime(x, '%Y-%m-%d %H:%M:%S'))).astype('int64')
activityinfo = activity[[LONCOL, LATCOL]].drop_duplicates()
indexs = list(range(1, len(activityinfo)+1))
np.random.shuffle(indexs)
activityinfo['index'] = indexs
import matplotlib as mpl
norm = mpl.colors.Normalize(vmin=0, vmax=len(activityinfo))
from matplotlib.colors import ListedColormap
import seaborn as sns
cmap = ListedColormap(sns.hls_palette(
n_colors=len(activityinfo), l=.5, s=0.8))
plt.figure(1, figsize, dpi)
ax = plt.subplot(111)
plt.sca(ax)
for day in range(len(dates)):
plt.bar(day, height=24*3600, bottom=0, width=0.4, color=(0, 0, 0, 0.1))
stime = dates[day]+' 00:00:00'
etime = dates[day]+' 23:59:59'
bars = activity[(activity['stime'] < etime) &
(activity['etime'] > stime)].copy()
bars['ststmp'] = bars['ststmp'] - \
time.mktime(time.strptime(stime, '%Y-%m-%d %H:%M:%S'))
bars['etstmp'] = bars['etstmp'] - \
time.mktime(time.strptime(stime, '%Y-%m-%d %H:%M:%S'))
for row in range(len(bars)):
plt.bar(day,
height=bars['etstmp'].iloc[row]-bars['ststmp'].iloc[row],
bottom=bars['ststmp'].iloc[row],
color=cmap(
norm(
activityinfo[
(activityinfo[LONCOL] == bars[LONCOL].
iloc[row]) &
(activityinfo[LATCOL] ==
bars[LATCOL].iloc[row])
]['index'].iloc[0])))
plt.xlim(-0.5, len(dates))
plt.ylim(0, 24*3600)
plt.xticks(range(len(dates)), [i[-5:] for i in dates])
plt.yticks(range(0, 24*3600+1, 3600),
pd.DataFrame({'t': range(0, 25)})['t'].astype('str')+':00')
plt.show()
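# Illustrative usage sketch (not part of the original module): `stay` is assumed
# to be the stay table returned by traj_stay_move below, whose 'stime'/'etime'
# columns are datetimes and whose grid columns are 'LONCOL'/'LATCOL'.
def _example_plot_activity(stay):
    plot_activity(stay, col=['stime', 'etime', 'LONCOL', 'LATCOL'],
                  figsize=(10, 5), dpi=250)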
def traj_stay_move(data, params,
col=['ID', 'dataTime', 'longitude', 'latitude'],
activitytime=1800):
'''
Identify stays and moves from trajectory data, given gridding parameters
Parameters
----------------
data : DataFrame
trajectory data
params : List
gridding parameters
col : List
The column name, in the order of ['ID','dataTime','longitude',
'latitude']
activitytime : Number
Minimum duration (in seconds) for a stop to be regarded as an activity
Returns
----------------
stay : DataFrame
stay information
move : DataFrame
move information
'''
uid, timecol, lon, lat = col
trajdata = data.copy()
trajdata[timecol] = pd.to_datetime(trajdata[timecol])
trajdata['LONCOL'], trajdata['LATCOL'] = GPS_to_grid(
trajdata[lon], trajdata[lat], params)
trajdata = clean_same(trajdata, col=[uid, timecol, 'LONCOL', 'LATCOL'])
trajdata['stime'] = trajdata[timecol]
trajdata['etime'] = trajdata[timecol].shift(-1)
trajdata[uid+'_next'] = trajdata[uid].shift(-1)
trajdata = trajdata[trajdata[uid+'_next'] == trajdata[uid]]
trajdata['duration'] = (
trajdata['etime'] - trajdata['stime']).dt.total_seconds()
activity = trajdata[[uid, lon, lat, 'stime',
'etime', 'duration', 'LONCOL', 'LATCOL']]
activity = activity[activity['duration'] >= activitytime].rename(
columns={lon: 'lon', lat: 'lat'})
stay = activity.copy()
activity['stime_next'] = activity['stime'].shift(-1)
activity['elon'] = activity['lon'].shift(-1)
activity['elat'] = activity['lat'].shift(-1)
activity['ELONCOL'] = activity['LONCOL'].shift(-1)
activity['ELATCOL'] = activity['LATCOL'].shift(-1)
activity[uid+'_next'] = activity[uid].shift(-1)
activity = activity[activity[uid+'_next'] == activity[uid]
].drop(['stime', 'duration', uid+'_next'], axis=1)
activity = activity.rename(columns={'lon': 'slon',
'lat': 'slat',
'etime': 'stime',
'stime_next': 'etime',
'LONCOL': 'SLONCOL',
'LATCOL': 'SLATCOL',
})
activity['duration'] = (
activity['etime'] - activity['stime']).dt.total_seconds()
move = activity.copy()
return stay, move
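# Illustrative usage sketch (not part of the original module). The file name,
# column names and gridding parameters below are assumptions for the example;
# `params` follows the [lon_start, lat_start, delta_lon, delta_lat] convention
# expected by GPS_to_grid.
def _example_stay_move():
    traj = pd.read_csv('TaxiData-Sample.csv',
                       names=['ID', 'dataTime', 'longitude', 'latitude'])
    params = [113.75, 22.4, 0.005, 0.005]
    return traj_stay_move(traj, params,
                          col=['ID', 'dataTime', 'longitude', 'latitude'],
                          activitytime=1800)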
def traj_densify(data, col=['Vehicleid', 'Time', 'Lng', 'Lat'], timegap=15):
'''
Trajectory densification: ensure that there is a trajectory point every
timegap seconds
Parameters
-------
data : DataFrame
Data
col : List
The column name, in the sequence of [Vehicleid, Time, lng, lat]
timegap : number
The sampling interval (second)
Returns
-------
data1 : DataFrame
The processed data
'''
Vehicleid, Time, Lng, Lat = col
data[Time] = pd.to_datetime(data[Time])
data1 = data.copy()
data1 = data1.drop_duplicates([Vehicleid, Time])
data1 = id_reindex(data1, Vehicleid)
data1 = data1.sort_values(by=[Vehicleid+'_new', Time])
data1['utctime'] = data1[Time].apply(lambda r: int(r.value/1000000000))
data1['utctime_new'] = data1[Vehicleid+'_new']*10000000000+data1['utctime']
a = data1.groupby([Vehicleid+'_new']
)['utctime'].min().rename('mintime').reset_index()
b = data1.groupby([Vehicleid+'_new']
)['utctime'].max().rename('maxtime').reset_index()
minmaxtime = pd.merge(a, b)
mintime = data1['utctime'].min()
maxtime = data1['utctime'].max()
timedata = pd.DataFrame(range(mintime, maxtime, timegap), columns=[Time])
timedata['tmp'] = 1
minmaxtime['tmp'] = 1
minmaxtime = pd.merge(minmaxtime, timedata)
minmaxtime = minmaxtime[(minmaxtime['mintime'] <= minmaxtime[Time]) & (
minmaxtime['maxtime'] >= minmaxtime[Time])]
minmaxtime['utctime_new'] = minmaxtime[Vehicleid+'_new'] * \
10000000000+minmaxtime[Time]
minmaxtime[Time] = pd.to_datetime(minmaxtime[Time], unit='s')
data1 = pd.concat([data1, minmaxtime[['utctime_new', Time]]]
).sort_values(by=['utctime_new'])
data1 = data1.drop_duplicates(['utctime_new'])
data1[Lng] = data1.set_index('utctime_new')[
Lng].interpolate(method='index').values
data1[Lat] = data1.set_index('utctime_new')[
Lat].interpolate(method='index').values
data1[Vehicleid] = data1[Vehicleid].ffill()
data1[Vehicleid] = data1[Vehicleid].bfill()
data1 = data1.drop([Vehicleid+'_new', 'utctime', 'utctime_new'], axis=1)
return data1
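# Illustrative usage sketch (not part of the original module): densify a GPS
# table so that there is a point every 15 seconds. The file and column names
# are assumptions for the example only.
def _example_densify():
    data = pd.read_csv('TaxiData-Sample.csv',
                       names=['Vehicleid', 'Time', 'Lng', 'Lat'])
    return traj_densify(data, col=['Vehicleid', 'Time', 'Lng', 'Lat'], timegap=15)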
def traj_sparsify(data, col=['Vehicleid', 'Time', 'Lng', 'Lat'], timegap=15,
method='subsample'):
'''
Trajectory sparsification. When the sampling frequency of trajectory data
is too high, the data volume becomes unnecessarily large for analyses that
only need a lower sampling frequency. This function widens the sampling
interval and reduces the amount of data.
Parameters
-------
data : DataFrame
Data
col : List
The column name, in the sequence of [Vehicleid, Time, lng, lat]
timegap : number
Time gap between trajectory point
method : str
'interpolate' or 'subsample'
Returns
-------
data1 : DataFrame
Sparsified trajectory data
'''
Vehicleid, Time, Lng, Lat = col
data[Time] = pd.to_datetime(data[Time], unit='s')
data1 = data.copy()
data1 = data1.drop_duplicates([Vehicleid, Time])
data1 = id_reindex(data1, Vehicleid)
data1 = data1.sort_values(by=[Vehicleid+'_new', Time])
data1['utctime'] = data1[Time].apply(lambda r: int(r.value/1000000000))
data1['utctime_new'] = data1[Vehicleid+'_new']*10000000000+data1['utctime']
if method == 'interpolate':
a = data1.groupby([Vehicleid+'_new']
)['utctime'].min().rename('mintime').reset_index()
b = data1.groupby([Vehicleid+'_new']
)['utctime'].max().rename('maxtime').reset_index()
minmaxtime = pd.merge(a, b)
mintime = data1['utctime'].min()
maxtime = data1['utctime'].max()
timedata = pd.DataFrame(
range(mintime, maxtime, timegap), columns=[Time])
timedata['tmp'] = 1
minmaxtime['tmp'] = 1
minmaxtime = pd.merge(minmaxtime, timedata)
minmaxtime = minmaxtime[(minmaxtime['mintime'] <= minmaxtime[Time]) & (
minmaxtime['maxtime'] >= minmaxtime[Time])]
minmaxtime['utctime_new'] = minmaxtime[Vehicleid+'_new'] * \
10000000000+minmaxtime[Time]
minmaxtime[Time] = pd.to_datetime(minmaxtime[Time], unit='s')
data1 = pd.concat([
data1, minmaxtime[['utctime_new', Time]]
]).sort_values(by=['utctime_new'])
data1 = data1.drop_duplicates(['utctime_new'])
data1[Lng] = data1.set_index('utctime_new')[
Lng].interpolate(method='index').values
data1[Lat] = data1.set_index('utctime_new')[
Lat].interpolate(method='index').values
data1[Vehicleid] = data1[Vehicleid].ffill()
data1[Vehicleid] = data1[Vehicleid].bfill()
data1 = pd.merge(minmaxtime['utctime_new'], data1)
from piper.custom import ratio
import datetime
import numpy as np
import pandas as pd
import pytest
from time import strptime
from piper.custom import add_xl_formula
from piper.factory import sample_data
from piper.factory import generate_periods, make_null_dates
from piper.custom import from_julian
from piper.custom import fiscal_year
from piper.custom import from_excel
from piper.custom import to_julian
from piper.verbs import across
# t_sample_data {{{1
@pytest.fixture
def t_sample_data():
return sample_data()
# test_add_xl_formula {{{1
def test_add_xl_formula(t_sample_data):
df = t_sample_data
formula = '=CONCATENATE(A{row}, B{row}, C{row})'
add_xl_formula(df, column_name='X7', formula=formula)
expected = (367, )
assert expected == df.X7.shape
# test_across_str_date_single_col_pd_to_datetime {{{1
def test_across_str_date_single_col_pd_to_datetime():
''' '''
test = ['30/11/2019', '29/4/2019', '30/2/2019', '28/2/2019', '2019/4/30']
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', pd.to_datetime, format='%d/%m/%Y', errors='coerce')
assert exp.equals(got) == True
# test_across_str_date_single_col_lambda {{{1
def test_across_str_date_single_col_lambda():
''' '''
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, 'dates', convert_date)
assert exp.equals(got) == True
# test_across_raise_column_parm_none_ValueError {{{1
def test_across_raise_column_parm_none():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
got = across(got, columns=None, function=convert_date)
assert exp.equals(got) == True
# test_across_raise_function_parm_none_ValueError {{{1
def test_across_raise_function_parm_none_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='dates', function=None)
# test_across_raise_Series_parm_TypeError {{{1
def test_across_raise_Series_parm_TypeError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(TypeError):
got = across(pd.Series(test), columns='dates', function=convert_date)
# test_across_raise_column_parm_ValueError {{{1
def test_across_raise_column_parm_ValueError():
convert_date = lambda x: pd.to_datetime(x, dayfirst=True, format='%d%m%Y', errors='coerce')
test = [30112019, 2942019, 3022019, 2822019, 2019430]
got = pd.DataFrame(test, columns=['dates'])
# Convert expected values to datetime format
exp = ['30/11/2019', '29/4/2019', pd.NaT, '28/2/2019', pd.NaT]
exp = pd.DataFrame(exp, columns=['dates'])
exp.dates = exp.dates.astype('datetime64[ns]')
with pytest.raises(ValueError):
got = across(got, columns='invalid', function=convert_date)
# test_across_dataframe_single_column_with_lambda {{{1
def test_across_dataframe_single_column_with_lambda():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
got = across(df, columns='effective', function=convert_date)
assert exp.equals(got) == True
# test_across_dataframe_multiple_columns_with_lambda {{{1
def test_across_dataframe_multiple_columns_with_lambda():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
exp.expired = exp.expired.apply(convert_date)
got = across(df, columns=['effective', 'expired'], function=convert_date)
assert exp.equals(got) == True
# test_across_dataframe_multiple_columns_raise_invalid_column {{{1
def test_across_dataframe_multiple_columns_raise_invalid_column():
convert_date = lambda x: x.strftime('%b %-d, %Y') if not x is pd.NaT else x
df = generate_periods(delta_range=(1, 10), rows=20)
df = make_null_dates(df, null_values_percent=.2)
exp = df.copy(deep=True)
exp.effective = exp.effective.apply(convert_date)
exp.expired = exp.expired.apply(convert_date)
with pytest.raises(ValueError):
got = across(df, columns=['effective', 'invalid'], function=convert_date)
# test_dividing_numbers {{{1
def test_dividing_numbers():
''' '''
exp = 1
got = ratio(2, 2)
assert exp == got
# test_dividing_numbers_by_zero {{{1
def test_dividing_numbers_by_zero():
''' '''
exp = np.inf
got = ratio(2, 0)
assert exp == got
# test_dividing_numbers_floats {{{1
def test_dividing_numbers_floats():
''' '''
exp = 1.0
got = ratio(2.0, 2.0)
assert exp == got
# test_dividing_numbers_float_percent {{{1
def test_dividing_numbers_float_percent():
''' '''
exp = '100.0%'
got = ratio(2.0, 2.0, percent=True)
assert exp == got
# test_dividing_numbers_float_percent_with_round {{{1
def test_dividing_numbers_float_percent_with_round():
''' '''
exp = 100.0000
got = ratio(2.0, 2.0, percent=True, format=False, precision=4)
assert exp == got
exp = 50.00
got = ratio(1.0, 2.0, percent=True, format=False, precision=2)
assert exp == got
# test_dividing_numbers_int_percent_with_round {{{1
def test_dividing_numbers_int_percent_with_round():
''' '''
exp = 100.0000
got = ratio(2, 2, percent=True, format=False, precision=4)
assert exp == got
exp = 50.00
got = ratio(1, 2, percent=True, format=False, precision=2)
assert exp == got
# test_dividing_numbers_percent_with_format {{{1
def test_dividing_numbers_percent_with_format():
''' '''
exp = '100.0%'
got = ratio(2.0, 2.0, percent=True, format=True)
assert exp == got
# test_dividing_numbers_percent_with_precision_format {{{1
def test_dividing_numbers_percent_with_precision_format():
''' '''
exp = '66.66%'
got = ratio(1.3333, 2.0, percent=True,
precision=2, format=True)
assert exp == got
# test_dividing_by_two_series {{{1
def test_dividing_by_two_series():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1, 2, 3])
exp = pd.Series([10, 10, 10], dtype=float)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_zero_denominator {{{1
def test_dividing_by_two_series_with_zero_denominator():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1, 0, 3])
exp = pd.Series([10, np.inf, 10], dtype=float)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_decimals {{{1
def test_dividing_by_two_series_with_decimals():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1.3, 5.4, 3])
exp = (s1 / s2).round(2)
got = ratio(s1, s2)
assert exp.equals(got)
# test_dividing_by_two_series_with_rounding {{{1
def test_dividing_by_two_series_with_rounding():
''' '''
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([1.3, 5.4, 3])
exp = (s1 / s2).round(2)
got = ratio(s1, s2, precision=2)
assert exp.equals(got)
exp = (s1 / s2).round(4)
got = ratio(s1, s2, precision=4)
assert exp.equals(got)
# test_dividing_by_two_series_with_format {{{1
def test_dividing_by_two_series_with_format():
s1 = pd.Series([10, 20, 30])
s2 = pd.Series([100, 200, 300])
exp = pd.Series(['10.0%', '10.0%', '10.0%'])
got = ratio(s1, s2, precision=2, percent=True, format=True)
assert exp.equals(got)
# test_fiscal_year {{{1
def test_fiscal_year():
assert fiscal_year(pd.Timestamp('2014-01-01')
from functools import wraps
import numpy as np
import datetime as dt
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype, is_object_dtype, is_string_dtype
from sklearn.decomposition import NMF, TruncatedSVD
from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.pipeline import make_pipeline
#TODO - create a simple class to dummify date columns
def dummify_date_cols(df):
if 'giadmd' in df.columns:
df['giadmd'] = pd.to_datetime(df['giadmd'], errors='coerce')
df['giadmd_year'] = df['giadmd'].dt.year.astype('Int64').astype('object')
df['giadmd_month'] = df['giadmd'].dt.month.astype('Int64').astype('object')
df = df.drop('giadmd', axis=1)
if 'girefs' in df.columns:
df['girefs'] = pd.to_datetime(df['girefs'], errors='coerce')
df['girefs_year'] = df['girefs'].dt.year.astype('Int64').astype('object')
df['girefs_month'] = df['girefs'].dt.month.astype('Int64').astype('object')
df = df.drop('girefs', axis=1)
if 'gidscd' in df.columns:
df['gidscd'] = pd.to_datetime(df['gidscd'], errors='coerce')
df['gidscd_year'] = df['gidscd'].dt.year.astype('Int64').astype('object')
df['gidscd_month'] = df['gidscd'].dt.month.astype('Int64').astype('object')
df = df.drop('gidscd', axis=1)
print("Shape after dummify:", df.shape)
return df
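# Possible generalisation of the repeated blocks above (addresses the TODO):
# iterate over a list of date columns instead of hard-coding each branch. The
# default column list is taken from the branches above; anything else here is
# only a sketch, not part of the original pipeline.
def dummify_date_cols_generic(df, date_cols=('giadmd', 'girefs', 'gidscd')):
    for col in date_cols:
        if col in df.columns:
            df[col] = pd.to_datetime(df[col], errors='coerce')
            df[col + '_year'] = df[col].dt.year.astype('Int64').astype('object')
            df[col + '_month'] = df[col].dt.month.astype('Int64').astype('object')
            df = df.drop(col, axis=1)
    print("Shape after dummify:", df.shape)
    return df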
def format_missings(df):
for column in df.columns:
if is_numeric_dtype(df[column]):
fill_value = df[column].mean()
df[column] = df[column].fillna(fill_value, downcast=False)
elif is_object_dtype(df[column]) or is_string_dtype(df[column]):
df[column] = df[column].fillna('MISSING', downcast=False)
print("Shape after format_missing:", df.shape)
return df
def remove_features_with_missing_values(df, na_thres):
return df.loc[:, df.isna().mean() < na_thres]
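# Illustrative ordering sketch (not from the original file): drop very sparse
# columns first, then impute the rest. The 0.3 threshold is an assumption.
# df = remove_features_with_missing_values(df, na_thres=0.3)
# df = format_missings(df)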
def clean_floats(x):
if pd.isnull(x):
return x
elif type(x) is float:
return str(int(x))
else:
return x
def clean_up_floats(df):
for col in df.columns:
if is_object_dtype(df[col]) or is_string_dtype(df[col]):
import argparse
import json
import numpy as np
import os
import pandas as pd
import torch
from glob import glob
from scipy.ndimage.morphology import binary_fill_holes
from skimage.io import imread
from skimage.morphology import disk, binary_erosion, label
from skimage.transform import downscale_local_mean
from tqdm import tqdm
from dataset import TomoDetectionDataset as Dataset
from dense_yolo import DenseYOLO
from subsets import data_frame_subset
cell_size = Dataset.cell_size
# use a larger grid size at inference time so the full image can be processed without cropping
img_height = cell_size * 12
img_width = cell_size * 9
grid_size = (img_height // cell_size, img_width // cell_size)
anchor = Dataset.anchor
def main(args, config):
data_frame = data_frame_subset(
args.data_views, args.data_boxes, args.subset, seed=args.seed
)
pred_data_frame = pd.DataFrame()
'''
Examples of loading data from the FinMind API
'''
from FinMind.Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset':'TaiwanStockInfo'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
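'''----------------helper (illustrative, not part of the original example)----------------'''
# Every request below follows the same POST pattern, so a small wrapper like
# this sketch can remove the repetition; the parameter names mirror the
# form_data keys used throughout this script.
def get_finmind_dataset(dataset, stock_id=None, date=None):
    form_data = {'dataset': dataset}
    if stock_id is not None:
        form_data['stock_id'] = stock_id
    if date is not None:
        form_data['date'] = date
    res = requests.post(url, verify=True, data=form_data)
    return pd.DataFrame(res.json()['data'])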
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset':'TaiwanStockPrice',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset':'TaiwanStockPriceMinute',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset':'FinancialStatements',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset':'TaiwanCashFlowsStatement',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockStockDividend----------------'''
form_data = {'dataset':'TaiwanStockStockDividend',
'stock_id':'2317',
'date':'2018-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockMarginPurchaseShortSale----------------'''
form_data = {'dataset':'TaiwanStockMarginPurchaseShortSale',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------InstitutionalInvestorsBuySell----------------'''
form_data = {'dataset':'InstitutionalInvestorsBuySell',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Shareholding----------------'''
form_data = {'dataset':'Shareholding',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------BalanceSheet----------------'''
form_data = {'dataset':'BalanceSheet',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockHoldingSharesPer----------------'''
form_data = {'dataset':'TaiwanStockHoldingSharesPer',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockMonthRevenue----------------'''
form_data = {'dataset':'TaiwanStockMonthRevenue',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanOption----------------'''
form_data = {'dataset':'TaiwanOption'}
res = requests.post(
translate_url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
form_data = {'dataset':'TaiwanOption',
'stock_id':'OCO',
'date':'2019-09-05',
}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanFutures----------------'''
#load stock_id table, used as input for the stock_id parameter below
form_data = {'dataset':'TaiwanFutures'}
res = requests.post(
translate_url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
form_data = {'dataset':'TaiwanFutures',
'stock_id':'MTX',
'date':'2019-09-02',
}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------USStockInfo----------------'''
form_data = {'dataset':'USStockInfo'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------USStockPrice----------------'''
form_data = {'dataset':'USStockPrice',
'stock_id':'^DJI',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------USStockPriceMinute----------------'''
form_data = {'dataset':'USStockPriceMinute',
'stock_id':'MTX',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset':'FinancialStatements',
'stock_id':'AAPL',
'date':'2018-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------JapanStockInfo----------------'''
form_data = {'dataset':'JapanStockInfo'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------JapanStockPrice----------------'''
form_data = {'dataset':'JapanStockPrice',
'stock_id':'1376.T',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
"""Unittests for the functions in time."""
import unittest
import numpy as np
import pandas as pd
import gnssmapper.common.time as tm
from gnssmapper.common.constants import gps_epoch
import pandas.testing as pt
class TestTime(unittest.TestCase):
def test_gps_utc(self) -> None:
ns = pd.Series([(1167264018 * 10**9),1167264018*10**9+1]).convert_dtypes()
ts = pd.Series([pd.Timestamp(year=2017,month=1,day=1,hour=0,minute=0,second=0,nanosecond=0),pd.Timestamp(year=2017,month=1,day=1,hour=0,minute=0,second=0,nanosecond=1)])
pt.assert_extension_array_equal(tm.gps_to_utc(ns).array,ts.array,check_exact=True)
pt.assert_extension_array_equal(tm.utc_to_gps(ts).array,ns.array,check_exact=True)
def test_gps_doy(self) -> None:
ns = pd.Series([1,2]).convert_dtypes()
ts = pd.DataFrame({'date': ['1980006', '1980006'], 'time': [1, 2]}).convert_dtypes()
pt.assert_frame_equal(tm.gps_to_doy(ns).astype('float64'),ts.astype('float64'),check_exact=True,check_dtype=False)
pt.assert_extension_array_equal(tm.doy_to_gps(ts.date,ts.time).array,ns.array,check_exact=True)
def test_gps_gpsweek(self) -> None:
ns = pd.Series([604800*2000 * 10**9 + 1 * 10 ** 7]).convert_dtypes()
ts = pd.DataFrame({'week':[2000],'day':[0],'time':[1 * 10 ** 7]}).convert_dtypes()
pt.assert_frame_equal(tm.gps_to_gpsweek(ns).astype('float64'),ts.astype('float64'),check_exact=True)
pt.assert_extension_array_equal(tm.gpsweek_to_gps(ts.week,ts.day,ts.time).array,ns.array,check_exact=True)
class TestMissing(unittest.TestCase):
def test_gps_utc(self) -> None:
ns = pd.Series([(1167264018 * 10**9),pd.NA],dtype='Int64')
ts = pd.Series([pd.Timestamp(year=2017, month=1, day=1, hour=0, minute=0, second=0, nanosecond=0), pd.NaT])
pt.assert_extension_array_equal(tm.gps_to_utc(ns).array,ts.array,check_exact=True)
pt.assert_extension_array_equal(tm.utc_to_gps(ts).array,ns.array,check_exact=True)
def test_gps_doy(self) -> None:
ns = pd.Series([1, 2, pd.NA], dtype='Int64')
ts = pd.DataFrame({'date': ['1980006', '1980006', pd.NA], 'time': [1, 2, np.nan]})
import os
import pandas as pd
import matplotlib.pyplot as plt
''' Read: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats '''
def symbol_to_path(symbol, base_dir = 'data'):
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def dates_creator():
start_date = '2013-01-01'
end_date = '2013-12-31'
dates = pd.date_range(start_date, end_date)
import json
import numpy as np
import pandas as pd
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.common.classes.filterChoices import filterChoices
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
from cubepy.cube import kindToString
class PandasEvaluator(BaseEvaluator):
PAGESIZE = 100
def evaluateNode(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
sby = np.nansum
if summaryBy == 'avg':
sby = np.nanmean
elif summaryBy == 'max':
sby = np.nanmax
elif summaryBy == 'min':
sby = np.nanmin
if (fromRow is None) or int(fromRow) <= 0:
fromRow = 1
if (toRow is None) or int(toRow) < 1:
toRow = 100
fromRow = int(fromRow)
toRow = int(toRow)
_filters = {}
_rows = []
_columns = []
theResult = self.prepareDataframeForTable(result)
if not rows is None:
for row in rows:
if self.hasDim(theResult, str(row["field"]).split(".")[0]):
_rows.append(str(row["field"]).split(".")[0])
self.addToFilter(row, _filters)
if not columns is None:
for column in columns:
if self.hasDim(theResult, str(column["field"]).split(".")[0]):
_columns.append(str(column["field"]).split(".")[0])
self.addToFilter(column, _filters)
if not dims is None:
for dim in dims:
if self.hasDim(theResult, str(dim["field"]).split(".")[0]):
self.addToFilter(dim, _filters)
res = None
pageInfo = None
dfResult = None
if len(_rows) == 0 and len(_columns) == 0:
dfResult = self.applyFilter(theResult, _filters)
# if have indexes sum all
if not dfResult.index is None and not dfResult.index.names is None and len(dfResult.index.names) > 0 and not dfResult.index.names[0] is None:
serieResult = dfResult.agg(sby)
dfResult = pd.DataFrame({"total": serieResult})
## Inviting our friends to the party
import pandas as pd
import networkx as nx
from datetime import datetime as dt, timedelta as td
from docplex.mp.model import Model
## Configs and Parameters
# Driver Schedule Input File
schedule_file = "./data/RouteDetails.xlsx"
preload_time = 6
drop_time = 8
loading_type = 'DAHK'
# Function to read in optimized driver schedule as DataFrame
def read_driver_schedule(file_loc, sample_size=None):
"""Function for pre-processing input."""
cols = ['Route', 'Sequence', 'Zip', 'EqCode', 'City',
'State', 'ArvDate', 'ArvTime', 'DeptDate', 'DeptTime']
df = (pd.read_excel(file_loc, usecols=cols)
from datetime import date
import random
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State, ALL, MATCH, ALLSMALLER
import plotly.express as px
import pandas as pd
# CSS
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
show_slides_css = {"height": "800px", "paddingTop": "2%"}
page_button_css = {"fontSize": "1.5rem", "display": "inline-block"}
page_num_css = {"display": "inline-block", "fontSize": "3rem", "paddingLeft": "80%"}
page_bottom_css = {"borderBottom": "inset 3px black"}
my_link_css = {"fontSize": "3rem", "paddingLeft": "4%"}
title_css = {
"textAlign": "center",
"fontSize": "6rem",
"borderBottom": "inset 3px black",
"width": "35%",
"margin": "auto",
}
half_css = {
"width": "46%",
"display": "inline-block",
"verticalAlign": "top",
"margin": "auto",
"padding": "2%",
}
bottom_css = {
"display": "inline-block",
"fontSize": "3rem",
"paddingLeft": "80%",
"position": "absolute",
"bottom": "2%",
}
# application
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.config.suppress_callback_exceptions = True
# pages (to be moved into separate files later)
# page1
title = html.Div(
[
html.H1(
"国内COVID-19データをDashを使って可視化する",
style={"padding": "15%", "fontSize": "5rem", "textAlign": "center"},
),
html.H3("BizPy 20201209", style={"textAlign": "right", "padding": "3% 10% 0"}),
html.H3("合同会社長目 小川 英幸", style={"textAlign": "right", "padding": "0 10% 0"}),
]
)
# page2
intro = html.Div(
[
html.H1("自己紹介", style=title_css),
html.Div(
[
html.Div(
[
html.P("小川 英幸"),
html.P("@ogawahideyuki"),
html.P("合同会社 長目"),
html.P("はんなりPython"),
html.P("最近黒豆にはまっている"),
],
style={
"textAlign": "center",
"paddingTop": "14%",
"fontSize": "5rem",
},
),
],
style=half_css,
),
html.Div(
[
html.Img(
id="my_book",
src="assets/python.jpeg",
style={"width": "60%", "margin": "4% auto"},
),
],
style=half_css,
id="img_div",
n_clicks=0,
),
]
)
@app.callback(Output("my_book", "src"), Input("img_div", "n_clicks"))
def update_image(n_clicks):
if n_clicks % 2 == 1:
return "assets/webdb.png"
return "assets/python.jpeg"
# page3
hanpy_event = pd.read_csv("data/hanpy.csv", index_col=0)
hanpy_event.index = pd.to_datetime(hanpy_event.index)
hanpy_event_month = hanpy_event.resample("M").sum()
len_month = len(hanpy_event_month) + 1
mem_data = pd.read_csv("data/hannari.csv", index_col=0, parse_dates=["registration"])
mem_data["one"] = 1
mem_data = mem_data.set_index("registration")
mem_data = mem_data.sort_index()
mem_data["cumsum"] = mem_data["one"].cumsum()
len_mem_data = len(mem_data) + 1
hanpy = html.Div(
[
html.H1("はんなりPython", style=title_css),
html.Div(
[
html.Img(src="assets/hannari.png"),
html.P("京都発のプログラミング勉強会"),
html.P("2020年オンライン化し、その後イベントを多く開催"),
],
style={
"height": "800px",
"width": "80%",
"fontSize": "6rem",
"textAlign": "center",
"margin": "4% auto",
},
),
html.Div(
[
dcc.Interval(
id="hanpy_interval",
n_intervals=len_month,
interval=100,
disabled=False,
max_intervals=len_month,
),
html.Button(id="hanpy_button", n_clicks=0, children="Button"),
dcc.Graph(id="hanpy_graph", style={"height": "550px"}),
],
style={"backgroudColor": "yellow", "height": "600px"},
),
html.Div(
[
dcc.Interval(
id="hanpy_mem_interval",
n_intervals=len_mem_data,
interval=100,
disabled=False,
max_intervals=len_mem_data / 10,
),
html.Button(id="hanpy_mem_button", n_clicks=0, children="Button"),
dcc.Graph(id="hanpy_mem_graph", style={"height": "600px"}),
]
),
]
)
@app.callback(
Output("hanpy_interval", "n_intervals"), Input("hanpy_button", "n_clicks")
)
def interval_switch(n_clicks):
if n_clicks > 0:
return 0
@app.callback(Output("hanpy_graph", "figure"), Input("hanpy_interval", "n_intervals"))
def update_graph(n_counts):
hanpy_event_graph = hanpy_event_month.iloc[:n_counts, :]
return px.bar(hanpy_event_graph, title="はんなりPython月間イベント数")
@app.callback(
Output("hanpy_mem_interval", "n_intervals"), Input("hanpy_mem_button", "n_clicks")
)
def interval_switch(n_clicks):
if n_clicks > 0:
return 0
@app.callback(
Output("hanpy_mem_graph", "figure"), Input("hanpy_mem_interval", "n_intervals")
)
def update_graph(n_counts):
mem_count = mem_data.iloc[: n_counts * 10, :]
return px.area(mem_count, x=mem_count.index, y="cumsum", title="はんなりPython登録者数")
# page4
## Add year, ISO week number and day of week derived from the data's dates
def add_weeknum_dayofweek(data):
data["calendar"] = data.index.map(date.isocalendar)
data["year"] = data["calendar"].map(lambda x: x[0])
data["week_num"] = data["calendar"].map(lambda x: x[1])
data["day_of_week"] = data["calendar"].map(lambda x: x[2])
return data
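# Brief sketch (not part of the original app) of what the helper adds, using a
# synthetic DatetimeIndex; the real data below comes from the MHLW csv files.
def _example_weeknum():
    demo = pd.DataFrame({'value': [1, 2, 3]},
                        index=pd.date_range('2020-01-01', periods=3))
    return add_weeknum_dayofweek(demo)  # adds calendar, year, week_num, day_of_week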
# covid_csv from MHLW (Japan Ministry of Health, Labour and Welfare)
## For some reason, only the discharged and death counts are provided as cumulative totals
# Data URLs
covid_positive = "https://www.mhlw.go.jp/content/pcr_positive_daily.csv"
pcr_testing = "https://www.mhlw.go.jp/content/pcr_tested_daily.csv"
hospital_num = "https://www.mhlw.go.jp/content/cases_total.csv"
leaving_num = "https://www.mhlw.go.jp/content/recovery_total.csv"
death_num = "https://www.mhlw.go.jp/content/death_total.csv"
# Load the data
covid_positive_data = pd.read_csv(covid_positive, index_col=0, parse_dates=["日付"])
pcr_testing_data = pd.read_csv(pcr_testing, index_col=0, parse_dates=["日付"])
import os
import sys
import pandas as pd
from finta import TA
def add_time_feature(df, symbol, dt_col_name='time'):
"""read csv into df and index on time
dt_col_name can be any unit from minutes to day. time is the index of pd
must have pd columns [(time_col),(asset_col), Open,close,High,Low,day]
data_process will add additional time information: time(index), minute, hour, weekday, week, month,year, day(since 1970)
use StopLoss and ProfitTaken to simplify the action,
feed a fixed StopLoss (SL = 200) and PT = SL * ratio
action space: [action[0,2],ratio[0,10]]
rewards is point
add hourly, dayofweek(0-6, Sun-Sat)
Args:
file (str): file path/name.csv
"""
df['symbol'] = symbol
df['dt'] = pd.to_datetime(df[dt_col_name])
df.index = df['dt']
df['minute'] = df['dt'].dt.minute
df['hour'] = df['dt'].dt.hour
df['weekday'] = df['dt'].dt.dayofweek
df['week'] = df['dt'].dt.isocalendar().week
df['month'] = df['dt'].dt.month
df['year'] = df['dt'].dt.year
df['day'] = df['dt'].dt.day
# df = df.set_index('dt')
return df
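# Illustrative pipeline sketch (not part of the original script): load a raw csv,
# add the calendar features above, then the finta indicators below. The file
# name and symbol are assumptions for the example only.
def _example_feature_pipeline(csv_file='./data/raw/GBPUSD_raw.csv', symbol='GBPUSD'):
    df = pd.read_csv(csv_file)
    df = add_time_feature(df, symbol, dt_col_name='time')
    return tech_indictors(df)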
# 'macd', 'boll_ub', 'boll_lb', 'rsi_30', 'dx_30','close_30_sma', 'close_60_sma'
def tech_indictors(df):
df['macd'] = TA.MACD(df).SIGNAL
df['boll_ub'] = TA.BBANDS(df).BB_UPPER
df['boll_lb'] = TA.BBANDS(df).BB_LOWER
df['rsi_30'] = TA.RSI(df, period=30)
df['dx_30'] = TA.ADX(df, period=30)
df['close_30_sma'] = TA.SMA(df, period=30)
df['close_60_sma'] = TA.SMA(df, period=60)
# fill NaN to 0
df = df.fillna(0)
print(f'--------df head - tail ----------------\n{df.head(3)}\n{df.tail(3)}\n---------------------------------')
return df
def split_timeserious(df, key_ts='dt', freq='W', symbol=''):
"""import df and split into hour, daily, weekly, monthly based and
save into subfolder
Args:
df (pandas df with timestamp is part of multi index):
spliter (str): H, D, W, M, Y
"""
freq_name = {'H': 'hourly', 'D': 'daily', 'W': 'weekly', 'M': 'monthly', 'Y': 'Yearly'}
for count, (n, g) in enumerate(df.groupby(pd.Grouper(level=key_ts, freq=freq))):
p = f'./data/split/{symbol}/{freq_name[freq]}'
os.makedirs(p, exist_ok=True)
# fname = f'{symbol}_{n:%Y%m%d}_{freq}_{count}.csv'
fname = f'{symbol}_{n:%Y}_{count}.csv'
fn = f'{p}/{fname}'
print(f'save to:{fn}')
g.reset_index(drop=True, inplace=True)
g.drop(columns=['dt'], inplace=True)
g.to_csv(fn)
return
"""
python ./neo_finrl/data_processors/fx.py GBPUSD W ./data/raw/GBPUSD_raw.csv
symbol="GBPUSD"
freq = [H, D, W, M]
file .csv, column names [time, Open, High, Low, Close, Vol]
"""
if __name__ == '__main__':
symbol, freq, file = sys.argv[1], sys.argv[2], sys.argv[3]
print(f'processing... symbol:{symbol} freq:{freq} file:{file}')
try:
df = pd.read_csv(file)
#-*-coding:utf-8-*-
# Determines whether the KakaoTalk user speaks formally (uses honorifics) to the people appearing in the dictionary.
# 1 if honorifics are used, 0 if not.
# Given the chat file name and the user's name, it determines whether the user uses honorifics with each chat partner.
import pandas as pd
import re
import datetime as dt
from check import mecab_ch
import avgtime
# Represent honorific usage as 1 and 0.
def drop_dupli():
data = pd.read_csv(file, encoding = 'utf-8')
kakaotalk_la = pd.DataFrame(data, columns=["User", "Date", "Message"])
kakaotalk_la.drop_duplicates()
kakaotalk_la2 = kakaotalk_la.reset_index(drop=True)
return kakaotalk_la2
def read_kko_msg(filename):
with open(filename, encoding = 'utf-8') as f:
msg_list = f.readlines()
return msg_list
def apply_kko_regex(msg_list):
kko_pattern = re.compile("\[([\S\s]+)\] \[(오전|오후) ([0-9:\s]+)\] ([^\n]+)")
kko_date_pattern = re.compile("--------------- ([0-9]+년 [0-9]+월 [0-9]+일) ")
kko_parse_result = list()
cur_date = ""
for msg in msg_list:
if len(kko_date_pattern.findall(msg)) > 0:
# If there is a match for the pattern, run the code below.
cur_date = dt.datetime.strptime(kko_date_pattern.findall(msg)[0], "%Y년 %m월 %d일")
# findall() returns a list of the strings matched by the regex
cur_date = cur_date.strftime("%Y-%m-%d")
# store the date in cur_date
else:
kko_pattern_result = kko_pattern.findall(msg)
# kko_pattern_result holds the list of strings matched by the regex via findall()
if len(kko_pattern_result) > 0:
# If there is a match for the pattern, run the code below.
tokens = list(kko_pattern_result[0])
# store everything matched by the pattern in tokens
pattern = re.compile("[0-9]+")
cur_hour = pattern.findall(tokens[2])[0]
# extract the hour
cur_minute = pattern.findall(tokens[2])[1]
# extract the minute
if tokens[1] == '오전' and cur_hour == '12':
cur_hour = '0'
tokens[2] = ("%s:%s"%(cur_hour, cur_minute))
del tokens[1]
elif tokens[1] == '오전':
del tokens[1]
elif (tokens[1] == '오후' and cur_hour == '12'):
cur_hour = '12'
tokens[2] = ("%s:%s"%(cur_hour, cur_minute))
del tokens[1]
elif tokens[1] == '오후':
tokens[2] = ("%s:%s"%(str(int(cur_hour)+12), cur_minute))
del tokens[1]
tokens.insert(1, cur_date)
# store cur_date at index 1
tokens[1] = tokens[1] + " " + tokens[2]
del tokens[2]
kko_parse_result.append(tokens)
kko_parse_result = pd.DataFrame(kko_parse_result, columns = ["User", "Date", "Message"])
kko_parse_result.to_csv("merge.csv", encoding='utf-8-sig', index = False)
return kko_parse_result
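# Illustrative usage sketch (not part of the original script): parse an exported
# chat log and write merge.csv. The file name is an assumption for the example.
# msg_list = read_kko_msg('KakaoTalkChats.txt')
# kko_df = apply_kko_regex(msg_list)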
def name():
data1 = pd.read_csv(file, encoding = 'utf-8')
name_list = list(data1["User"])
count = {}
for i in name_list:
try:
count[i] += 1
except:
count[i] = 1
max_key = max(count, key=lambda k: count[k])
return max_key
def output_csv():
# Load the data as a pandas DataFrame and extract only the messages.
data = pd.read_csv(file, encoding='utf-8')
kakaotalk_label = pd.DataFrame(data, columns=["User", "Date", "Message"])
text_sentences = list(data['Message'])
#리스트를 초기화하여 추출한 메시지 데이터에서 각각 존대말 사용 여부를 넣습니다.
c_list = []
for line in text_sentences:
# 추출한 메시지를 정규화하여 한글에서 실질적 의미를 가진 높임 종결어미를 찾습니다.
parse = re.sub('[\'\"\-!-= .1234567890^#~/?:ㅋ$ㅜ}ㅠ)(*$%@]', '', str(line))
#띄어쓰기를 제거합니다.
eumjeol = [s.replace(' ', '') for s in parse]
if not eumjeol:
c_list.append(0)
elif len(eumjeol) == 1:
if eumjeol[0] == ('넵' or '네' or '넴' or '넨' or '옙' or '예' or '넷' or '옛'):
c_list.append(1)
else:
c_list.append(0)
elif eumjeol[-9:] == ['삭', '제', '된', '메', '시', '지', '입', '니', '다']:
c_list.append(0)
elif eumjeol[-3:] == ['아', '니', '다']:
c_list.append(0)
elif eumjeol[-2:] == (['니', '다'] or ['니', '까']) or eumjeol[-1] == ('요' or '용' or '욥' or "염"):
c_list.append(1)
else:
c_list.append(0)
# Turn the honorific-usage list built above (containing only 0s and 1s) into a DataFrame.
df2 = pd.DataFrame({'label': c_list})
# Join the DataFrames.
kakaotalk_label = kakaotalk_label.join(df2)
return kakaotalk_label
# Since the KakaoTalk csv data does not tell us who each message was sent to,
# this code labels each message with the recipient's name.
def labeling_data():
# Open the DataFrame built by output_csv.
kakaotalk_label1 = output_csv()
# Use only the User column of the DataFrame.
kakaotalk_label2 = kakaotalk_label1.loc[:, ['User']]
# Set the user's name.
name = global_name
# ckp_list is a checkpoint list that collects newly appearing names while reading the KakaoTalk data line by line.
ckp_list = []
# idx_db stores the index of each DataFrame row to keep track of positions.
idx_db = []
# Predict the recipient's name for each timestamp and append it to to_list.
to_list = []
for row_index, row in kakaotalk_label2.iterrows():
try:
# Reading line by line: when the sender is not the user
if not row[0] == name:
# If a new name appears that differs from the last name in the checkpoint list,
# append that row's recipient name to the checkpoint list
# and add the row index.
# Then append the recipient's name to to_list.
if not row[0] == ckp_list[-1]:
ckp_list.append(row[0])
idx_db.append(row_index)
to_list.append(row[0])
# If the same name as the one already in the checkpoint list appears,
# append that name to to_list as the recipient. (Same logic below.)
else:
idx_db.append(row_index)
to_list.append(ckp_list[-1])
else:
idx_db.append(row_index)
to_list.append(ckp_list[-1])
except:
if not row[0] == name:
ckp_list.append(row[0])
idx_db.append(row_index)
to_list.append(ckp_list[-1])
else:
idx_db.append(row_index)
to_list.append(name)
# Turn the resulting to_list into a DataFrame and join it with the previous DataFrame.
df2 = pd.DataFrame({'to': to_list})
# %%%%
import pandas as pd
import numpy as np
import re
# %%%% functions
## Fill missing values
def fillmissing(x,col,index,benchmark):
for i in range(index,len(x)):
# find missing value
if x.loc[i,col] == benchmark:
# if first is missing, fill using the value next to it
if i == index:
x.loc[i,col] = x.loc[i+1,col]
# if the last one is missing, fill using the value preceeds it
elif i == len(x)-1:
x.loc[i,col] = x.loc[i-1,col]
# otherwise, fill using the average of the two not null values above and after
else:
j = i-1
k = i+1
while x.loc[j,col] == benchmark:
j -= 1
while x.loc[k,col] == benchmark:
k += 1
x.loc[i,col] = np.mean([x.loc[j,col],x.loc[k,col]])
return x
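# Brief sketch (illustrative only) of fillmissing on a toy frame where the
# benchmark value 0 marks a missing observation:
def _example_fillmissing():
    toy = pd.DataFrame({'Close': [10.0, 0.0, 14.0]})
    # the middle 0 is replaced by the mean of its non-missing neighbours, 12.0
    return fillmissing(toy, col='Close', index=0, benchmark=0)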
## Data Preprocess
def preprocess(x,name,Date,column,index,benchmark,q):
# select the valid starting day
x = x[x['Date'] > Date].copy()
x = x.reset_index().copy()
x = x.drop('index',axis = 1).copy()
# fill na with benchmark we chose
x[column] = x[column].fillna(benchmark).copy()
# fill missing values
x = fillmissing(x,column,index,benchmark).copy()
# calculate daily return
x['lag_'+column] = x[column].shift(1)
x = x.iloc[1:,:].copy().reset_index()
x = x.drop('index',axis = 1).copy()
x['log_ret'] = np.log(x[column])-np.log(x['lag_'+column])
retm = np.mean(x['log_ret'])
x['retv'] = np.square(x['log_ret']-retm)*100
# estimate volatility
x[name+'_20day_vol'] = np.sqrt(x['retv'].rolling(window=20,win_type="boxcar").mean())/10
# estimate quantiles of the distribution of log-returns
x[name+'_quant_ret'] = np.nan
for r in range(len(x)-20):
R_quant = np.quantile(x['log_ret'][r:r+20],q)
x.loc[r+19,name+'_quant_ret'] = R_quant
return x
# %%%% Main Dataset: csi300
csi = pd.read_csv('/Users/msstark/Desktop/project/Shanghai Shenzhen CSI 300 Historical Data.csv')
# setting date format
csi['Date'] = csi['Date'].apply(lambda x: re.sub(r',',r'',x))
csi['Day'] = csi['Date'].apply(lambda x: x.split(' ')[1]).astype(int)
csi['Month'] = csi['Date'].apply(lambda x: x.split(' ')[0])
csi['Month'].unique()
csi['Month'] = csi['Month'].map({'Jan':1,'Feb':2,'Mar':3,'Apr':4,'May':5,'Jun':6,
'Jul':7,'Aug':8,'Sep':9,'Oct':10,'Nov':11,'Dec':12})
csi['Year'] = csi['Date'].apply(lambda x: x.split(' ')[2]).astype(int)
csi['Date'] = csi['Year'].astype(str) +'-'+csi['Month'].astype(str)+'-'+csi['Day'].astype(str)
csi['Date'] = pd.to_datetime(csi['Date'], format='%Y-%m-%d')
csi = csi.rename(columns = {'Price':'Close'}).copy()
# convert object type to float
col = ['Close','Open','High','Low']
for c in col:
csi[c] = csi[c].apply(lambda x: re.sub(r',',r'',x)).astype('float')
csi['log_dsprd'] = np.log(csi['High'] - csi['Low'])
csi.columns
# apply preprocess function
csi = preprocess(csi,'csi','2005-01-03','Close',0,0,0.025).copy()
# %%%% spot exchange rate
xr = pd.read_csv('/Users/msstark/Desktop/project/DEXCHUS.csv')
# setting date format
xr['DATE'] = pd.to_datetime(xr['DATE'], format='%Y-%m-%d')
xr = xr.rename(columns = {'DATE':'Date','DEXCHUS':'exR'}).copy()
# we find there's '.' inside our dataset
# replace '.' with '0', which is also the benchmark we chose to fill the missing values
xr['exR'] = xr[['exR']].apply(lambda x: x.replace('.','0'))
# convert object type to float
xr['exR'] = xr['exR'].astype(float)
# apply preprocess function
xr = preprocess(xr,'exR','2005-01-03','exR',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(xr[['Date','exR_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% hsi
hsi = pd.read_csv('^HSI.csv')
# setting date format
hsi['Date'] = pd.to_datetime(hsi['Date'], format='%Y-%m-%d')
# apply preprocess function
hsi = preprocess(hsi,'hsi','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(hsi[['Date','hsi_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% sse
sse = pd.read_csv('SSE Composite Index.csv')
# setting date format
sse['Date'] = pd.to_datetime(sse['Date'], format='%Y-%m-%d')
# apply preprocess function
sse = preprocess(sse,'sse','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(sse[['Date','sse_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# %%%% commodities
# corn
corn = pd.read_csv('corn-prices-historical-chart-data.csv')
corn = corn.rename(columns = {'date':'Date',' value':'Close'})
# setting date format
corn['Date'] = pd.to_datetime(corn['Date'], format='%Y-%m-%d')
# apply preprocess function
corn = preprocess(corn,'corn','2005-01-03','Close',0,0,0.025).copy()
# merge onto the main dataset
csi = csi.merge(corn[['Date','corn_quant_ret']],left_on = ['Date'],right_on = ['Date'],how = 'left').copy()
# soybean
soybean = | pd.read_csv('soybean-prices-historical-chart-data.csv') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
"Codigo que permite la porderación de la nubosidad por la ponderación de sus horas"
## ---------------------- READING GOES CH02 DATA ----------------------- ##
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C2_2019_0320_0822.nc')
## ----------------- INCORPORATING THE RADIATION AND EXPERIMENT DATA ----------------- ##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = | pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce') | pandas.to_datetime |
import pandas as pd
confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
            '/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
recovered = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
            '/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
         '/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
deaths = pd.read_csv(deaths)
recovered = pd.read_csv(recovered)
confirmed = pd.read_csv(confirmed)
recovered = recovered.drop(columns=['Province/State'])
deaths = deaths.drop(columns=['Province/State'])
confirmed = confirmed.drop(columns=['Province/State'])
recovered = recovered.rename(columns={'Country/Region': 'Country'})
deaths = deaths.rename(columns={'Country/Region': 'Country'})
confirmed = confirmed.rename(columns={'Country/Region': 'Country'})
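# The three frames above follow the JHU CSSE layout: one row per country/region entry
# and one cumulative-count column per date, which is why the class below sums across
# the date columns (everything after Country, Lat and Long) to obtain global totals.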
class GlobalCases:
def confirmed(self):
df = confirmed.iloc[:, 4:].sum().max()
df = {'Confirmed': int(df)}
return df
def deaths(self):
df = deaths.iloc[:, 4:].sum().max()
df = {'Deaths': int(df)}
return df
def recovered(self):
df = recovered.iloc[:, 4:].sum().max()
df = {'Recovered': int(df)}
return df
def active(self):
df = GlobalCases.confirmed(self)['Confirmed'] - GlobalCases.deaths(self)['Deaths'] \
- GlobalCases.recovered(self)['Recovered']
df = {'Active': int(df)}
return df
def complete_world(self):
df = {
'Confirmed': GlobalCases.confirmed(self),
'Deaths': GlobalCases.deaths(self),
'Recovered': GlobalCases.recovered(self),
'Active': GlobalCases.active(self)
}
return df
def death_rate(self=None):
df = GlobalCases.deaths(self)['Deaths'] / GlobalCases.confirmed(self)['Confirmed'] * 100
df = {'Death Rate': float(df)}
return df
def recovery_rate(self):
df = GlobalCases.recovered(self)['Recovered'] / GlobalCases.confirmed(self)['Confirmed'] * 100
df = {'Recovery Rate': float(df)}
return df
def active_perc(self):
df = GlobalCases.active(self)['Active'] / GlobalCases.confirmed(self)['Confirmed'] * 100
        df = {'Active Percentage': float(df)}
return df
def daily_confirmed(self):
df = confirmed.iloc[:, 3:].sum(axis=0)
df.index = pd.to_datetime(df.index)
df = pd.DataFrame(df).reset_index()
df.columns = ['Date', 'Confirmed']
#df["Confirmed"].astype(int)
return df.to_dict()
def daily_deaths(self):
df = deaths.iloc[:, 3:].sum(axis=0)
df.index = pd.to_datetime(df.index)
df = pd.DataFrame(df).reset_index()
df.columns = ['Date', 'Deaths']
# df['7 Day Change'] = df['Deaths'].pct_change(periods=7)
# df /= 1_000_000
return df.to_dict()
def daily_recovered(self):
df = recovered.iloc[:, 3:].sum(axis=0)
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
from time import daylight
from django.test import TestCase
from django.db import connections
import pandas
import numpy
from pandas.testing import assert_frame_equal, assert_series_equal
from transform_layer.services.data_service import DataService, KEY_FAMILY, KEY_MEMBER, KEY_SERVICE
import transform_layer.calculations as calc
import json
import math
import unittest
import os
import pyreadr
# How far 'off' the value returned by a data def can be before it is considered wrong
# e.g. .01 means within 1% of the expected value
REL_TOL = .01
base_scope = {
"startDate":"01/01/2020",
"endDate":"12/31/2020",
"scope_type": "hierarchy",
"scope_field":"loc_id",
"scope_field_value":6,
"control_type_name":"Is Grocery Service"
}
TEST_DATA_SERVICE = DataService(base_scope)
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
base_families = pyreadr.read_r(os.path.join(__location__, './test_data/test_calc_geographies/base_families.rds'))[None]
base_members = pyreadr.read_r(os.path.join(__location__, './test_data/test_calc_geographies/base_members.rds'))[None]
base_services = pyreadr.read_r(os.path.join(__location__, './test_data/test_calc_geographies/base_services.rds'))[None]
#substitute the call to TEST_DATA_SERVICE.get_data_for_definition with this
#it's the data that David used in his calculations
BASE_DATA = {
KEY_SERVICE: base_services,
KEY_FAMILY: base_families,
KEY_MEMBER : base_members
}
class CalculationsTestCase(unittest.TestCase):
#test data def 47
def test_get_geo_coverage(self):
expected = 0.988
data = BASE_DATA
func = calc.data_calc_function_switcher[47]
result = func(data)
self.assertTrue(math.isclose(result, expected, rel_tol = REL_TOL))
#test data def 48
def test_get_geo_breakdown_fam_state(self):
expected = pandas.read_csv(
os.path.join(__location__, './expected_results/results_geographic_breakdown_fam_state.csv'),
dtype={'fips_state':str}
).fillna('<NA>')
data = BASE_DATA
func = calc.data_calc_function_switcher[48]
result = func(data)
resultFrame = pandas.read_json(result).reset_index().rename(columns={"index": "fips_state"})
assert_frame_equal(resultFrame, expected, check_like = True)
#test data def 49
def test_get_geo_breakdown_fam_cnty(self):
expected = pandas.read_csv(
os.path.join(__location__, './expected_results/results_geographic_breakdown_fam_county.csv'),
dtype={'fips_cnty':str}
).fillna('<NA>')
data = BASE_DATA
func = calc.data_calc_function_switcher[49]
result = func(data)
resultFrame = pandas.read_json(result).reset_index().rename(columns={"index": "fips_cnty"})
assert_frame_equal(resultFrame, expected, check_like = True)
#test data def 50
def test_get_geo_breakdown_fam_zcta(self):
expected = pandas.read_csv(
os.path.join(__location__, './expected_results/results_geographic_breakdown_fam_zcta.csv'),
dtype={'fips_zcta':str}
).fillna('<NA>')
data = BASE_DATA
func = calc.data_calc_function_switcher[50]
result = func(data)
resultFrame = pandas.read_json(result).reset_index().rename(columns={"index": "fips_zcta"})
assert_frame_equal(resultFrame, expected, check_like = True)
#test data def 51
def test_get_services_flow_event_fips(self):
expected = pandas.read_csv(
os.path.join(__location__, './expected_results/results_services_flow_event_fips.csv'),
index_col = 'index'
)
data = BASE_DATA
func = calc.data_calc_function_switcher[51]
result = func(data)
resultFrame = pandas.read_json(result)
assert_frame_equal(resultFrame, expected, check_like = True)
#test data def 52
def test_get_distance_traveled(self):
expected = pandas.read_csv(
os.path.join(__location__, './expected_results/results_distance_traveled.csv'),
index_col = 'distance_roll'
)
data = BASE_DATA
func = calc.data_calc_function_switcher[52]
result = func(data)
resultFrame = | pandas.read_json(result) | pandas.read_json |
import sys
from unittest import TestCase, main
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_series_equal
sys.path.append("../")
from valhalla.transform import ColumnSelector, ColumnMerger
from valhalla.transform import WordUnifier, DuplicateRemover, StopWordRemover
from valhalla.transform import WordLower, RegExReplacer
from valhalla.transform import MorphTokenizer, NounTokenizer, PosTokenizer
class ColumnPreprocessingSimpleTest(TestCase):
"""
    List of test methods
- ColumnSelector : DONE
- ColumnMerger : DONE
"""
def test_ColumnSelector(self):
df = pd.DataFrame(
data={
"과일": [
'사과', '배', '딸기'], "시장": [
'명동', '상정', '죽도']})
answer = pd.Series(data=['사과', '배', "딸기"])
cs = ColumnSelector("과일")
pred = cs.transform(df)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
def test_ColumnMerger(self):
df = pd.DataFrame(
data={
"과일": [
'사과', '배', '딸기'], "시장": [
'명동', '상정', '죽도']})
answer = pd.Series(data=["사과 명동", "배 상정", "딸기 죽도"])
cs = ColumnMerger(["과일", "시장"])
pred = cs.transform(df)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
class BasicNLPPreprocessingSimpleTest(TestCase):
"""
테스트 메소드 리스트
- WordUnifier : TODO
- DuplicateRemover : DONE
- StopWordRemover : DONE
        - RegExReplacer : TODO => still deciding which input-argument layout is cleanest
- WordLower : DONE
"""
"""
Word Unifier Test
"""
def test_word_unifier_with_list(self):
sample = ['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"]
transformer = WordUnifier(
[["삼성", "삼성전자", 'samsung'], ["엘지", '엘지전자', 'lg']])
answer = ['삼성 노트북', "노트북 삼성", "삼성 스마트폰", "엘지 폰", "엘지 상거래"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_word_unifier_with_numpy_input(self):
sample = np.array(
['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"])
transformer = WordUnifier(
[["삼성", "삼성전자", 'samsung'], ["엘지", '엘지전자', 'lg']])
answer = np.array(['삼성 노트북', "노트북 삼성", "삼성 스마트폰", "엘지 폰", "엘지 상거래"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_word_unifier_with_pandas_input(self):
sample = pd.Series(
['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"])
transformer = WordUnifier(
[["삼성", "삼성전자", 'samsung'], ["엘지", '엘지전자', 'lg']])
answer = pd.Series(['삼성 노트북', "노트북 삼성", "삼성 스마트폰", "엘지 폰", "엘지 상거래"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
RegExReplacer Test
"""
def test_RegExReplacer_with_list(self):
sample = ["열무김치 10kg 판매", "매실 5kg 포장", "미닛메이드 10L 병", "포도주스 30L 병",
"kgfi 공인 판매", "lipspace 판매"]
transformer = RegExReplacer([("[0-9]+kg", "<단위>"), ("[0-9]+L", "<부피단위>")])
answer = ["열무김치 <단위> 판매", "매실 <단위> 포장", "미닛메이드 <부피단위> 병", "포도주스 <부피단위> 병",
"kgfi 공인 판매", "lipspace 판매"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_RegExReplacer_with_numpy_input(self):
sample = np.array(["열무김치 10kg 판매", "매실 5kg 포장", "미닛메이드 10L 병", "포도주스 30L 병",
"kgfi 공인 판매", "lipspace 판매"])
transformer = RegExReplacer([("[0-9]+kg", "<단위>"), ("[0-9]+L", "<부피단위>")])
answer = np.array(["열무김치 <단위> 판매", "매실 <단위> 포장", "미닛메이드 <부피단위> 병", "포도주스 <부피단위> 병",
"kgfi 공인 판매", "lipspace 판매"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_RegExReplacer_with_pandas_input(self):
sample = pd.Series(["열무김치 10kg 판매", "매실 5kg 포장", "미닛메이드 10L 병", "포도주스 30L 병",
"kgfi 공인 판매", "lipspace 판매"])
transformer = RegExReplacer([("[0-9]+kg", "<단위>"), ("[0-9]+L", "<부피단위>")])
answer = pd.Series(["열무김치 <단위> 판매", "매실 <단위> 포장", "미닛메이드 <부피단위> 병", "포도주스 <부피단위> 병",
"kgfi 공인 판매", "lipspace 판매"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
DuplicateRemover Test
"""
def test_DuplicateRemover_with_list(self):
sample = ['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"]
transformer = DuplicateRemover()
answer = ['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박']
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_DuplicateRemover_with_numpy_input(self):
sample = np.array(
['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"])
transformer = DuplicateRemover()
answer = np.array(['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박'])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_DuplicateRemover_with_pandas_input(self):
sample = pd.Series(
['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"])
transformer = DuplicateRemover()
answer = pd.Series(['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박'])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
StopWordRemover Test
"""
def test_StopWordRemover_with_list(self):
sample = [
"노트북 할인 판매",
"옷 기타 완전 세일",
"비아그라 할인",
"클래식기타 판매 세일",
"판매왕의 판매 전략"]
transformer = StopWordRemover(['판매', '기타'])
answer = ["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의 전략"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_StopWordRemover_with_numpy_input(self):
sample = np.array(["노트북 할인 판매", "옷 기타 완전 세일",
"비아그라 할인", "클래식기타 판매 세일", "판매왕의 판매 전략"])
transformer = StopWordRemover(['판매', '기타'])
answer = np.array(
["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의 전략"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_StopWordRemover_with_pandas_input(self):
sample = pd.Series(["노트북 할인 판매", "옷 기타 완전 세일",
"비아그라 할인", "클래식기타 판매 세일", "판매왕의 판매 전략"])
transformer = StopWordRemover(['판매', '기타'])
answer = pd.Series(
["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의 전략"])
pred = transformer.transform(sample)
assert_series_equal(answer, pred, check_names=False, check_dtype=False)
"""
WordLower Test
"""
def test_WordLower_with_list(self):
sample = ["Kang", "KAM", "Kan"]
transformer = WordLower()
answer = ["kang", "kam", "kan"]
pred = transformer.transform(sample)
self.assertListEqual(answer, pred)
def test_WordLower_with_numpy_input(self):
sample = np.array(["Kang", "KAM", "Kan"])
transformer = WordLower()
answer = np.array(["kang", "kam", "kan"])
pred = transformer.transform(sample)
assert_array_equal(answer, pred)
def test_WordLower_with_pandas_input(self):
sample = pd.Series(["Kang", "KAM", "Kan"])
transformer = WordLower()
answer = pd.Series(["kang", "kam", "kan"])
pred = transformer.transform(sample)
| assert_series_equal(answer, pred, check_names=False, check_dtype=False) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import pandas as pd
from numpy import nan, isnan, arange, dtype, zeros
from pandapower.auxiliary import pandapowerNet, get_free_id, _preserve_dtypes
from pandapower.results import reset_results
from pandapower.std_types import add_basic_std_types, load_std_type
from pandapower import __version__
def create_empty_network(name="", f_hz=50., sn_kva=1e3):
"""
This function initializes the pandapower datastructure.
OPTIONAL:
**f_hz** (float, 50.) - power system frequency in hertz
**name** (string, None) - name for the network
**sn_kva** (float, 1e3) - reference apparent power for per unit system
OUTPUT:
**net** (attrdict) - PANDAPOWER attrdict with empty tables:
EXAMPLE:
net = create_empty_network()
"""
net = pandapowerNet({
# structure data
"bus": [('name', dtype(object)),
('vn_kv', 'f8'),
('type', dtype(object)),
('zone', dtype(object)),
('in_service', 'bool'), ],
"load": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("q_kvar", "f8"),
("const_z_percent", "f8"),
("const_i_percent", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"sgen": [("name", dtype(object)),
("bus", "i8"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"gen": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("vm_pu", "f8"),
("sn_kva", "f8"),
("min_q_kvar", "f8"),
("max_q_kvar", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"switch": [("bus", "i8"),
("element", "i8"),
("et", dtype(object)),
("type", dtype(object)),
("closed", "bool"),
("name", dtype(object))],
"shunt": [("bus", "u4"),
("name", dtype(object)),
("q_kvar", "f8"),
("p_kw", "f8"),
("vn_kv", "f8"),
("step", "u4"),
("max_step", "u4"),
("in_service", "bool")],
"ext_grid": [("name", dtype(object)),
("bus", "u4"),
("vm_pu", "f8"),
("va_degree", "f8"),
("in_service", 'bool')],
"line": [("name", dtype(object)),
("std_type", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("length_km", "f8"),
("r_ohm_per_km", "f8"),
("x_ohm_per_km", "f8"),
("c_nf_per_km", "f8"),
("max_i_ka", "f8"),
("df", "f8"),
("parallel", "u4"),
("type", dtype(object)),
("in_service", 'bool')],
"trafo": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("lv_bus", "u4"),
("sn_kva", "f8"),
("vn_hv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_percent", "f8"),
("vscr_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_st_degree", "f8"),
("tp_pos", "i4"),
("parallel", "u4"),
("df", "f8"),
("in_service", 'bool')],
"trafo3w": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("mv_bus", "u4"),
("lv_bus", "u4"),
("sn_hv_kva", "u8"),
("sn_mv_kva", "u8"),
("sn_lv_kva", "u8"),
("vn_hv_kv", "f8"),
("vn_mv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_hv_percent", "f8"),
("vsc_mv_percent", "f8"),
("vsc_lv_percent", "f8"),
("vscr_hv_percent", "f8"),
("vscr_mv_percent", "f8"),
("vscr_lv_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_mv_degree", "f8"),
("shift_lv_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"impedance": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("rft_pu", "f8"),
("xft_pu", "f8"),
("rtf_pu", "f8"),
("xtf_pu", "f8"),
("sn_kva", "f8"),
("in_service", 'bool')],
"dcline": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("p_kw", "f8"),
("loss_percent", 'f8'),
("loss_kw", 'f8'),
("vm_from_pu", "f8"),
("vm_to_pu", "f8"),
("max_p_kw", "f8"),
("min_q_from_kvar", "f8"),
("min_q_to_kvar", "f8"),
("max_q_from_kvar", "f8"),
("max_q_to_kvar", "f8"),
("in_service", 'bool')],
"ward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("in_service", "bool")],
"xward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("r_ohm", "f8"),
("x_ohm", "f8"),
("vm_pu", "f8"),
("in_service", "bool")],
"measurement": [("name", dtype(object)),
("type", dtype(object)),
("element_type", dtype(object)),
("value", "f8"),
("std_dev", "f8"),
("bus", "u4"),
("element", dtype(object))],
"piecewise_linear_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("p", dtype(object)),
("f", dtype(object))],
"polynomial_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("c", dtype(object))],
# geodata
"line_geodata": [("coords", dtype(object))],
"bus_geodata": [("x", "f8"), ("y", "f8")],
# result tables
"_empty_res_bus": [("vm_pu", "f8"),
("va_degree", "f8"),
("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_ext_grid": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_line": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8"),
("i_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo3w": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_mv_kw", "f8"),
("q_mv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_mv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_load": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_sgen": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_gen": [("p_kw", "f8"),
("q_kvar", "f8"),
("va_degree", "f8"),
("vm_pu", "f8")],
"_empty_res_shunt": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_impedance": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8")],
"_empty_res_dcline": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("vm_from_pu", "f8"),
("va_from_degree", "f8"),
("vm_to_pu", "f8"),
("va_to_degree", "f8")],
"_empty_res_ward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_xward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
# internal
"_ppc": None,
"_is_elements": None,
"_pd2ppc_lookups": {"bus": None,
"ext_grid": None,
"gen": None},
"version": float(__version__[:3]),
"converged": False,
"name": name,
"f_hz": f_hz,
"sn_kva": sn_kva
})
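    # Each schema entry above is a list of (column name, dtype) tuples; the loop below
    # materialises every such list entry as an empty DataFrame with those typed columns.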
for s in net:
if isinstance(net[s], list):
net[s] = pd.DataFrame(zeros(0, dtype=net[s]), index=[])
add_basic_std_types(net)
reset_results(net)
net['user_pf_options'] = dict()
return net
def create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b",
zone=None, in_service=True, max_vm_pu=nan,
min_vm_pu=nan, **kwargs):
"""create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b", \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds one bus in table net["bus"].
    Buses are the nodes of the network that all other elements connect to.
INPUT:
**net** (pandapowerNet) - The pandapower network in which the element is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available. If None, the \
index one higher than the highest already existing index is selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
        **type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
        create_bus(net, vn_kv=20., name="bus1")
"""
if index and index in net["bus"].index:
raise UserWarning("A bus with index %s already exists" % index)
if index is None:
index = get_free_id(net["bus"])
# store dtypes
dtypes = net.bus.dtypes
net.bus.loc[index, ["name", "vn_kv", "type", "zone", "in_service"]] = \
[name, vn_kv, type, zone, bool(in_service)]
# and preserve dtypes
_preserve_dtypes(net.bus, dtypes)
if geodata is not None:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[index, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
def create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None,
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan):
"""create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None, \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds several buses in table net["bus"] at once.
    Buses are the nodal points of the network that all other elements connect to.
Input:
**net** (pandapowerNet) - The pandapower network in which the element is created
**nr_buses** (int) - The number of buses that is created
OPTIONAL:
**name** (string, default None) - the name for this bus
        **index** (int, default None) - Force specified IDs if available. If None, the indices \
higher than the highest already existing index are selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
        **type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
        **index** (int) - The unique IDs of the created elements
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index:
for idx in index:
if idx in net.bus.index:
raise UserWarning("A bus with index %s already exists" % index)
else:
bid = get_free_id(net["bus"])
index = arange(bid, bid + nr_buses, 1)
# TODO: not needed when concating anyways?
# store dtypes
# dtypes = net.bus.dtypes
dd = pd.DataFrame(index=index, columns=net.bus.columns)
dd["vn_kv"] = vn_kv
dd["type"] = type
dd["zone"] = zone
dd["in_service"] = in_service
dd["name"] = name
net["bus"] = pd.concat([net["bus"], dd], axis=0).reindex_axis(net["bus"].columns, axis=1)
# and preserve dtypes
# _preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[bid, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
def create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan,
name=None, scaling=1., index=None,
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan):
"""create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan, \
name=None, scaling=1., index=None, \
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan, max_q_kvar=nan, \
min_q_kvar=nan, controllable=nan)
Adds one load in table net["load"].
All loads are modelled in the consumer system, meaning load is positive and generation is
negative active power. Please pay attention to the correct signing of the reactive power as
well.
INPUT:
**net** - The net within this load should be created
**bus** (int) - The bus id to which the load is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the load
        - positive value -> load
- negative value -> generation
**q_kvar** (float, default 0) - The reactive power of the load
**const_z_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant impedance load at rated voltage
**const_i_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant current load at rated voltage
**sn_kva** (float, default None) - Nominal power of the load
**name** (string, default None) - The name for this load
        **scaling** (float, default 1.) - An optional scaling factor that can be set by the user
**type** (string, None) - type variable to classify the load
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
**in_service** (boolean) - True for in_service or False for out of service
        **max_p_kw** (float, default NaN) - Maximum active power load - necessary for controllable \
        loads in OPF
        **min_p_kw** (float, default NaN) - Minimum active power load - necessary for controllable \
        loads in OPF
        **max_q_kvar** (float, default NaN) - Maximum reactive power load - necessary for \
        controllable loads in OPF
**min_q_kvar** (float, default NaN) - Minimum reactive power load - necessary for \
controllable loads in OPF
**controllable** (boolean, default NaN) - States, whether a load is controllable or not. \
Only respected for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_load(net, bus=0, p_kw=10., q_kvar=2.)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["load"])
if index in net["load"].index:
raise UserWarning("A load with the id %s already exists" % id)
# store dtypes
dtypes = net.load.dtypes
net.load.loc[index, ["name", "bus", "p_kw", "const_z_percent", "const_i_percent", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, const_z_percent, const_i_percent, scaling, q_kvar, sn_kva,
bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.load, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.load.columns:
net.load.loc[:, "min_p_kw"] = pd.Series()
net.load.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.load.columns:
net.load.loc[:, "max_p_kw"] = pd.Series()
net.load.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.load.columns:
net.load.loc[:, "min_q_kvar"] = pd.Series()
net.load.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.load.columns:
net.load.loc[:, "max_q_kvar"] = pd.Series()
net.load.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.load.columns:
net.load.loc[:, "controllable"] = pd.Series()
net.load.loc[index, "controllable"] = bool(controllable)
else:
if "controllable" in net.load.columns:
net.load.loc[index, "controllable"] = False
return index
def create_load_from_cosphi(net, bus, sn_kva, cos_phi, mode, **kwargs):
"""
Creates a load element from rated power and power factor cos(phi).
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the load is connected
**sn_kva** (float) - rated power of the load
**cos_phi** (float) - power factor cos_phi
**mode** (str) - "ind" for inductive or "cap" for capacitive behaviour
**kwargs are passed on to the create_load function
OUTPUT:
**index** (int) - The unique ID of the created load
All elements are modeled from a consumer point of view. Active power will therefore always be
positive, reactive power will be negative for inductive behaviour and positive for capacitive
behaviour.
"""
from pandapower.toolbox import pq_from_cosphi
p_kw, q_kvar = pq_from_cosphi(sn_kva, cos_phi, qmode=mode, pmode="load")
return create_load(net, bus, sn_kva=sn_kva, p_kw=p_kw, q_kvar=q_kvar, **kwargs)
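# Illustrative sketch (values assumed, not taken from the original module): for a
# 400 kVA load at cos(phi) = 0.95 the helper yields p_kw = 400 * 0.95 = 380 and a
# reactive component of magnitude sqrt(400**2 - 380**2) ~ 124.9 kvar, signed
# according to `mode`, e.g.:
#
#     net = create_empty_network()
#     b = create_bus(net, vn_kv=0.4)
#     create_load_from_cosphi(net, b, sn_kva=400, cos_phi=0.95, mode="ind")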
def create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=nan, name=None, index=None,
scaling=1., type=None, in_service=True, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan, k=nan, rx=nan):
"""create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=nan, name=None, index=None, \
scaling=1., type=None, in_service=True, max_p_kw=nan, min_p_kw=nan, \
max_q_kvar=nan, min_q_kvar=nan, controllable=nan, k=nan, rx=nan)
Adds one static generator in table net["sgen"].
Static generators are modelled as negative PQ loads. This element is used to model generators
with a constant active and reactive power feed-in. If you want to model a voltage controlled
generator, use the generator element instead.
All elements in the grid are modelled in the consumer system, including generators!
If you want to model the generation of power, you have to assign a negative active power
to the generator. Please pay attention to the correct signing of the
reactive power as well.
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the static generator is connected
**p_kw** (float) - The real power of the static generator (negative for generation!)
OPTIONAL:
**q_kvar** (float, default 0) - The reactive power of the sgen
**sn_kva** (float, default None) - Nominal power of the sgen
**name** (string, default None) - The name for this sgen
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
        **scaling** (float, 1.) - An optional scaling factor that can be set by the user
**type** (string, None) - type variable to classify the static generator
**in_service** (boolean) - True for in_service or False for out of service
**max_p_kw** (float, NaN) - Maximum active power injection - necessary for \
controllable sgens in OPF
**min_p_kw** (float, NaN) - Minimum active power injection - necessary for \
controllable sgens in OPF
**max_q_kvar** (float, NaN) - Maximum reactive power injection - necessary for \
controllable sgens in OPF
**min_q_kvar** (float, NaN) - Minimum reactive power injection - necessary for \
controllable sgens in OPF
**controllable** (bool, NaN) - Whether this generator is controllable by the optimal
powerflow
**k** (float, NaN) - Ratio of nominal current to short circuit current
**rx** (float, NaN) - R/X ratio for short circuit impedance. Only relevant if type is specified as motor so that sgen is treated as asynchronous motor
OUTPUT:
**index** (int) - The unique ID of the created sgen
EXAMPLE:
create_sgen(net, 1, p_kw = -120)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["sgen"])
if index in net["sgen"].index:
raise UserWarning("A static generator with the id %s already exists" % index)
# store dtypes
dtypes = net.sgen.dtypes
net.sgen.loc[index, ["name", "bus", "p_kw", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, scaling, q_kvar, sn_kva, bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.sgen, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "min_p_kw"] = pd.Series()
net.sgen.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "max_p_kw"] = pd.Series()
net.sgen.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "min_q_kvar"] = pd.Series()
net.sgen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "max_q_kvar"] = pd.Series()
net.sgen.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.sgen.columns:
net.sgen.loc[:, "controllable"] = pd.Series()
net.sgen.loc[index, "controllable"] = bool(controllable)
else:
if "controllable" in net.sgen.columns:
net.sgen.loc[index, "controllable"] = False
if not isnan(k):
if "k" not in net.sgen.columns:
net.sgen.loc[:, "k"] = pd.Series()
net.sgen.loc[index, "k"] = float(k)
if not isnan(rx):
if "rx" not in net.sgen.columns:
net.sgen.loc[:, "rx"] = pd.Series()
net.sgen.loc[index, "rx"] = float(rx)
return index
def create_sgen_from_cosphi(net, bus, sn_kva, cos_phi, mode, **kwargs):
"""
Creates an sgen element from rated power and power factor cos(phi).
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the static generator is connected
**sn_kva** (float) - rated power of the generator
**cos_phi** (float) - power factor cos_phi
**mode** (str) - "ind" for inductive or "cap" for capacitive behaviour
OUTPUT:
**index** (int) - The unique ID of the created sgen
All elements including generators are modeled from a consumer point of view. Active power
will therefore always be negative, reactive power will be negative for inductive behaviour and
positive for capacitive behaviour.
"""
from pandapower.toolbox import pq_from_cosphi
p_kw, q_kvar = pq_from_cosphi(sn_kva, cos_phi, qmode=mode, pmode="gen")
return create_sgen(net, bus, sn_kva=sn_kva, p_kw=p_kw, q_kvar=q_kvar, **kwargs)
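# Illustrative sketch (values assumed, not taken from the original module): a 500 kVA
# generating unit at cos(phi) = 0.95 corresponds to 500 * 0.95 = 475 kW of feed-in,
# which the helper books with a negative sign per the consumer-oriented convention
# described above, e.g.:
#
#     net = create_empty_network()
#     b = create_bus(net, vn_kv=0.4)
#     create_sgen_from_cosphi(net, b, sn_kva=500, cos_phi=0.95, mode="ind")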
def create_gen(net, bus, p_kw, vm_pu=1., sn_kva=nan, name=None, index=None, max_q_kvar=nan,
min_q_kvar=nan, min_p_kw=nan, max_p_kw=nan, scaling=1., type=None,
controllable=nan, vn_kv=nan, xdss=nan, rdss=nan, cos_phi=nan, in_service=True):
"""create_gen(net, bus, p_kw, vm_pu=1., sn_kva=nan, name=None, index=None, max_q_kvar=nan, \
min_q_kvar=nan, min_p_kw=nan, max_p_kw=nan, scaling=1., type=None, \
controllable=nan, vn_kv=nan, xdss=nan, rdss=nan, cos_phi=nan, in_service=True)
Adds a generator to the network.
Generators are always modelled as voltage controlled PV nodes, which is why the input parameter
is active power and a voltage set point. If you want to model a generator as PQ load with fixed
reactive power and variable voltage, please use a static generator instead.
INPUT:
**net** - The net within this generator should be created
**bus** (int) - The bus id to which the generator is connected
OPTIONAL:
        **p_kw** (float) - The real power of the generator (negative for generation!)
        **vm_pu** (float, default 1.) - The voltage set point of the generator in per unit.
**sn_kva** (float, None) - Nominal power of the generator
**name** (string, None) - The name for this generator
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
        **scaling** (float, 1.0) - scaling factor applied to the active power of the generator
**type** (string, None) - type variable to classify generators
**controllable** (bool, NaN) - Whether this generator is controllable by the optimal
powerflow
**vn_kv** (float, NaN) - Rated voltage of the generator for short-circuit calculation
**xdss** (float, NaN) - Subtransient generator reactance for short-circuit calculation
**rdss** (float, NaN) - Subtransient generator resistance for short-circuit calculation
**cos_phi** (float, NaN) - Rated cosine phi of the generator for short-circuit calculation
**in_service** (bool, True) - True for in_service or False for out of service
**max_p_kw** (float, default NaN) - Maximum active power injection - necessary for OPF
**min_p_kw** (float, default NaN) - Minimum active power injection - necessary for OPF
**max_q_kvar** (float, default NaN) - Maximum reactive power injection - necessary for OPF
**min_q_kvar** (float, default NaN) - Minimum reactive power injection - necessary for OPF
OUTPUT:
**index** (int) - The unique ID of the created generator
EXAMPLE:
create_gen(net, 1, p_kw = -120, vm_pu = 1.02)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if bus in net.ext_grid.bus.values:
raise UserWarning(
"There is already an external grid at bus %u, thus no other voltage " % bus +
"controlling element (ext_grid, gen) is allowed at this bus.")
# if bus in net.gen.bus.values:
# raise UserWarning(
# "There is already a generator at bus %u, only one voltage controlling " % bus +
# "element (ext_grid, gen) is allowed per bus.")
if index is None:
index = get_free_id(net["gen"])
if index in net["gen"].index:
raise UserWarning("A generator with the id %s already exists" % index)
# store dtypes
dtypes = net.gen.dtypes
net.gen.loc[index, ["name", "bus", "p_kw", "vm_pu", "sn_kva", "type", "in_service",
"scaling"]] = [name, bus, p_kw, vm_pu, sn_kva, type, bool(in_service),
scaling]
# and preserve dtypes
_preserve_dtypes(net.gen, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.gen.columns:
net.gen.loc[:, "min_p_kw"] = pd.Series()
net.gen.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.gen.columns:
net.gen.loc[:, "max_p_kw"] = pd.Series()
net.gen.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.gen.columns:
net.gen.loc[:, "min_q_kvar"] = pd.Series()
net.gen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.gen.columns:
net.gen.loc[:, "max_q_kvar"] = pd.Series()
net.gen.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.gen.columns:
net.gen.loc[:, "controllable"] = pd.Series(False)
net.gen.loc[index, "controllable"] = bool(controllable)
elif "controllable" in net.gen.columns:
net.gen.loc[index, "controllable"] = False
if not isnan(vn_kv):
if "vn_kv" not in net.gen.columns:
net.gen.loc[:, "vn_kv"] = pd.Series()
net.gen.loc[index, "vn_kv"] = float(vn_kv)
if not isnan(xdss):
if "xdss" not in net.gen.columns:
net.gen.loc[:, "xdss"] = | pd.Series() | pandas.Series |
"""
Script used to automatically generate PDF report comparing TaxData outputs
after updates
"""
# flake8: noqa: E501
import argparse
import pandas as pd
import taxcalc as tc
import altair as alt
from report_utils import (
run_calc,
distplot,
write_page,
growth_scatter_plot,
compare_vars,
cbo_bar_chart,
agg_liability_table,
)
from pathlib import Path
from datetime import datetime
from collections import defaultdict
from requests_html import HTMLSession
CUR_PATH = Path(__file__).resolve().parent
STAGE1_PATH = Path(CUR_PATH, "..", "puf_stage1")
CBO_PATH = Path(STAGE1_PATH, "CBO_baseline.csv")
SOI_PATH = Path(STAGE1_PATH, "SOI_estimates.csv")
GROW_FACTORS_PATH = Path(STAGE1_PATH, "growfactors.csv")
META_PATH = Path(CUR_PATH, "..", "tests", "records_metadata.json")
CBO_URL = "https://raw.githubusercontent.com/PSLmodels/taxdata/master/puf_stage1/CBO_baseline.csv"
SOI_URL = "https://raw.githubusercontent.com/PSLmodels/taxdata/master/puf_stage1/SOI_estimates.csv"
META_URL = "https://raw.githubusercontent.com/PSLmodels/taxdata/master/tests/records_metadata.json"
GROW_FACTORS_URL = "https://raw.githubusercontent.com/PSLmodels/taxdata/master/puf_stage1/growfactors.csv"
PUF_PATH = Path(CUR_PATH, "..", "puf_data", "puf.csv")
PUF_AVAILABLE = PUF_PATH.exists()
TEMPLATE_PATH = Path(CUR_PATH, "report_template.md")
CBO_LABELS = {
"GDP": "GDP (Billions)",
"TPY": "Personal Income (Billions)",
"Wages": "Wages and Salaries (Billions)",
"SCHC": "Proprietors Income, Non Farm with IVA & CCAdj (Billions)",
"SCHF": "Proprietors Income, Farm with IVA & CCAdj (Billions)",
"INTS": "Personal Interest Income (Billions)",
"DIVS": "Personal Dividend Income (Billions)",
"RENTS": "Rental Income with CCADJ (Billions)",
"BOOK": "Corporate Profits with IVA & CCADJ (Billions)",
"CPIU": "Consumer Pricing Index, All Urban Consumers (CPI-U) - 1982-84=100",
"CGNS": "Capital Gains Realizations",
"RETS": "IRS Projections of Individual Returns (Millions)",
"SOCSEC": "Scheduled Social Security Benefits",
"CPIM": "CPI Medical Care",
"UCOMP": "Unemployment Compensation (Billions)",
}
def report():
"""
Generate TaxData history report
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"prs",
help=(
"prs is a list of prs that were used for this report. "
"Enter them as a string separated by commas"
),
default="",
type=str,
)
parser.add_argument(
"--desc",
help=(
"File path to a text or markdown file with additonal information "
"that will appear at the begining of the report"
),
default="",
type=str,
)
args = parser.parse_args()
desc = args.desc
if desc:
desc = Path(args.desc).open("r").read()
plot_paths = []
date = datetime.today().date()
template_args = {"date": date, "desc": desc}
pull_str = "* [#{}: {}]({})"
_prs = args.prs.split(",")
session = HTMLSession()
prs = []
for pr in _prs:
url = f"https://github.com/PSLmodels/taxdata/pull/{pr}"
# extract PR title
r = session.get(url)
elm = r.html.find("span.js-issue-title")[0]
title = elm.text
prs.append(pull_str.format(pr, title, url))
template_args["prs"] = prs
# CBO projection comparisons
cbo_projections = []
cur_cbo = pd.read_csv(CBO_URL, index_col=0)
new_cbo = pd.read_csv(CBO_PATH, index_col=0)
cbo_years = new_cbo.columns.astype(int)
last_year = cbo_years.max()
first_year = last_year - 9
if new_cbo.equals(cur_cbo):
cbo_projections.append("No changes to CBO projections.")
else:
# we"re only going to include the final ten years in our bar chart
keep_years = [str(year) for year in range(first_year, last_year + 1)]
cur_cbo = cur_cbo.filter(keep_years, axis=1).transpose().reset_index()
cur_cbo["Projections"] = "Current"
new_cbo = new_cbo.filter(keep_years, axis=1).transpose().reset_index()
new_cbo["Projections"] = "New"
cbo_data = pd.concat([cur_cbo, new_cbo], axis=0)
for col in cbo_data.columns:
if col == "index" or col == "Projections":
continue
chart = cbo_bar_chart(cbo_data, col, CBO_LABELS[col])
img_path = Path(CUR_PATH, f"{col}.png")
chart.save(str(img_path))
plot_paths.append(img_path)
cbo_projections.append(f"})" + "{.center}")
template_args["cbo_projections"] = cbo_projections
# changes in data availability
cur_meta = pd.read_json(META_URL, orient="index")
new_meta = pd.read_json(META_PATH, orient="index")
puf_added, puf_removed = compare_vars(cur_meta, new_meta, "puf")
cps_added, cps_removed = compare_vars(cur_meta, new_meta, "cps")
template_args["puf_added"] = puf_added
template_args["puf_removed"] = puf_removed
template_args["cps_added"] = cps_added
template_args["cps_removed"] = cps_removed
# growth rate changes
growth_rate_projections = []
cur_grow = pd.read_csv(GROW_FACTORS_URL)
new_grow = | pd.read_csv(GROW_FACTORS_PATH) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.data_checks import (
ClassImbalanceDataCheck,
DataCheckError,
DataCheckMessageCode,
DataCheckWarning,
)
class_imbalance_data_check_name = ClassImbalanceDataCheck.name
def test_class_imbalance_errors():
X = pd.DataFrame()
with pytest.raises(ValueError, match="threshold 0 is not within the range"):
ClassImbalanceDataCheck(threshold=0).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="threshold 0.51 is not within the range"):
ClassImbalanceDataCheck(threshold=0.51).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="threshold -0.5 is not within the range"):
ClassImbalanceDataCheck(threshold=-0.5).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="Provided number of CV folds"):
ClassImbalanceDataCheck(num_cv_folds=-1).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="Provided value min_samples"):
ClassImbalanceDataCheck(min_samples=-1).validate(X, y=pd.Series([0, 1, 1]))
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_data_check_binary(input_type):
X = pd.DataFrame()
y = pd.Series([0, 0, 1])
y_long = | pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.ensemble import RandomForestClassifier, GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
from sklearn.model_selection import GridSearchCV
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBClassifier, XGBRegressor
def frames_season():
# Creating the frames by season
frames = []
new_frames = []
for i in range(2005, 2019):
globals()['df' + str(i)] = pd.read_csv(r"frames\\" + str(i) + '.csv', nrows=380)
globals()['df' + str(i)].dropna()
frames.append(globals()['df' + str(i)])
    # Creating the stats for each frame (season)
pd.options.mode.chained_assignment = None
for frame in frames:
frame['ID'] = frame.index + 1
frame['ID'] = frame['ID'].apply(lambda x: '{0:0>5}'.format(x))
frame['FTGT'] = frame['FTHG'] + frame['FTAG']
columns_name = ['ID', 'Date', 'Team', 'FTG', 'FTR', 'FTGT', 'HTG', 'HTR', 'S', 'ST', 'C', 'F', 'Y', 'R',
'B365H', 'BWH', 'IWH', 'VCH', 'WHH',
'B365D', 'BWD', 'IWD', 'VCD', 'WHD',
'B365A', 'BWA', 'IWA', 'VCA', 'WHA',
'BbAv>2.5', 'BbAv<2.5']
# Home games
frame_h = frame[['ID', 'Date', 'HomeTeam', 'FTHG', 'FTR', 'FTGT', 'HTHG', 'HTR', 'HS', 'HST', 'HC', 'HF', 'HY', 'HR',
'B365H', 'BWH', 'IWH', 'VCH', 'WHH',
'B365D', 'BWD', 'IWD', 'VCD', 'WHD',
'B365A', 'BWA', 'IWA', 'VCA', 'WHA',
'BbAv>2.5', 'BbAv<2.5']].copy()
frame_h.columns = columns_name
frame_h['Location'] = 1
# Away games
frame_A = frame[['ID', 'Date', 'AwayTeam', 'FTAG', 'FTR', 'FTGT', 'HTAG', 'HTR', 'AS', 'AST', 'AC', 'AF', 'AY', 'AR',
'B365H', 'BWH', 'IWH', 'VCH', 'WHH',
'B365D', 'BWD', 'IWD', 'VCD', 'WHD',
'B365A', 'BWA', 'IWA', 'VCA', 'WHA',
'BbAv>2.5', 'BbAv<2.5']].copy()
frame_A.columns = columns_name
frame_A['Location'] = 0
        # Merge home and away rows and build the running stats
new_data_set = pd.merge(frame_h, frame_A, how='outer').sort_values('ID')
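        # Note on the features built below: `.expanding().mean().shift()` inside the
        # groupby transform averages only a team's *previous* matches; the trailing
        # shift() drops the current match, so no feature leaks the result it predicts.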
new_data_set['FTGT_ALL_MEAN'] = new_data_set.groupby('Team')['FTGT'].transform(lambda x: x.expanding().mean().shift())
new_data_set['FTG_ALL_MEAN'] = new_data_set.groupby('Team')['FTG'].transform(lambda x: x.expanding().mean().shift())
new_data_set['HTG_ALL_MEAN'] = new_data_set.groupby('Team')['HTG'].transform(lambda x: x.expanding().mean().shift())
new_data_set['S_ALL_MEAN'] = new_data_set.groupby('Team')['S'].transform(lambda x: x.expanding().mean().shift())
new_data_set['ST_ALL_MEAN'] = new_data_set.groupby('Team')['ST'].transform(lambda x: x.expanding().mean().shift())
new_data_set['C_ALL_MEAN'] = new_data_set.groupby('Team')['C'].transform(lambda x: x.expanding().mean().shift())
new_data_set['F_ALL_MEAN'] = new_data_set.groupby('Team')['F'].transform(lambda x: x.expanding().mean().shift())
new_data_set['Y_ALL_MEAN'] = new_data_set.groupby('Team')['Y'].transform(lambda x: x.expanding().mean().shift())
new_data_set['R_ALL_MEAN'] = new_data_set.groupby('Team')['R'].transform(lambda x: x.expanding().mean().shift())
        # Separate home/away rows and merge them back together by game
home_teams = new_data_set[new_data_set['Location'] == 1]
home_teams.columns = ['ID', 'Date', 'HomeTeam', 'FTHG', 'FTR', 'FTGT', 'HTHG', 'HTR', 'HS', 'HST', 'HC', 'HF', 'HY',
'HR', 'B365H', 'BWH', 'IWH', 'VCH', 'WHH', 'B365D', 'BWD', 'IWD', 'VCD', 'WHD',
'B365A', 'BWA', 'IWA', 'VCA', 'WHA', 'BbAv>2.5', 'BbAv<2.5', 'Location', 'FTGHT_ALL_MEAN',
'FTHG_ALL_MEAN', 'HTHG_ALL_MEAN', 'HS_ALL_MEAN', 'HST_ALL_MEAN', 'HC_ALL_MEAN', 'HF_ALL_MEAN',
'HY_ALL_MEAN', 'HR_ALL_MEAN']
#home_teams.reset_index(drop=True)
away_teams = new_data_set[new_data_set['Location'] == 0]
away_teams.columns = ['ID', 'Date', 'AwayTeam', 'FTAG', 'FTR', 'FTGT', 'HTAG', 'HTR', 'AS', 'AST', 'AC', 'AF', 'AY',
'AR', 'B365H', 'BWH', 'IWH', 'VCH', 'WHH', 'B365D', 'BWD', 'IWD', 'VCD', 'WHD',
'B365A', 'BWA', 'IWA', 'VCA', 'WHA', 'BbAv>2.5', 'BbAv<2.5', 'Location', 'FTGAT_ALL_MEAN',
'FTAG_ALL_MEAN', 'HTAG_ALL_MEAN', 'AS_ALL_MEAN', 'AST_ALL_MEAN', 'AC_ALL_MEAN', 'AF_ALL_MEAN',
'AY_ALL_MEAN', 'AR_ALL_MEAN']
#away_teams.reset_index(drop=True)
frame_merge = pd.merge(home_teams, away_teams, left_on=['ID', 'Date', 'B365H', 'BWH', 'IWH', 'VCH', 'WHH', 'B365D', 'BWD', 'IWD', 'VCD', 'WHD', 'B365A', 'BWA', 'IWA', 'VCA', 'WHA', 'BbAv>2.5', 'BbAv<2.5'],
right_on=['ID', 'Date', 'B365H', 'BWH', 'IWH', 'VCH', 'WHH', 'B365D', 'BWD', 'IWD', 'VCD', 'WHD', 'B365A', 'BWA', 'IWA', 'VCA', 'WHA', 'BbAv>2.5', 'BbAv<2.5'])
frame_merge = frame_merge.drop(['FTR_y', 'HTR_y', 'Location_y', 'Location_x', 'FTGT_y'], axis=1)
frame_merge = frame_merge.rename(columns={'FTR_x': 'FTR', 'HTR_x': 'HTR', 'FTGT_x': 'FTGT'})
frame_merge['FTGHT_MEAN'] = frame_merge.groupby('HomeTeam')['FTGT'].transform(lambda x: x.expanding().mean().shift())
frame_merge['FTGAT_MEAN'] = frame_merge.groupby('AwayTeam')['FTGT'].transform(lambda x: x.expanding().mean().shift())
frame_merge['FTHG_MEAN'] = frame_merge.groupby('HomeTeam')['FTHG'].transform(lambda x: x.expanding().mean().shift()) # FTHG = Full Time Home Team Goals
frame_merge['FTAG_MEAN'] = frame_merge.groupby('AwayTeam')['FTAG'].transform(lambda x: x.expanding().mean().shift()) # FTAG = Full Time Away Team Goals
frame_merge['HTHG_MEAN'] = frame_merge.groupby('HomeTeam')['HTHG'].transform(lambda x: x.expanding().mean().shift()) # HTHG = Half Time Home Team Goals
frame_merge['HTAG_MEAN'] = frame_merge.groupby('AwayTeam')['HTAG'].transform(lambda x: x.expanding().mean().shift()) # HTAG = Half Time Away Team Goals
frame_merge['HS_MEAN'] = frame_merge.groupby('HomeTeam')['HS'].transform(lambda x: x.expanding().mean().shift()) # HS = Home Team Shots
frame_merge['AS_MEAN'] = frame_merge.groupby('AwayTeam')['AS'].transform(lambda x: x.expanding().mean().shift()) # AS = Away Team Shots
frame_merge['HST_MEAN'] = frame_merge.groupby('HomeTeam')['HST'].transform(lambda x: x.expanding().mean().shift()) # HST = Home Team Shots on Target
frame_merge['AST_MEAN'] = frame_merge.groupby('AwayTeam')['AST'].transform(lambda x: x.expanding().mean().shift()) # AST = Away Team Shots on Target
frame_merge['HC_MEAN'] = frame_merge.groupby('HomeTeam')['HC'].transform(lambda x: x.expanding().mean().shift()) # HC = Home Team Corners
frame_merge['AC_MEAN'] = frame_merge.groupby('AwayTeam')['AC'].transform(lambda x: x.expanding().mean().shift()) # AC = Away Team Corners
frame_merge['HF_MEAN'] = frame_merge.groupby('HomeTeam')['HF'].transform(lambda x: x.expanding().mean().shift()) # HF = Home Team Fouls Committed
frame_merge['AF_MEAN'] = frame_merge.groupby('AwayTeam')['AF'].transform(lambda x: x.expanding().mean().shift()) # AF = Away Team Fouls Committed
frame_merge['HY_MEAN'] = frame_merge.groupby('HomeTeam')['HY'].transform(lambda x: x.expanding().mean().shift()) # HY = Home Team Yellow Cards
frame_merge['AY_MEAN'] = frame_merge.groupby('AwayTeam')['AY'].transform(lambda x: x.expanding().mean().shift()) # AY = Away Team Yellow Cards
frame_merge['HR_MEAN'] = frame_merge.groupby('HomeTeam')['HR'].transform(lambda x: x.expanding().mean().shift()) # HR = Home Team Red Cards
frame_merge['AR_MEAN'] = frame_merge.groupby('AwayTeam')['AR'].transform(lambda x: x.expanding().mean().shift()) # AR = Away Team Red Cards
# Mean of odds
frame_merge['AvgHr'] = frame_merge[['B365H', 'BWH', 'IWH', 'VCH', 'WHH']].mean(axis=1) # AvgHr = Market average home win odds
frame_merge['AvgDr'] = frame_merge[['B365D', 'BWD', 'IWD', 'VCD', 'WHD']].mean(axis=1) # AvgDr = Market average draw win odds
frame_merge['AvgAr'] = frame_merge[['B365A', 'BWA', 'IWA', 'VCA', 'WHA']].mean(axis=1) # AvgAr = Market average away win odds
        # Creating the WINS, LOSSES, DRAWS of each team
frame_merge['FTR_A'] = np.select([frame_merge['FTR'] == 'H', frame_merge['FTR'] == 'A', frame_merge['FTR'] == 'D'], [0, 2, 1], default=None)
frame_merge['FTR'] = frame_merge['FTR'].map({'H': 2, 'A': 0, 'D': 1}) # FTR = Full Time Result (H=Home Win, D=Draw, A=Away Win)
frame_merge['HTR'] = frame_merge['HTR'].map({'H': 2, 'A': 0, 'D': 1}) # HTR = Half Time Result (H=Home Win, D=Draw, A=Away Win)
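        # FTR is now mapped to match points from the home side's view (2 win / 1 draw /
        # 0 loss) and FTR_A holds the mirror-image points for the away side, so the
        # expanding sums below track the points each team collected in its previous
        # home (H_POINTS) and away (A_POINTS) fixtures before the current match.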
frame_merge['H_POINTS'] = frame_merge.groupby('HomeTeam')['FTR'].transform(lambda x: x.expanding().sum().shift())
frame_merge['A_POINTS'] = frame_merge.groupby('AwayTeam')['FTR_A'].transform(lambda x: x.expanding().sum().shift())
frame_merge['H_DRAWS'] = frame_merge.groupby('HomeTeam')['FTR'].transform(lambda x: (x == 1).expanding().sum().shift())
frame_merge['A_DRAWS'] = frame_merge.groupby('AwayTeam')['FTR_A'].transform(lambda x: (x == 1).expanding().sum().shift())
frame_merge['Diff_POINTS'] = frame_merge['H_POINTS'] - frame_merge['A_POINTS']
#Last 2
frame_merge['LAST_2_MP_H'] = frame_merge.groupby('HomeTeam')['FTR'].transform(lambda x: x.rolling(window=2).sum().shift())
frame_merge['LAST_2_MP_A'] = frame_merge.groupby('AwayTeam')['FTR_A'].transform(lambda x: x.rolling(window=2).sum().shift())
frame_merge['LAST_2_FTGHT_MEAN'] = frame_merge.groupby('HomeTeam')['FTGT'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_FTGAT_MEAN'] = frame_merge.groupby('AwayTeam')['FTGT'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_FTHG_MEAN'] = frame_merge.groupby('HomeTeam')['FTHG'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_FTAG_MEAN'] = frame_merge.groupby('AwayTeam')['FTAG'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_HS_MEAN'] = frame_merge.groupby('HomeTeam')['HS'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_AS_MEAN'] = frame_merge.groupby('AwayTeam')['AS'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_HC_MEAN'] = frame_merge.groupby('HomeTeam')['HC'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_AC_MEAN'] = frame_merge.groupby('AwayTeam')['AC'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_HF_MEAN'] = frame_merge.groupby('HomeTeam')['HF'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_AF_MEAN'] = frame_merge.groupby('AwayTeam')['AF'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_HY_MEAN'] = frame_merge.groupby('HomeTeam')['HY'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_AY_MEAN'] = frame_merge.groupby('AwayTeam')['AY'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_HR_MEAN'] = frame_merge.groupby('HomeTeam')['HR'].transform(lambda x: x.rolling(window=2).mean().shift())
frame_merge['LAST_2_AR_MEAN'] = frame_merge.groupby('AwayTeam')['AR'].transform(lambda x: x.rolling(window=2).mean().shift())
#Last 3
frame_merge['LAST_3_MP_H'] = frame_merge.groupby('HomeTeam')['FTR'].transform(lambda x: x.rolling(window=3).sum().shift())
frame_merge['LAST_3_MP_A'] = frame_merge.groupby('AwayTeam')['FTR_A'].transform(lambda x: x.rolling(window=3).sum().shift())
frame_merge['LAST_3_FTGHT_MEAN'] = frame_merge.groupby('HomeTeam')['FTGT'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_FTGAT_MEAN'] = frame_merge.groupby('AwayTeam')['FTGT'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_FTHG_MEAN'] = frame_merge.groupby('HomeTeam')['FTHG'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_FTAG_MEAN'] = frame_merge.groupby('AwayTeam')['FTAG'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_HS_MEAN'] = frame_merge.groupby('HomeTeam')['HS'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_AS_MEAN'] = frame_merge.groupby('AwayTeam')['AS'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_HC_MEAN'] = frame_merge.groupby('HomeTeam')['HC'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_AC_MEAN'] = frame_merge.groupby('AwayTeam')['AC'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_HF_MEAN'] = frame_merge.groupby('HomeTeam')['HF'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_AF_MEAN'] = frame_merge.groupby('AwayTeam')['AF'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_HY_MEAN'] = frame_merge.groupby('HomeTeam')['HY'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_AY_MEAN'] = frame_merge.groupby('AwayTeam')['AY'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_HR_MEAN'] = frame_merge.groupby('HomeTeam')['HR'].transform(lambda x: x.rolling(window=3).mean().shift())
frame_merge['LAST_3_AR_MEAN'] = frame_merge.groupby('AwayTeam')['AR'].transform(lambda x: x.rolling(window=3).mean().shift())
# Last 5
frame_merge['LAST_5_MP_H'] = frame_merge.groupby('HomeTeam')['FTR'].transform(lambda x: x.rolling(window=5).sum().shift())
frame_merge['LAST_5_MP_A'] = frame_merge.groupby('AwayTeam')['FTR_A'].transform(lambda x: x.rolling(window=5).sum().shift())
frame_merge['LAST_5_FTGHT_MEAN'] = frame_merge.groupby('HomeTeam')['FTGT'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_FTGAT_MEAN'] = frame_merge.groupby('AwayTeam')['FTGT'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_FTHG_MEAN'] = frame_merge.groupby('HomeTeam')['FTHG'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_FTAG_MEAN'] = frame_merge.groupby('AwayTeam')['FTAG'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_HS_MEAN'] = frame_merge.groupby('HomeTeam')['HS'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_AS_MEAN'] = frame_merge.groupby('AwayTeam')['AS'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_HC_MEAN'] = frame_merge.groupby('HomeTeam')['HC'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_AC_MEAN'] = frame_merge.groupby('AwayTeam')['AC'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_HF_MEAN'] = frame_merge.groupby('HomeTeam')['HF'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_AF_MEAN'] = frame_merge.groupby('AwayTeam')['AF'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_HY_MEAN'] = frame_merge.groupby('HomeTeam')['HY'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_AY_MEAN'] = frame_merge.groupby('AwayTeam')['AY'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_HR_MEAN'] = frame_merge.groupby('HomeTeam')['HR'].transform(lambda x: x.rolling(window=5).mean().shift())
frame_merge['LAST_5_AR_MEAN'] = frame_merge.groupby('AwayTeam')['AR'].transform(lambda x: x.rolling(window=5).mean().shift())
new_frames.append(frame_merge)
return new_frames
def data_set():
frames = frames_season()
premier_league_stats = pd.concat(frames)
columns_to_use = ['Date', 'HomeTeam', 'AwayTeam', 'FTGT', 'FTHG', 'FTHG_MEAN', 'FTAG', 'FTR', 'FTR_A', 'Diff_POINTS',
'LAST_2_MP_H', 'LAST_2_MP_A', 'LAST_3_MP_H', 'LAST_3_MP_A', 'LAST_5_MP_H', 'LAST_5_MP_A', 'H_DRAWS',
'A_DRAWS', 'H_POINTS', 'A_POINTS', 'HTHG', 'HTAG', 'HTR', 'HS', 'AS', 'HST', 'AST', 'HC', 'AC', 'HF',
'AF', 'HY', 'AY', 'HR', 'AR', 'AvgHr', 'AvgDr', 'AvgAr', 'BbAv>2.5', 'BbAv<2.5', 'FTAG_MEAN', 'HTHG_MEAN', 'HTAG_MEAN', 'HS_MEAN',
'AS_MEAN', 'HST_MEAN', 'AST_MEAN', 'HC_MEAN', 'AC_MEAN', 'HF_MEAN', 'AF_MEAN', 'HY_MEAN', 'AY_MEAN', 'HR_MEAN',
'AR_MEAN', 'LAST_2_FTHG_MEAN', 'LAST_2_FTAG_MEAN', 'LAST_3_FTHG_MEAN', 'LAST_3_FTAG_MEAN', 'LAST_5_FTHG_MEAN', 'LAST_5_FTAG_MEAN',
'LAST_2_HS_MEAN', 'LAST_2_AS_MEAN', 'LAST_2_HC_MEAN', 'LAST_2_AC_MEAN', 'LAST_2_HF_MEAN', 'LAST_2_AF_MEAN',
'LAST_2_HY_MEAN', 'LAST_2_AY_MEAN', 'LAST_2_HR_MEAN', 'LAST_2_AR_MEAN',
'LAST_3_HS_MEAN', 'LAST_3_AS_MEAN', 'LAST_3_HC_MEAN', 'LAST_3_AC_MEAN', 'LAST_3_HF_MEAN',
'LAST_3_AF_MEAN', 'LAST_3_HY_MEAN', 'LAST_3_AY_MEAN', 'LAST_3_HR_MEAN', 'LAST_3_AR_MEAN',
'LAST_5_HS_MEAN', 'LAST_5_AS_MEAN', 'LAST_5_HC_MEAN', 'LAST_5_AC_MEAN', 'LAST_5_HF_MEAN',
'LAST_5_AF_MEAN', 'LAST_5_HY_MEAN', 'LAST_5_AY_MEAN', 'LAST_5_HR_MEAN', 'LAST_5_AR_MEAN',
'FTGHT_ALL_MEAN', 'FTHG_ALL_MEAN', 'HTHG_ALL_MEAN', 'HS_ALL_MEAN', 'HST_ALL_MEAN', 'HC_ALL_MEAN', 'HF_ALL_MEAN', 'HY_ALL_MEAN', 'HR_ALL_MEAN',
'FTGAT_ALL_MEAN', 'FTAG_ALL_MEAN', 'HTAG_ALL_MEAN', 'AS_ALL_MEAN', 'AST_ALL_MEAN', 'AC_ALL_MEAN', 'AF_ALL_MEAN',
'AY_ALL_MEAN', 'AR_ALL_MEAN',
'FTGHT_MEAN', 'FTGAT_MEAN', 'LAST_2_FTGHT_MEAN', 'LAST_2_FTGAT_MEAN', 'LAST_3_FTGHT_MEAN', 'LAST_3_FTGAT_MEAN', 'LAST_5_FTGHT_MEAN', 'LAST_5_FTGAT_MEAN']
premier_league_stats = premier_league_stats[columns_to_use]
premier_league_stats['FTGT_C'] = np.where(premier_league_stats['FTGT'] > 2.5, 1, 0)
premier_league_stats = premier_league_stats.dropna().reset_index(drop=True)
return premier_league_stats
def feature_and_target(tML=str, tar=str):
features = ['LAST_2_HS_MEAN', 'LAST_2_AS_MEAN', 'LAST_2_HC_MEAN', 'LAST_2_AC_MEAN', 'LAST_2_HF_MEAN',
'LAST_2_AF_MEAN', 'LAST_2_HY_MEAN', 'LAST_2_AY_MEAN', 'LAST_2_HR_MEAN', 'LAST_2_AR_MEAN',
'LAST_3_HS_MEAN', 'LAST_3_AS_MEAN', 'LAST_3_HC_MEAN', 'LAST_3_AC_MEAN', 'LAST_3_HF_MEAN',
'LAST_3_AF_MEAN', 'LAST_3_HY_MEAN', 'LAST_3_AY_MEAN', 'LAST_3_HR_MEAN', 'LAST_3_AR_MEAN',
'LAST_5_HS_MEAN', 'LAST_5_AS_MEAN', 'LAST_5_HC_MEAN', 'LAST_5_AC_MEAN', 'LAST_5_HF_MEAN',
'LAST_5_AF_MEAN', 'LAST_5_HY_MEAN', 'LAST_5_AY_MEAN', 'LAST_5_HR_MEAN', 'LAST_5_AR_MEAN',
'FTGHT_ALL_MEAN', 'FTHG_ALL_MEAN', 'HTHG_ALL_MEAN', 'HS_ALL_MEAN', 'HST_ALL_MEAN', 'HC_ALL_MEAN', 'HF_ALL_MEAN', 'HY_ALL_MEAN',
'HR_ALL_MEAN', 'FTGAT_ALL_MEAN', 'FTAG_ALL_MEAN', 'HTAG_ALL_MEAN', 'AS_ALL_MEAN', 'AST_ALL_MEAN', 'AC_ALL_MEAN', 'AF_ALL_MEAN',
'AY_ALL_MEAN', 'AR_ALL_MEAN', 'FTHG_MEAN', 'FTAG_MEAN', 'HTHG_MEAN', 'HTAG_MEAN', 'HS_MEAN', 'AS_MEAN', 'HST_MEAN',
'AST_MEAN', 'HC_MEAN', 'AC_MEAN', 'HF_MEAN', 'AF_MEAN', 'HY_MEAN', 'AY_MEAN', 'HR_MEAN', 'AR_MEAN', 'H_POINTS', 'A_POINTS',
'H_DRAWS', 'A_DRAWS', 'Diff_POINTS', 'LAST_2_MP_H', 'LAST_2_MP_A', 'LAST_2_FTHG_MEAN', 'LAST_2_FTAG_MEAN',
'LAST_3_MP_H', 'LAST_3_MP_A', 'LAST_3_FTHG_MEAN', 'LAST_3_FTAG_MEAN', 'LAST_5_MP_H', 'LAST_5_MP_A', 'LAST_5_FTHG_MEAN',
'LAST_5_FTAG_MEAN']
    # The same single target column is returned for both classifier and regression tasks
    target = [tar]
return features, target
def KNN(Xtr, ytr, Xte, yte):  # K-nearest neighbours: small grid search over n_neighbors
    for i in range(1, 20):
        knn = KNeighborsClassifier(n_neighbors=i).fit(Xtr, ytr)
        if i == 1 or knn.score(Xte, yte) > s_te:
            s_te = knn.score(Xte, yte)
            s_tr = knn.score(Xtr, ytr)
            g = i
    print(f'The best n_neighbors is: {g}')
    print(f'Accuracy of K-NN ({g}) test on train set is: {s_tr}')
    print(f'Accuracy of K-NN ({g}) test on test set is: {s_te}')
    # Refit with the best k before predicting; otherwise the last fitted model (k=19) would be used
    knn = KNeighborsClassifier(n_neighbors=g).fit(Xtr, ytr)
    return knn.predict(Xte)
def Decision_tree(Xtr, ytr, Xte, yte): #Decision tree test
dt = DecisionTreeClassifier().fit(Xtr, ytr)
print(f'Accuracy of DecisionTreeClassifier test on train set is: {dt.score(Xtr, ytr)}')
print(f'Accuracy of DecisionTreeClassifier test on test set is: {dt.score(Xte, yte)}')
return dt.predict(Xte)
def SVC_test(Xtr, ytr, Xte, yte=None, text=None):  # SVC: small grid search over gamma
    for i in [0.01, 0.1, 1, 2, 10]:
        svm = SVC(gamma=i).fit(Xtr, ytr)
        if i == 0.01 or svm.score(Xte, yte) > s_te:
            s_te = svm.score(Xte, yte)
            s_tr = svm.score(Xtr, ytr)
            g = i
    # Refit with the best gamma before reporting scores and predicting
    svm = SVC(gamma=g).fit(Xtr, ytr)
    if text is not None:
        print(f'The best gamma is: {g}')
        print(f'Accuracy of SVC test on train set is: {svm.score(Xtr, ytr)}')
    if yte is not None:
        print(f'Accuracy of SVC test on test set is: {svm.score(Xte, yte)}')
    return svm.predict(Xte)
def logistic_reg(Xtr, ytr, Xte, yte=None, text=None):
lr = LogisticRegression(penalty='l2', C=7.74263682, max_iter=4000).fit(Xtr, ytr)
y_pred = lr.predict(Xte)
if text is not None:
print(f'Accuracy of Logistic Regression test on train set is: {lr.score(Xtr, ytr)}')
if yte is not None:
print(f'Accuracy of Logistic Regression test on test set is: {lr.score(Xte, yte)}')
return y_pred
def linear_reg(Xtr, ytr, Xte, yte=None, text=None):
lir = LinearRegression().fit(Xtr, ytr)
y_pred = lir.predict(Xte)
if text is not None:
print(f'Accuracy of Linear Regression test on train set is: {lir.score(Xtr, ytr)}')
if yte is not None:
## Kpi
print(f"R2 (explained variance):{round(r2_score(yte, y_pred), 3)}")
        print(f'Mean Absolute Percentage Error (Σ(|y-pred|/y)/n): {round(np.mean(np.abs((yte - y_pred) / yte)), 2)}')
print(f'Mean Absolute Error (Σ|y-pred|/n): {(mean_absolute_error(yte, y_pred)):.2f}')
print(f"Root Mean Squared Error (sqrt(Σ(y-pred)^2/n)): {(np.sqrt(mean_squared_error(yte, y_pred))):.2f}")
return y_pred
def RandomF(Xtr, ytr, Xte, yte):
    '''Tuned parameters: {'max_depth': 5, 'min_samples_leaf': 5, 'min_samples_split': 2, 'n_estimators': 100}'''
    rf = RandomForestClassifier(max_depth=5, min_samples_leaf=5, min_samples_split=2, n_estimators=100).fit(Xtr, ytr.values.ravel())
    print(f'Accuracy of Random Forest test on train set is: {rf.score(Xtr, ytr)}')
    print(f'Accuracy of Random Forest test on test set is: {rf.score(Xte, yte)}')
    return rf.predict(Xte)
def XGB(Xtr, ytr, Xte, yte=None, text=True):
xg = XGBClassifier().fit(Xtr, ytr)
y_pred = xg.predict(Xte)
if text == True:
        print(f'Accuracy of the XGBoost classifier on the train set is: {xg.score(Xtr, ytr)}')
    if yte is not None:
        print(f'Accuracy of the XGBoost classifier on the test set is: {xg.score(Xte, yte)}')
return y_pred
def XGB_r(Xtr, ytr, Xte, yte=None, text=None):
xgb = XGBRegressor().fit(Xtr, ytr)
y_pred = xgb.predict(Xte)
if text is not None:
print(f'R2 at the train set (explained variance): {xgb.score(Xtr, ytr)}')
if yte is not None:
## Kpi
print(f"R2 at the test set (explained variance):{round(r2_score(yte, y_pred), 3)}")
print(f'Mean Absolute Error (Σ|y-pred|/n): {(mean_absolute_error(yte, y_pred)):.2f}')
print(f"Root Mean Squared Error (sqrt(Σ(y-pred)^2/n)): {(np.sqrt(mean_squared_error(yte, y_pred))):.2f}")
return y_pred
def features_importances(Xtr, ytr, tML='classifier'):
    ## call model (the regression and classifier branches only differ in the estimator used)
    if tML == 'regression':
        model = GradientBoostingRegressor()
    elif tML == 'classifier':
        model = GradientBoostingClassifier()
    else:
        return
    ## Importance
    model.fit(Xtr, ytr)
    importances = model.feature_importances_
    ## Put in a pandas dtf
    dtf_importances = pd.DataFrame({"IMPORTANCE": importances, "VARIABLE": Xtr.columns.tolist()}).sort_values("IMPORTANCE", ascending=False)
    dtf_importances['cumsum'] = dtf_importances['IMPORTANCE'].cumsum(axis=0)
    dtf_importances = dtf_importances.set_index("VARIABLE")
    ## Plot
    fig, ax = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=False, figsize=[50, 70])
    fig.suptitle("Features Importance", fontsize=100)
    ax[0].title.set_text('variables')
    dtf_importances[["IMPORTANCE"]].sort_values(by="IMPORTANCE").plot(kind="barh", legend=False, ax=ax[0], fontsize=30).grid(axis="x")
    ax[0].set(ylabel="")
    ax[1].title.set_text('cumulative')
    dtf_importances[["cumsum"]].plot(kind="line", linewidth=4, legend=False, ax=ax[1])
    ax[1].set(xlabel="", xticks=np.arange(len(dtf_importances)), xticklabels=dtf_importances.index)
    plt.xticks(rotation=70)
    plt.grid(axis='both')
    plt.show()
def scaler(X_tr, X_te):
    # Fit the scaler on the training set only, then apply the same transform to the test set
    mms = MinMaxScaler()
    X_train_transf = mms.fit_transform(X_tr)
    X_test_transf = mms.transform(X_te)
    return X_train_transf, X_test_transf
def by_rods_C(temp_line, season=str, bet_by_rod=10, tML='classifier', tar=str):
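    """
    Round-by-round betting simulation. Starting at row `temp_line` of the full data set,
    each round of 10 matches is predicted with logistic regression trained on every
    earlier match; the per-round return is computed from the market-average odds
    (odds - 1 when the prediction is right, -1 otherwise). A second, filtered series
    keeps only picks whose net odds are below 1 (clear favourites), and both series are
    plotted and summed assuming a stake of `bet_by_rod` per round.
    """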
import matplotlib.pyplot as plt
import seaborn as sns
features, target = feature_and_target(tML, tar)
data_base = data_set()
rods = []
index_r = []
for i in range(temp_line, (temp_line + (10 * 28)) - 2, 10):
rods.append(i)
gain_by_rod = []
gain_by_rod_f = []
frames = []
frames_f = []
for item in rods:
try:
train = data_base[:item]
X_train = train[features].copy()
y_train = train[target].copy()
test = data_base[item:(item + 10)]
X_test = test[features].copy()
y_test = test[target].copy()
X_train_transf, X_test_transf = scaler(X_train, X_test)
X_test['PREDICTS'] = logistic_reg(X_train_transf, y_train, X_test_transf, y_test)
globals()['DB_test' + str(item)] = pd.merge(X_test, data_base, how='left')
globals()['DB_test' + str(item)] = globals()['DB_test' + str(item)][['Date', 'HomeTeam', 'AwayTeam', 'AvgHr', 'AvgAr', 'AvgDr', 'FTR', 'PREDICTS']]
globals()['DB_test' + str(item)]['ODD_CHOSEN'] = np.select([globals()['DB_test' + str(item)]['PREDICTS'] == 2, globals()['DB_test' + str(item)]['PREDICTS'] == 1, globals()['DB_test' + str(item)]['PREDICTS'] == 0],
[globals()['DB_test' + str(item)]['AvgHr'] - 1, globals()['DB_test' + str(item)]['AvgDr'] - 1, globals()['DB_test' + str(item)]['AvgAr'] - 1], default=None)
globals()['DB_test' + str(item)]['GAIN'] = np.where(globals()['DB_test' + str(item)]['FTR'] == globals()['DB_test' + str(item)]['PREDICTS'], globals()['DB_test' + str(item)]['ODD_CHOSEN'], -1)
gain_by_rod.append(sum(globals()['DB_test' + str(item)]["GAIN"]) / len(globals()['DB_test' + str(item)]) * 100)
globals()['DB_test_F' + str(item)] = globals()['DB_test' + str(item)][globals()['DB_test' + str(item)]['ODD_CHOSEN'] < 1]
gain_by_rod_f.append(sum(globals()['DB_test_F' + str(item)]["GAIN"]) / len(globals()['DB_test_F' + str(item)]) * 100)
frames.append(globals()['DB_test' + str(item)])
frames_f.append(globals()['DB_test_F' + str(item)])
        except Exception:
            # Skip rounds that fall outside the available data or have no qualifying matches
            pass
for i in range(11, (len(gain_by_rod) + 10) + 1):
index_r.append(i)
df_by_rod = pd.DataFrame(data={'Gain ML': gain_by_rod, 'Gain F': gain_by_rod_f}, index=index_r)
df_by_rod['U$D Gain ML'] = df_by_rod['Gain ML'] * bet_by_rod / 100
df_by_rod['U$D Gain F'] = df_by_rod['Gain F'] * bet_by_rod / 100
print(f'The gain with the normal machine learning, betting US$ {bet_by_rod:.2f} per round, would be: '
f'US$ {df_by_rod["U$D Gain ML"].sum():.2f}, {(df_by_rod["U$D Gain ML"].sum()/(len(rods) * bet_by_rod)) * 100:.2f}%\n'
f'The gain with the team favorites machine learning, betting US$ {bet_by_rod:.2f} per round, would be: '
f'US$ {df_by_rod["U$D Gain F"].sum():.2f}, {(df_by_rod["U$D Gain F"].sum()/(len(rods) * bet_by_rod)) * 100:.2f}%')
plt.figure()
sns.set(style="dark")
sns.lineplot(data=df_by_rod[['Gain ML', 'Gain F']], palette="PuBuGn_d", linewidth=2.5).lines[1].set_linestyle("-")
plt.plot([index_r[0], index_r[-1]], [0, 0], '-g', linewidth=1, alpha=0.8)
plt.title('Season ' + season)
plt.show()
    return pd.concat(frames), pd.concat(frames_f)
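
# Minimal usage sketch for the pipeline above (a hedged example: the row index 3800,
# the season label and the 'FTR' target are illustrative values only, and the raw
# season CSVs read by frames_season() must already be available):
#
#   stats = data_set()
#   features, target = feature_and_target('classifier', 'FTR')
#   ml_rounds, fav_rounds = by_rods_C(3800, season='2018/19', bet_by_rod=10,
#                                     tML='classifier', tar='FTR')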
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
        self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
"""
DataSet: data structure for potentially mixed-type Attribute.
"""
from pandas import DataFrame, Series
from ds4ml.attribute import Attribute
class DataSetPattern:
"""
A helper class of ``DataSet`` to store its patterns.
"""
# DataSet's pattern data has following members:
_network = None
_cond_prs = None
_attrs = None
_records = None
# Options of DataSet constructor to preset some properties:
_categories = [] # categorical columns setting from command lines
_config = None # configurations for data-pattern command
_pattern_generated = False
class DataSet(DataSetPattern, DataFrame):
def __init__(self, *args, **kwargs):
"""
An improved DataFrame with extra patterns information, e.g. its bayesian
network structure, conditional probabilities on the network, and pattern
information of all its columns.
The ``DataSet`` class has two modes:
- it has raw data, and then can calculate its pattern from the data;
- it doesn't have raw data, and only have the pattern from customer.
Parameters
----------
categories : list of columns (optional)
Column names whose values are categorical.
"""
categories = kwargs.pop("categories", [])
self._categories = [] if categories is None else categories
pattern = kwargs.pop('pattern', None)
super(DataSet, self).__init__(*args, **kwargs)
self.separator = '_'
if pattern is not None and all(k in pattern for k in
['network', 'prs', 'attrs', 'records']):
self._set_pattern(pattern)
else:
self._records = self.shape[0]
@property
def _constructor(self):
return DataSet
# disable _constructor_sliced method for single column slicing. Try to
# use __getitem__ method.
# @property
# def _constructor_sliced(self):
# return Attribute
def __getitem__(self, key):
result = super(DataSet, self).__getitem__(key)
if isinstance(result, Series):
result.__class__ = Attribute
if self._attrs is not None:
result.set_pattern(self._attrs.get(key),
categorical=key in self._categories)
else:
result.set_pattern(categorical=key in self._categories)
return result
@classmethod
def from_pattern(cls, filename):
"""
Alternate constructor to create a ``DataSet`` from a pattern file.
"""
import json
with open(filename) as f:
pattern = json.load(f)
# set columns to DataSet, which will set column name to each Attribute.
columns = pattern['attrs'].keys()
dataset = DataSet(columns=columns, pattern=pattern)
return dataset
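
    # A minimal usage sketch (the file and column names below are hypothetical):
    #
    #   ds = DataSet(raw_frame, categories=['education'])   # mode 1: raw data given
    #   ds = DataSet.from_pattern('adult-pattern.json')      # mode 2: pattern only
    #   attr = ds['education']  # column access returns an Attribute carrying its pattern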
def _set_pattern(self, pattern=None):
""" Set pattern data for the DataSet. """
if not self._pattern_generated:
self._network = pattern['network']
self._cond_prs = pattern['prs']
self._attrs = pattern['attrs']
self._config = pattern['config']
self._records = pattern['records']
self._pattern_generated = True
def mi(self):
""" Return mutual information of pairwise attributes. """
from ds4ml.metrics import pairwise_mutual_information
return pairwise_mutual_information(self)
def encode(self, data=None):
"""
Transform data set to values by kinds of encoders.
If data is set, use this data set's encoders to transform.
"""
# If the data to encode is None, then transform source data _data;
        frame = DataFrame()
import numpy as np
import arviz as az
import pandas as pd
import seaborn as sns
import matplotlib.pylab as plt
from math import *
import json
import itertools
import os
import re
sns.set_style("whitegrid")
import tools
from tools import toVec
def jSonIterator(j):
yield j
if isinstance(j,dict):
for key in j.keys():
for m in jSonIterator(j[key]):
yield m
else:
if isinstance(j,list):
for item in j:
for m in jSonIterator(item):
yield m
def getByLabel(j,label):
def hasLabel(j):
if isinstance(j,dict) :
if "label" in j.keys():
if j["label"]==label:
return True
return False
ms=[ item for item in jSonIterator(j) if hasLabel(item) ]
return ms
def getForwardWalkingLabels(j,label):
fwLabels=[]
for m in j["measurements"] :
if ("label" in m.keys() ) and (m["label"]==label ) and ("forwardWalkingSteps" in m.keys() ):
for step in m["forwardWalkingSteps"]:
fwLabels.append(label + "_fw" + str(step))
return fwLabels
def average(data,labels=None,hues=None,minIndex=None):
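    """
    Group `data` by the `hues` columns and, for every column in `labels`, return its mean
    together with an error estimate sqrt(var/n_eff), where n_eff is the effective sample
    size computed with ArviZ (this accounts for autocorrelation in the samples).
    """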
if minIndex is not None:
data=data[data.index >= minIndex]
if labels is None:
labels=list(data.columns)
if hues is None:
hues = list( set(data.columns) -set(labels) )
averagedData={label : [] for label in labels }
averagedData.update({hue : [] for hue in hues } )
averagedData.update( { "delta" + label : [] for label in toVec(labels) })
if hues == []:
groups= { None : data }
groups=groups.items()
else:
groups = data.groupby(hues)
for hue_values,df in groups:
for label in toVec(labels):
x=np.array(df[label])
averagedData[label].append(np.mean(x) )
neff=az.ess(x)
averagedData["delta" + label].append( np.sqrt(np.var(x) /neff ) )
for name,value in zip(toVec(hues),toVec(hue_values) ):
averagedData[name].append(value)
return pd.DataFrame(averagedData)
def createHueLabel(hueNames,hueValues):
hueNames=toVec(hueNames)
hueValues=toVec(hueValues)
labels= [ str(name) + "=" + str(value) for name,value in zip(hueNames,hueValues) ]
return ", ".join(labels)
def assemblePlot(func):
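    """
    Decorator: wraps a plotting function that draws on a single axes so that the wrapped
    version can overlay one curve per combination of the `hues` columns on a shared axes,
    or, when table=True, facet them into a grid of subplots with `nCols` columns.
    """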
def assemble(data,hues=None,table=False,nCols=2,width=10.,height=6.,x=None,y=None,delta=None,showLegend=True,*args,**kwds):
fig=plt.figure()
if hues is None:
ax=fig.add_subplot(111)
for x1,y1,delta1 in itertools.zip_longest(toVec(x),toVec(y),toVec(delta)):
func(data,x=x1 ,y=y1 ,delta=delta1,ax=ax,label=y1,*args,**kwds)
if showLegend:
ax.legend()
fig.set_size_inches(width, height)
else:
if not table :
ax=fig.add_subplot(111)
for hue,df in data.groupby(hues):
for x1,y1,delta1 in itertools.zip_longest(toVec(x),toVec(y),toVec(delta)):
func(df,x=x1,y=y1,delta=delta1,label=y1 + ";"+createHueLabel(hues,hue),ax=ax,*args,**kwds)
if showLegend:
ax.legend()
fig.set_size_inches(width, height )
else:
groups=data.groupby(hues)
Nplots=len(groups)
nRows=ceil(Nplots/nCols)
i=1
for hue,df in data.groupby(hues):
ax=fig.add_subplot(nRows,nCols,i)
for x1,y1,delta1 in itertools.zip_longest(toVec(x),toVec(y),toVec(delta)):
func(df,x=x1,y=y1,delta=delta1,label=y1 + ";" +createHueLabel(hues,hue),ax=ax,*args,**kwds)
i+=1
if (showLegend):
ax.legend()
fig.set_size_inches(width, height/2. * len(groups) )
fig.tight_layout()
return assemble
@assemblePlot
def plotVector(data,x,y,delta=None,label=None,ax=None,errorbar=False,*args,**kwds):
if delta is not None and (not errorbar):
ax.fill_between(data[x],data[y]-data[delta],data[y]+data[delta],alpha=0.5)
if errorbar is not True:
ax.plot(data[x],data[y],label=label,*args,**kwds)
else:
ax.errorbar(data[x],data[y],data[delta],label=label,*args,**kwds)
ax.set_xlabel(x)
ax.set_ylabel(y)
@assemblePlot
def plotScalar(data,y,x=None,label=None,ax=None,delta=None,alpha=0.5,trace=False,alpha_trace=1):
if x is None:
x1=np.arange(0,len(data[y]))
else:
x1=np.array(data[x])
if delta is None:
p=ax.plot(x1,np.array(data[y]),label=label,marker="o",linestyle="dashed",alpha=alpha)
else:
p=ax.errorbar(x1,np.array(data[y]),yerr=np.array(data[delta]),label=label,marker="o",linestyle="dashed",alpha=alpha)
if trace and (delta is None):
movingAverage=data[y].expanding().mean()
color=p[0].get_color()
ax.plot(x1,np.array(movingAverage),linestyle="solid",alpha=alpha_trace,color=color)
def compare(data,ax=None):
columns=list(data.columns)
labels = [label for label in columns if ( (re.match("(?!delta).*",label) is not None) and ( ("delta"+label) in columns ) ) ]
if ax is None:
fig=plt.figure()
ax=fig.add_subplot(111)
y=[ float(data[label]) for label in labels]
deltay=[ float(data["delta"+label]) for label in labels]
ax.errorbar(labels,y,yerr=deltay,marker="o",linestyle="None")
def gatherByLabel(baseDir,label,jSonInput,getHues=None,maxRows=None,minIndex=0):
measurements=getByLabel(jSonInput["measurements"],label)
if len(measurements)!=0 and ("recordSteps" in measurements[0]):
fwLabels=getForwardWalkingLabels(jSonInput,label)
datas=[]
for fwLabel in fwLabels:
data=gatherByLabel(baseDir,fwLabel,jSonInput,getHues=getHues,maxRows=maxRows,minIndex=minIndex)
data=data.rename(columns={fwLabel : label})
fwSteps=int(re.match(".*_fw(\d+)",fwLabel).group(1))
fwTime=jSonInput["correlationSteps"]*fwSteps*jSonInput["timeStep"]
data["fwTime"]=float(fwTime)
datas.append(data)
return pd.concat(datas)
filename=os.path.join(baseDir , label + ".dat")
data=pd.read_csv(filename,sep=" ")
if (maxRows is not None) and (len(data) > maxRows) :
data.reset_index(drop=True)
k=len(data)//maxRows
data=data[data.index% k == 0]
if getHues is not None:
hues=getHues(jSonInput)
for name,value in hues.items():
data[name]=value
data=data[data.index >= minIndex]
return data
def gather(dirname,label,hues=None,maxRows=None,minIndex=0,max_level=1):
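    """
    Walk `dirname` up to `max_level` directories deep, load `<label>.dat` from every run
    directory containing an input.json, optionally attach hue columns extracted from that
    input file, and concatenate everything into a single DataFrame.
    """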
datas=[]
json_file="input.json"
for subdir, dirs, files in tools.walk(dirname,max_level=max_level):
if json_file in files:
try:
with open(os.path.join(subdir,json_file)) as f:
j = json.load(f)
data=gatherByLabel(subdir,label,jSonInput=j,getHues=hues,maxRows=maxRows,minIndex=minIndex)
datas.append(data)
except FileNotFoundError as e:
print ("Warning: data not availible in " + subdir)
print (str(e))
if datas != []:
        data = pd.concat(datas)
        return data
import numpy as np
import pandas as pd
from typing import Dict
from sklearn.metrics import mean_absolute_percentage_error, r2_score, mean_squared_error
def evraz_metric(y_true: pd.DataFrame, y_pred: np.array):
"""
    Model quality metric proposed by the EVRAZ competition organisers.
    :param y_true: pd.DataFrame, dataset with the true values of the target variables.
    :param y_pred: np.array, predicted values of the target variables ('C' and 'TST').
:return:
"""
    predictions = pd.DataFrame(data=y_pred, columns=['C', 'TST'])
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Retrieve verification from MesoWest, NWS CF6 files, and NCDC data.
"""
from .MesoPy import Meso
from .obs import get_obs
import pandas as pd
import numpy as np
import os
import re
from thetae.util import meso_api_dates, Daily, check_cache_file
from thetae.db import readTimeSeries, get_latest_date
from thetae import MissingDataError
from datetime import datetime, timedelta
import requests
from builtins import str
def get_cf6_files(config, stid, num_files=1):
"""
After code by <NAME>
Retrieves CF6 climate verification data released by the NWS. Parameter num_files determines how many recent files
are downloaded.
"""
# Create directory if it does not exist
site_directory = '%s/site_data' % config['THETAE_ROOT']
if config['debug'] > 50:
print('get_cf6_files: accessing site data in %s' % site_directory)
# Construct the web url address. Check if a special 3-letter station ID is provided.
nws_url = 'http://forecast.weather.gov/product.php?site=NWS&issuedby=%s&product=CF6&format=TXT'
try:
stid3 = config['Stations'][stid]['station_id3']
except KeyError:
stid3 = stid[1:].upper()
nws_url = nws_url % stid3
# Determine how many files (iterations of product) we want to fetch
if num_files == 1:
print('get_cf6_files: retrieving latest CF6 file for %s' % stid)
else:
print('get_cf6_files: retrieving %s archived CF6 files for %s' % (num_files, stid))
# Fetch files
for r in range(1, num_files + 1):
# Format the web address: goes through 'versions' on NWS site which correspond to increasingly older files
version = 'version=%d&glossary=0' % r
nws_site = '&'.join((nws_url, version))
if config['debug'] > 50:
print('get_cf6_files: fetching from %s' % nws_site)
response = requests.get(nws_site)
cf6_data = response.text
# Remove the header
try:
body_and_footer = cf6_data.split('CXUS')[1] # Mainland US
except IndexError:
try:
body_and_footer = cf6_data.split('CXHW')[1] # Hawaii
except IndexError:
try:
body_and_footer = cf6_data.split('CXAK')[1] # Alaska
except IndexError:
if config['debug'] > 50:
print('get_cf6_files: bad file from request version %d' % r)
continue
body_and_footer_lines = body_and_footer.splitlines()
if len(body_and_footer_lines) <= 2:
body_and_footer = cf6_data.split('000')[2]
# Remove the footer
body = body_and_footer.split('[REMARKS]')[0]
# Find the month and year of the file
try:
current_year = re.search('YEAR: *(\d{4})', body).groups()[0]
except BaseException:
if config['debug'] > 9:
print('get_cf6_files warning: file from request version %d is faulty' % r)
continue
try:
current_month = re.search('MONTH: *(\D{3,9})', body).groups()[0]
current_month = current_month.strip() # Gets rid of newlines and whitespace
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%B %Y')
        except BaseException:  # Some files have a different formatting, although this may be fixed now.
current_month = re.search('MONTH: *(\d{2})', body).groups()[0]
current_month = current_month.strip()
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%m %Y')
# Write to a temporary file, check if output file exists, and if so, make sure the new one has more data
datestr = file_date.strftime('%Y%m')
filename = '%s/%s_%s.cli' % (site_directory, stid.upper(), datestr)
temp_file = '%s/temp.cli' % site_directory
with open(temp_file, 'w') as out:
out.write(body)
def file_len(file_name):
with open(file_name) as f:
for i, l in enumerate(f):
pass
return i + 1
if os.path.isfile(filename):
old_file_len = file_len(filename)
new_file_len = file_len(temp_file)
if old_file_len < new_file_len:
if config['debug'] > 9:
print('get_cf6_files: overwriting %s' % filename)
os.remove(filename)
os.rename(temp_file, filename)
else:
if config['debug'] > 9:
print('get_cf6_files: %s already exists' % filename)
else:
if config['debug'] > 9:
print('get_cf6_files: writing %s' % filename)
os.rename(temp_file, filename)
def _cf6_wind(config, stid):
"""
After code by <NAME>
This function is used internally only.
Generates wind verification values from climate CF6 files stored in site_directory. These files can be generated
by _get_cf6_files.
"""
site_directory = '%s/site_data' % config['THETAE_ROOT']
if config['debug'] > 9:
print('verification: searching for CF6 files in %s' % site_directory)
listing = os.listdir(site_directory)
file_list = [f for f in listing if f.startswith(stid.upper()) and f.endswith('.cli')]
file_list.sort()
if len(file_list) == 0:
raise IOError('No CF6 files found in %s for site %s.' % (site_directory, stid))
if config['debug'] > 50:
print('verification: found %d CF6 files' % len(file_list))
# Interpret CF6 files
if config['debug'] > 50:
print('verification: reading CF6 files')
cf6_values = {}
for file in file_list:
year, month = re.search('(\d{4})(\d{2})', file).groups()
open_file = open('%s/%s' % (site_directory, file), 'r')
for line in open_file:
matcher = re.compile('( \d|\d{2}) ( \d{2}|-\d{2}| \d| -\d|\d{3})')
if matcher.match(line):
# We've found an obs line!
lsp = line.split()
day = int(lsp[0])
date = datetime(int(year), int(month), day)
cf6_values[date] = {}
# Get only the wind value
if lsp[11] == 'M':
cf6_values[date]['wind'] = 0.0
else:
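                    # CF6 wind speeds are reported in mph; 0.868976 converts mph to knots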
cf6_values[date]['wind'] = float(lsp[11]) * 0.868976
return cf6_values
def _climo_wind(config, stid, dates=None):
"""
This function is used internally only.
Fetches climatological wind data using ulmo package to retrieve NCDC archives.
"""
import ulmo
from thetae.util import get_ghcn_stid
ghcn_stid = get_ghcn_stid(config, stid)
if config['debug'] > 0:
print('verification: fetching wind data for %s from NCDC (may take a while)' % ghcn_stid)
v = 'WSF2'
D = ulmo.ncdc.ghcn_daily.get_data(ghcn_stid, as_dataframe=True, elements=[v])
wind_dict = {}
if dates is None:
dates = list(D[v].index.to_timestamp().to_pydatetime())
for date in dates:
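        # GHCN-Daily WSF2 (fastest 2-minute wind) is stored in tenths of m/s;
        # dividing by 10 gives m/s and 1.94384 converts m/s to knots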
wind_dict[date] = {'wind': D[v].loc[date]['value'] / 10. * 1.94384}
return wind_dict
def get_verification(config, stid, start_dt, end_dt, use_climo=False, use_cf6=True):
"""
Generates verification data from MesoWest API. If use_climo is True, then fetch climate data from NCDC using ulmo
to fill in wind values. (We probably generally don't want to do this, because it is slow and is delayed by 1-2
weeks from present.) If use_cf6 is True, then any CF6 files found in ~/site_data will be used for wind values.
These files are retrieved by get_cf6_files.
"""
# MesoWest token and init
meso_token = config['Verify']['api_key']
m = Meso(token=meso_token)
# Look for desired variables
vars_request = ['air_temp_low_6_hour', 'air_temp_high_6_hour', 'precip_accum_six_hour']
vars_api = ','.join(vars_request)
# Units
units = 'temp|f,precip|in,speed|kts'
# Retrieve 6-hourly data
start, end = meso_api_dates(start_dt, end_dt)
print('verification: retrieving 6-hourly data from %s to %s' % (start, end))
obs = m.timeseries(stid=stid, start=start, end=end, vars=vars_api, units=units, hfmetars='0')
obs_6hour = pd.DataFrame.from_dict(obs['STATION'][0]['OBSERVATIONS'])
# Rename columns to requested vars. This changes the columns in the DataFrame to corresponding names in
# vars_request, because otherwise the columns returned by MesoPy are weird.
obs_var_names = obs['STATION'][0]['SENSOR_VARIABLES']
obs_var_keys = list(obs_var_names.keys())
col_names = list(map(''.join, obs_6hour.columns.values))
for c in range(len(col_names)):
col = col_names[c]
for k in range(len(obs_var_keys)):
key = obs_var_keys[k]
if col == list(obs_var_names[key].keys())[0]:
col_names[c] = key
obs_6hour.columns = col_names
# Let's add a check here to make sure that we do indeed have all of the variables we want
for var in vars_request + ['wind_speed']:
if var not in col_names:
obs_6hour = obs_6hour.assign(**{var: np.nan})
# Change datetime column to datetime object, subtract 6 hours to use 6Z days
    dateobj = pd.Index(pd.to_datetime(obs_6hour['date_time'])) - timedelta(hours=6)
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_column`` is the single constraint column plus the token (``'a#'``).
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_column`` is both constraint columns joined by the token (``'a#b'``).
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
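# --- Illustrative sketch (not part of the original test suite) ---
# The transform/reverse_transform tests above are consistent with the arithmetic
# diff = log(high - low + 1) and high = low + exp(diff) - 1 (then cast to the stored dtype).
# The helper below only mirrors that arithmetic for a quick numeric check; it is not the
# SDV implementation.
def _greater_than_round_trip_sketch(low_values, high_values):
    low = np.asarray(low_values, dtype=float)
    high = np.asarray(high_values, dtype=float)
    diff = np.log(high - low + 1)            # what ``transform`` stores in the diff column
    reconstructed = low + np.exp(diff) - 1   # what ``reverse_transform`` recovers for ``high``
    # e.g. low=[1, 2, 3], high=[4, 5, 6] gives diff == [np.log(4)] * 3 and recovers [4, 5, 6]
    return diff, reconstructed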
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
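# --- Illustrative sketch (not part of the original tests) ---
# Per the __init__ tests above, Positive and Negative only pin one bound of a GreaterThan:
# Positive fixes low=0 as a scalar and Negative fixes high=0 as a scalar. The equivalent
# constructions below are assumptions based on those stored attributes.
_positive_like = GreaterThan(low=0, high='a', strict=True, low_is_scalar=True)
_negative_like = GreaterThan(low='a', high=0, strict=True, high_is_scalar=True)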
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
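# --- Illustrative sketch (not part of the original tests) ---
# The ColumnFormula tests above show that ``reverse_transform`` simply recomputes the target
# column from the formula, ignoring whatever values it currently holds. This helper mirrors
# that behaviour with the ``new_column`` formula defined above.
def _column_formula_sketch(data):
    data = data.copy()
    # a=[1, 2, 3], b=[4, 5, 6] -> c=[5, 7, 9], regardless of the incoming 'c' values
    data['c'] = new_column(data)
    return data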
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where only some rows have the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans, ``True`` only for the rows within tolerance (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where only some rows have the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans, ``True`` only for the rows within tolerance (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where only some rows have the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans, ``True`` only for the rows within tolerance (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with columns that have too many decimal places (pandas.DataFrame)
Output:
- Table data with the specified columns rounded (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with columns that have too many decimal places (pandas.DataFrame)
Output:
- Table data with the specified columns rounded (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with columns that have too many decimal places (pandas.DataFrame)
Output:
- Table data with the specified columns rounded (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
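# --- Illustrative sketch (not part of the original tests) ---
# The Rounding.is_valid tests above are consistent with a per-value check of the form
# abs(value - round(value, digits)) <= tolerance, with NaN counting as invalid. The helper
# below is only a plausible reading of that behaviour, not the SDV implementation.
def _rounding_is_valid_sketch(series, digits, tolerance):
    # e.g. digits=2, tolerance=1e-3: 5.51 passes, 5.315 fails, NaN fails
    return (series - series.round(digits)).abs() <= tolerance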
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
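# --- Illustrative sketch (not part of the original tests) ---
# The reverse_transform tests below describe the inverse of ``transform`` as a sigmoid followed
# by rescaling back onto [low, high]. This helper is just the algebraic inverse of the
# ``transform`` defined above, provided for reference; it is not the SDV implementation.
def inverse_transform_sketch(data, low, high):
    sig = 1.0 / (1.0 + np.exp(-data))                  # the sigmoid undoes the logit
    return (sig - 0.025) / 0.95 * (high - low) + low   # undo the 0.95 squeeze and 0.025 offset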
class TestBetween():
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the constraint column replaced by its transformed version (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_scalar_column(self):
"""Test the ``Between.transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the constraint column replaced by its transformed version (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_scalar(self):
"""Test the ``Between.transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the constraint column replaced by its transformed version (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_column(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the constraint column replaced by its transformed version (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
# -*- coding: utf-8 -*-
import os
import pandas as pd
import torch.nn as TN
import numpy as np
import torch
from torch.autograd import Variable
from easydict import EasyDict as edict
from torchvision.models import (resnet18,resnet50,
vgg11,vgg13,vgg16,vgg19,
vgg11_bn,vgg13_bn,vgg16_bn,vgg19_bn,
)
import warnings
class backbone(TN.Module):
def __init__(self,config, use_none_layer=False):
super().__init__()
self.config=config
self.use_none_layer = use_none_layer
self.get_layers()
self.freeze_layers()
def forward_layers(self,x):
features=[]
if self.format=='vgg':
layer_num=0
if not hasattr(self.layer_depths,str(layer_num)):
features.append(x)
layer_num+=1
for idx,layer in enumerate(self.features):
x=layer(x)
if idx == self.layer_depths[str(layer_num)]:
features.append(x)
layer_num+=1
if layer_num>=6:
break
elif self.format=='resnet':
features.append(x)
x=self.prefix_net(x)
features.append(x)
x = self.layer1(x)
features.append(x)
x = self.layer2(x)
features.append(x)
x = self.layer3(x)
features.append(x)
x = self.layer4(x)
features.append(x)
else:
assert False,'unexpected format %s'%(self.format)
return features
def forward_aux(self,x,main_level,aux_level):
assert main_level in [1,2,3,4,5],'main feature level %d not in [1,2,3,4,5]'%main_level
assert aux_level in [1,2,3,4,5],'aux feature level %d not in [1,2,3,4,5]'%aux_level
features=self.forward_layers(x)
return features[main_level],features[aux_level]
def forward(self,x,level):
assert level in [1,2,3,4,5],'feature level %d not in [1,2,3,4,5]'%level
if self.format=='vgg':
if not hasattr(self.layer_depths,str(level)):
return x
assert hasattr(self.layer_depths,str(level))
for idx,layer in enumerate(self.features):
x=layer(x)
if idx == self.layer_depths[str(level)]:
return x
elif self.format=='resnet':
x=self.prefix_net(x)
x = self.layer1(x)
# layer 1 does not change the feature map height and width
if level==2:
return x
x = self.layer2(x)
if level==3:
return x
x = self.layer3(x)
if level==4:
return x
x = self.layer4(x)
if level==5:
return x
assert False,'unexpected level %d for format %s'%(level,self.format)
def get_feature_map_channel(self,level):
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.to(device)
x=torch.rand(2,3,224,224)
x=Variable(x.to(device).float())
x=self.forward(x,level)
return x.shape[1]
def get_feature_map_size(self,level,input_size):
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.to(device)
x=torch.rand(2,3,input_size[0],input_size[1])
x=Variable(x.to(device).float())
x=self.forward(x,level)
return x.shape[2:4]
def get_output_shape(self,level,input_size):
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.to(device)
x=torch.rand(2,3,input_size[0],input_size[1])
x=torch.autograd.Variable(x.to(device).float())
x=self.forward(x,level)
return x.shape
def get_layer_shapes(self,input_size):
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.to(device)
x=torch.rand(2,3,input_size[0],input_size[1])
x=torch.autograd.Variable(x.to(device).float())
features=self.forward_layers(x)
shapes=[f.shape for f in features]
return shapes
def get_model(self):
if hasattr(self.config,'backbone_pretrained'):
pretrained=self.config.backbone_pretrained
else:
pretrained=False
return globals()[self.config.backbone_name](pretrained=pretrained)
def get_layers(self):
if self.use_none_layer == False:
model=self.get_model()
if self.config.backbone_name.find('vgg')>=0 or self.config.backbone_name.lower().find('mobilenet')>=0:
self.format='vgg'
self.features=model.features
self.df=self.get_dataframe()
self.layer_depths=self.get_layer_depths()
elif self.config.backbone_name.find('resnet')>=0:
self.format='resnet'
if self.config.backbone_name.find('se_resnet')>=0:
self.prefix_net=model.layer0
else:
self.prefix_net = TN.Sequential(model.conv1,
model.bn1,
model.relu,
model.maxpool)
self.layer1=model.layer1
self.layer2=model.layer2
self.layer3=model.layer3
self.layer4=model.layer4
else:
assert False,'unknown backbone name %s'%self.config.backbone_name
else:
model=self.get_model()
if self.config.backbone_name.find('vgg')>=0 or self.config.backbone_name.lower().find('mobilenet')>=0:
self.format='vgg'
self.features=model.features
self.df=self.get_dataframe()
self.layer_depths=self.get_layer_depths()
elif self.config.backbone_name.find('resnet')>=0:
# the output size of resnet layer is different from the standard model!
# raise NotImplementedError
self.format='resnet'
self.prefix_net = model.prefix_net
self.layer1=model.layer1
self.layer2=model.layer2
self.layer3=model.layer3
self.layer4=model.layer4
else:
assert False,'unknown backbone name %s'%self.config.backbone_name
def freeze_layers(self):
if self.config.backbone_freeze:
for param in self.parameters():
param.requires_grad=False
elif self.config.freeze_layer > 0:
if self.config.backbone_freeze:
warnings.warn("it's not good to use freeze layer with backbone_freeze")
freeze_layer=self.config.freeze_layer
if self.format=='vgg':
for idx,layer in enumerate(self.features):
if idx <= self.layer_depths[str(freeze_layer)]:
for param in layer.parameters():
param.requires_grad = False
else:
if freeze_layer>0:
for param in self.prefix_net.parameters():
param.requires_grad = False
if freeze_layer>1:
for param in self.layer1.parameters():
param.requires_grad = False
if freeze_layer>2:
for param in self.layer2.parameters():
param.requires_grad = False
if freeze_layer>3:
for param in self.layer3.parameters():
param.requires_grad = False
if freeze_layer>4:
for param in self.layer4.parameters():
param.requires_grad = False
elif self.config.freeze_ratio > 0.0:
if self.format=='vgg':
freeze_index=len(self.features)*self.config.freeze_ratio
for idx,layer in enumerate(self.features):
if idx < freeze_index:
for param in layer.parameters():
param.requires_grad = False
else:
valid_layer_number=0
for name,param in self.named_parameters():
valid_layer_number+=1
freeze_index=valid_layer_number*self.config.freeze_ratio
for idx,(name,param) in enumerate(self.named_parameters()):
if idx < freeze_index:
print('freeze weight of',name)
param.requires_grad=False
def get_dataframe(self):
assert self.format=='vgg','only vgg models have features'
df=pd.DataFrame(columns=['level','layer_depth','layer_name'])
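# --- Illustrative usage sketch (not part of the original module) ---
# ``config`` is an attribute container (edict) read by get_model/get_layers/freeze_layers.
# The field values below (resnet50 backbone, nothing frozen) are assumptions chosen only
# for demonstration.
if __name__ == '__main__':
    cfg = edict()
    cfg.backbone_name = 'resnet50'
    cfg.backbone_pretrained = False
    cfg.backbone_freeze = False
    cfg.freeze_layer = 0
    cfg.freeze_ratio = 0.0
    net = backbone(cfg)
    print(net.get_feature_map_channel(level=5))                      # channels of the level-5 feature map
    print(net.get_feature_map_size(level=5, input_size=(224, 224)))  # spatial size at level 5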
import numpy as np
import pandas as pd
# Assume that each residential unit in a mixed-used parcel occupies
# 1500 sqft, since residential vs. non-residential sqft is not known.
sqft_per_res_unit = 1500.
def get_res_type(land_use_type_id, res_codes):
lu = pd.Series(index=land_use_type_id.index, dtype=object)
for name, codes in res_codes.items():
lu[land_use_type_id.isin(codes)] = name
return lu
def get_nonresidential_sqft(building_sqft, res_type, residential_units):
sqft = pd.Series(index=res_type.index)
building_sqft = building_sqft.reindex(sqft.index, copy=False)
# If not residential, assume all area is non-residential.
sqft[res_type.isnull()] = building_sqft
# If residential, assume zero non-residential area.
sqft[(res_type == 'single') | (res_type == 'multi')] = 0
# If mixed-use, assume residential units occupy some area.
sqft[res_type == 'mixed'] = (building_sqft -
sqft_per_res_unit * residential_units)
# Non-residential area must not be negative.
sqft[(sqft.notnull()) & (sqft < 0)] = 0
return sqft
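# --- Illustrative usage sketch (not part of the original module) ---
# ``res_codes`` maps residential categories to land-use type codes. The category names
# 'single', 'multi' and 'mixed' come from get_nonresidential_sqft above; the numeric codes
# used here are made up purely for demonstration.
def _res_type_example():
    land_use_type_id = pd.Series([11, 12, 21, 30, 99])
    res_codes = {'single': [11], 'multi': [12, 21], 'mixed': [30]}
    # -> ['single', 'multi', 'multi', 'mixed', NaN]
    return get_res_type(land_use_type_id, res_codes)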
def get_residential_units(tot_units, res_type):
units = pd.Series(index=res_type.index)
import os
import json
from datetime import datetime
import argparse
import matplotlib.pyplot as plt
import numpy as np
import osmnx as ox
import pandas as pd
import geopandas as gpd
ox.config(log_console=True, use_cache=True)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="input shapefile or a .json with a list of places"
)
parser.add_argument("-l","--label_field",
help="name of the field that should be used for the label of each graph (shp input only)"
)
parser.add_argument("-t","--title",
default="City Street Network Orientation",
help="title for the output image"
)
parser.add_argument("--weight_by_length",
action="store_true",
help="weight the street bearing counts by length of street"
)
parser.add_argument("--save_network_images",
action="store_true",
help="export an image of each network that is used to create a compass graph"
)
parser.add_argument("--timestamp",
action="store_true",
help="add timestamp to the output image file"
)
args = parser.parse_args()
return args
def reverse_bearing(x):
return x + 180 if x < 180 else x - 180
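# --- Illustrative sketch (not part of the original script) ---
# reverse_bearing maps a bearing onto the opposite direction so that undirected streets can be
# counted in both directions before plotting. The pairing below is an assumption about how the
# helper is meant to be used and is not taken from the original code.
def _count_both_directions_sketch(bearings):
    bearings = pd.Series(bearings)
    # e.g. [10, 200, 355] -> [10, 200, 355, 190, 20, 175]
    return pd.concat([bearings, bearings.map(reverse_bearing)], ignore_index=True)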
def bearings_from_graph(G, weight_by_length=False):
# calculate edge bearings
Gu = ox.add_edge_bearings(ox.get_undirected(G))
if weight_by_length:
# weight bearings by length (meters)
city_bearings = []
for u, v, k, d in Gu.edges(keys=True, data=True):
city_bearings.extend([d['bearing']] * int(d['length']))
b = pd.Series(city_bearings)
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
import numpy as np
import pandas as pd
import os
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import warnings
import random
random.seed(10)
np.random.seed(42)
warnings.filterwarnings('ignore')
user_input = input("Enter the path of your file: ")
assert os.path.exists(user_input), "I did not find the file at, "+str(user_input)
data = pd.read_csv(user_input)
test=data.copy()
data=data.rename(columns={"Seat Fare Type 1":"f1","Seat Fare Type 2":"f2"})
data=data.dropna(subset=["f1","f2"], how='all')
test=data.copy()
test["Service Date"]=pd.to_datetime(test["Service Date"],format='%d-%m-%Y %H:%M')
test["RecordedAt"]=pd.to_datetime(test["RecordedAt"],format='%d-%m-%Y %H:%M')
test["Service Date"]-test["RecordedAt"]
data["timediff"]=test["Service Date"]-test["RecordedAt"]
test["timediff"]=test["Service Date"]-test["RecordedAt"]
days=test["timediff"].dt.days
hours=test["timediff"].dt.components["hours"]
mins=test["timediff"].dt.components["minutes"]
test["abstimediff"]=days*24*60+hours*60+mins
test["f1"]=test["f1"].astype(str)
test["f1_1"]=test.f1.str.split(',')
#print(test)
test["f2"]=test["f2"].astype(str)
test["f2_1"]=test.f2.str.split(',')
test=test.reset_index(drop=True)
arr=[]
var=[]
for i in range(0,len(test["f1_1"])):
if test["f1_1"][i][0]=='nan':
arr.append(pd.to_numeric(test["f2_1"][i]).mean())
var.append(pd.to_numeric(test["f2_1"][i]).std())
#print(x)
else:
arr.append(pd.to_numeric(test["f1_1"][i]).mean())
var.append(pd.to_numeric(test["f1_1"][i]).std())
test["meanfare"]=arr
test["devfare"]=var
test["abstimediff"]=(test["abstimediff"]-test["abstimediff"].mean())/test["abstimediff"].std()
test["meanfare"]=(test["meanfare"]-test["meanfare"].mean())/test["meanfare"].std()
test["is_type1"]=1
test.loc[test["f1"]=='nan',"is_type1"]=0
test["devfare"]=(test["devfare"]-test["devfare"].mean())/test["devfare"].std()
processed_data = test
#print(processed_data)
data = processed_data
data["is_weekend"]=0
data.loc[data["Service Date"].dt.dayofweek==5,"is_weekend"]=1
data.loc[data["Service Date"].dt.dayofweek==6,"is_weekend"]=1
data_copy=data.copy()
data=data.drop(["f1","f2","Service Date","RecordedAt","timediff","f1_1","f2_1"],axis=1)
data["maxtimediff"]=data["abstimediff"]
data=data_copy
data=data.drop(["f1","f2","Service Date","RecordedAt","timediff","f1_1","f2_1"],axis=1)
#print(data)
data=data.groupby("Bus").agg(['mean','max'])
data=data.drop([( 'is_weekend', 'max'),( 'is_type1', 'max'),],axis=1)
data=data.drop([( 'devfare', 'max'),( 'meanfare', 'max'),],axis=1)
data_copy=data.copy()
data=data_copy
data.columns = ['{}_{}'.format(x[0], x[1]) for x in data.columns]
#print(data)
data=data.reset_index()
X=data.drop("Bus",axis=1)
features = X
#data = features
#print(data)
pca = PCA(n_components=2)
pca_result = pca.fit_transform(X)
model1 = KMeans(n_clusters=6)
model1.fit(pca_result)
centroids1 = model1.cluster_centers_
labels = model1.labels_
bus = data["Bus"]
bus=pd.DataFrame(bus)
y=pd.concat((bus,pd.DataFrame(pca_result),pd.DataFrame(labels,columns = ["Cluster"])),axis=1)
y = y.rename(columns = {0:"pca1",1:"pca2"})
# print(y)
cluster=[]
for i in range(6):
cluster.append(y[y["Cluster"]==i])
# print(labels)
X0=cluster[0][["pca1","pca2"]].to_numpy()
m0 = KMeans(n_clusters=2)
m0.fit(X0)
X1=cluster[1][["pca1","pca2"]].to_numpy()
m1 = KMeans(n_clusters=7)
m1.fit(X1)
X2=cluster[2][["pca1","pca2"]].to_numpy()
m2 = KMeans(n_clusters=6)
m2.fit(X2)
X3=cluster[3][["pca1","pca2"]].to_numpy()
m3 = KMeans(n_clusters=3)
m3.fit(X3)
X4=cluster[4][["pca1","pca2"]].to_numpy()
m4 = KMeans(n_clusters=2)
m4.fit(X4)
X5=cluster[5][["pca1","pca2"]].to_numpy()
m5 = KMeans(n_clusters=6)
m5.fit(X5)
def leader_follower(cluster):  # expects one sub-cluster's rows (with Bus and Probability columns), sorted by Probability in descending order and re-indexed from 0
cluster["Follows"] = ""
cluster["Confidence Score 1"] = ""
cluster["Is followed by"] = ""
cluster["Confidence Score 2"] = ""
maxprob = cluster["Probability"][0]
leader = cluster["Bus"][0]
#confidence_score_1 = cluster["Probability"][0]
cluster["Follows"][0] = "Independent"
cluster["Confidence Score 1"][0] = 1-cluster["Probability"][0]
#confidence_score_2 =
if len(cluster)==1:
return cluster
follower = cluster["Bus"][1]
for i in range(1,len(cluster)):
cluster["Follows"][i] = leader
cluster["Confidence Score 1"][i] = cluster["Probability"][i]/cluster["Probability"][i-1]
leader = cluster["Bus"][i]
#confidence_score_1 = cluster["Probability"][i]
for i in range(0,len(cluster)-1):
cluster["Is followed by"][i] = follower
follower = cluster["Bus"][i+1]
cluster["Confidence Score 2"][i] = cluster["Probability"][i+1]/cluster["Probability"][i]
#cluster["Is followed by"][len(cluster)-1] = ""
#cluster["Confidence Score 2"][i]
return cluster
def dist_from_own_centre(pca_result,centroids,labels):
arr=np.zeros(len(labels))
for i in range(len(labels)):
arr[i]=1/((np.sum((pca_result[i] - centroids[labels[i]])**2))**0.5+1e-8)
return arr
def dist_from_other_centre(pca_result,centroids,labels):
arr=np.zeros(len(labels))
for i in range(len(labels)):
for j in range(len(centroids)):
arr[i] += 1/((np.sum((pca_result[i] - centroids[j])**2))**0.5+1e-8)
return arr
prob0 = dist_from_own_centre(X0,m0.cluster_centers_,m0.labels_)/dist_from_other_centre(X0,m0.cluster_centers_,m0.labels_)
cluster[0]["Probability"] = prob0
cluster[0]["labels"] = m0.labels_
output=[]
result=[]
for i in range(max(m0.labels_)+1):
output.append(cluster[0][cluster[0]["labels"]==i])
output[i] = output[i].sort_values("Probability",ascending = False)
output[i] = output[i].reset_index()
result.append(leader_follower(output[i]))
Y0 = result[0]
for i in range(1,len(result)):
Y0 = pd.concat((Y0,result[i]))
Y0=Y0.set_index("index")
# print(Y0)
prob1 = dist_from_own_centre(X1,m1.cluster_centers_,m1.labels_)/dist_from_other_centre(X1,m1.cluster_centers_,m1.labels_)
cluster[1]["Probability"] = prob1
cluster[1]["labels"] = m1.labels_
output=[]
result=[]
for i in range(max(m1.labels_)+1):
output.append(cluster[1][cluster[1]["labels"]==i])
output[i] = output[i].sort_values("Probability",ascending = False)
output[i] = output[i].reset_index()
result.append(leader_follower(output[i]))
Y1 = result[0]
for i in range(1,len(result)):
Y1 = pd.concat((Y1,result[i]))
Y1=Y1.set_index("index")
prob2 = dist_from_own_centre(X2,m2.cluster_centers_,m2.labels_)/dist_from_other_centre(X2,m2.cluster_centers_,m2.labels_)
cluster[2]["Probability"] = prob2
cluster[2]["labels"] = m2.labels_
output=[]
result=[]
for i in range(max(m2.labels_)+1):
output.append(cluster[2][cluster[2]["labels"]==i])
output[i] = output[i].sort_values("Probability",ascending = False)
output[i] = output[i].reset_index()
result.append(leader_follower(output[i]))
Y2 = result[0]
for i in range(1,len(result)):
Y2 = pd.concat((Y2,result[i]))
Y2=Y2.set_index("index")
prob3 = dist_from_own_centre(X3,m3.cluster_centers_,m3.labels_)/dist_from_other_centre(X3,m3.cluster_centers_,m3.labels_)
cluster[3]["Probability"] = prob3
cluster[3]["labels"] = m3.labels_
output=[]
result=[]
for i in range(max(m3.labels_)+1):
output.append(cluster[3][cluster[3]["labels"]==i])
output[i] = output[i].sort_values("Probability",ascending = False)
output[i] = output[i].reset_index()
result.append(leader_follower(output[i]))
Y3 = result[0]
for i in range(1,len(result)):
Y3 = | pd.concat((Y3,result[i])) | pandas.concat |
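# A short sketch (made-up frames) of the pandas.concat pattern used to stack the
# per-sub-cluster results row-wise, as the loops above do with Y0..Y3.
import pandas as pd

part1 = pd.DataFrame({'Bus': ['A', 'B'], 'Probability': [0.9, 0.4]})
part2 = pd.DataFrame({'Bus': ['C'], 'Probability': [0.7]})
combined = pd.concat((part1, part2), ignore_index=True)
print(combined)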
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/10/14 12:19
Desc: CNINFO (巨潮资讯) - Data Center - Thematic Statistics - Bond Reports - Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
"""
import time
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
js_str = """
function mcode(input) {
var keyStr = "<KEY> <KEY>";
var output = "";
var chr1, chr2, chr3 = "";
var enc1, enc2, enc3, enc4 = "";
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2)
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
chr1 = chr2 = chr3 = "";
enc1 = enc2 = enc3 = enc4 = "";
} while (i < input.length);
return output;
}
"""
def bond_treasure_issue_cninfo(
start_date: str = "20210910", end_date: str = "20211109"
) -> pd.DataFrame:
"""
CNINFO - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Treasury Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: start date of the statistics window
:type start_date: str
:param end_date: end date of the statistics window
:type end_date: str
:return: treasury bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1120"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_datetime(temp_df["公告日期"]).dt.date
temp_df["计划发行总量"] = pd.to_numeric(temp_df["计划发行总量"])
temp_df["实际发行总量"] = pd.to_numeric(temp_df["实际发行总量"])
temp_df["发行价格"] = pd.to_numeric(temp_df["发行价格"])
temp_df["单位面值"] = pd.to_numeric(temp_df["单位面值"])
temp_df["增发次数"] = pd.to_numeric(temp_df["增发次数"])
return temp_df
def bond_local_government_issue_cninfo(
start_date: str = "20210911", end_date: str = "20211110"
) -> pd.DataFrame:
"""
CNINFO - Data Center - Thematic Statistics - Bond Reports - Bond Issuance - Local Government Bond Issuance
http://webapi.cninfo.com.cn/#/thematicStatistics
:param start_date: start date of the statistics window
:type start_date: str
:param end_date: end date of the statistics window
:type end_date: str
:return: local government bond issuance data
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1121"
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
params = {
"sdate": "-".join([start_date[:4], start_date[4:6], start_date[6:]]),
"edate": "-".join([end_date[:4], end_date[4:6], end_date[6:]]),
}
r = requests.post(url, headers=headers, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.rename(
columns={
"F009D": "缴款日",
"SECNAME": "债券简称",
"DECLAREDATE": "公告日期",
"F004D": "发行起始日",
"F003D": "发行终止日",
"F008N": "单位面值",
"SECCODE": "债券代码",
"F007N": "发行价格",
"F006N": "计划发行总量",
"F005N": "实际发行总量",
"F028N": "增发次数",
"BONDNAME": "债券名称",
"F014V": "发行对象",
"F002V": "交易市场",
"F013V": "发行方式",
},
inplace=True,
)
temp_df = temp_df[
[
"债券代码",
"债券简称",
"发行起始日",
"发行终止日",
"计划发行总量",
"实际发行总量",
"发行价格",
"单位面值",
"缴款日",
"增发次数",
"交易市场",
"发行方式",
"发行对象",
"公告日期",
"债券名称",
]
]
temp_df["发行起始日"] = pd.to_datetime(temp_df["发行起始日"]).dt.date
temp_df["发行终止日"] = pd.to_datetime(temp_df["发行终止日"]).dt.date
temp_df["缴款日"] = pd.to_datetime(temp_df["缴款日"]).dt.date
temp_df["公告日期"] = pd.to_da | tetime(temp_df["公告日期"]) | pandas.to_datetime |
import composeml as cp
import numpy as np
import pandas as pd
import pytest
from dask import dataframe as dd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import NaturalLanguage
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE
)
from featuretools.entityset import EntitySet, Timedelta
from featuretools.exceptions import UnusedPrimitiveWarning
from featuretools.primitives import (
GreaterThanScalar,
Max,
Mean,
Min,
Sum,
make_agg_primitive,
make_trans_primitive
)
from featuretools.synthesis import dfs
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
@pytest.fixture
def datetime_es():
cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5],
"card_id": [1, 1, 5, 1, 5],
"transaction_time": pd.to_datetime([
'2011-2-28 04:00', '2012-2-28 05:00',
'2012-2-29 06:00', '2012-3-1 08:00',
'2014-4-1 10:00']),
"fraud": [True, False, False, False, True]})
datetime_es = EntitySet(id="fraud_data")
datetime_es = datetime_es.add_dataframe(
dataframe_name="transactions",
dataframe=transactions_df,
index="id",
time_index="transaction_time")
datetime_es = datetime_es.add_dataframe(
dataframe_name="cards",
dataframe=cards_df,
index="id")
datetime_es = datetime_es.add_relationship("cards", "id", "transactions", "card_id")
datetime_es.add_last_time_indexes()
return datetime_es
def test_passing_strings_to_logical_types_dfs():
teams = pd.DataFrame({
'id': range(3),
'name': ['Breakers', 'Spirit', 'Thorns']
})
games = pd.DataFrame({
'id': range(5),
'home_team_id': [2, 2, 1, 0, 1],
'away_team_id': [1, 0, 2, 1, 0],
'home_team_score': [3, 0, 1, 0, 4],
'away_team_score': [2, 1, 2, 0, 0]
})
dataframes = {'teams': (teams, 'id', None, {'name': 'natural_language'}), 'games': (games, 'id')}
relationships = [('teams', 'id', 'games', 'home_team_id')]
features = dfs(dataframes, relationships, target_dataframe_name="teams", features_only=True)
name_logical_type = features[0].dataframe['name'].ww.logical_type
assert isinstance(name_logical_type, NaturalLanguage)
def test_accepts_cutoff_time_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_warns_cutoff_time_dask(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
cutoff_times_df = dd.from_pandas(cutoff_times_df, npartitions=2)
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df)
def test_accepts_cutoff_time_compose(dataframes, relationships):
def fraud_occured(df):
return df['fraud'].any()
lm = cp.LabelMaker(
target_dataframe_name='card_id',
time_index='transaction_time',
labeling_function=fraud_occured,
window_size=1
)
transactions_df = to_pandas(dataframes['transactions'][0])
labels = lm.search(
transactions_df,
num_examples_per_instance=-1
)
labels['time'] = pd.to_numeric(labels['time'])
labels.rename({'card_id': 'id'}, axis=1, inplace=True)
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="cards",
cutoff_time=labels)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features) + 1
def test_accepts_single_cutoff_time(dataframes, relationships):
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=20)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 5
assert len(feature_matrix.columns) == len(features)
def test_accepts_no_cutoff_time(dataframes, relationships):
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
instance_ids=[1, 2, 3, 5, 6])
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 5
assert len(feature_matrix.columns) == len(features)
def test_ignores_instance_ids_if_cutoff_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
instance_ids = [1, 2, 3, 4, 5]
feature_matrix, features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
instance_ids=instance_ids)
feature_matrix = to_pandas(feature_matrix, index='id')
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_approximate_features(pd_dataframes, relationships):
# TODO: Update to use Dask dataframes when issue #985 is closed
cutoff_times_df = pd.DataFrame({"instance_id": [1, 3, 1, 5, 3, 6],
"time": [11, 16, 16, 26, 17, 22]})
# force column to BooleanNullable
pd_dataframes['transactions'] += ({'fraud': "BooleanNullable"},)
feature_matrix, features = dfs(dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
approximate=5,
cutoff_time_in_index=True)
direct_agg_feat_name = 'cards.PERCENT_TRUE(transactions.fraud)'
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features)
truth_values = pd.Series(data=[1.0, 0.5, 0.5, 1.0, 0.5, 1.0])
assert (feature_matrix[direct_agg_feat_name] == truth_values.values).all()
def test_all_columns(pd_dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3],
"time": [10, 12, 15]})
instance_ids = [1, 2, 3, 4, 5]
feature_matrix, features = dfs(dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
instance_ids=instance_ids,
agg_primitives=[Max, Mean, Min, Sum],
trans_primitives=[],
groupby_trans_primitives=["cum_sum"],
max_depth=3,
allowed_paths=None,
ignore_dataframes=None,
ignore_columns=None,
seed_features=None)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_features_only(dataframes, relationships):
if len(dataframes['transactions']) > 3:
dataframes['transactions'][3]['fraud'] = "BooleanNullable"
else:
dataframes['transactions'] += ({'fraud': "BooleanNullable"},)
features = dfs(dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
features_only=True)
# pandas creates 11 features
# dask creates 10 features (no skew)
# koalas creates 9 features (no skew, no percent_true)
if isinstance(dataframes['transactions'][0], pd.DataFrame):
expected_features = 11
elif isinstance(dataframes['transactions'][0], dd.DataFrame):
expected_features = 10
else:
expected_features = 9
assert len(features) == expected_features
def test_accepts_relative_training_window(datetime_es):
# TODO: Update to use Dask dataframes when issue #882 is closed
feature_matrix, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions")
feature_matrix_2, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"))
feature_matrix_3, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
training_window=Timedelta("3 months"))
feature_matrix_4, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
training_window="3 months")
assert (feature_matrix.index == [1, 2, 3, 4, 5]).all()
assert (feature_matrix_2.index == [1, 2, 3, 4]).all()
assert (feature_matrix_3.index == [2, 3, 4]).all()
assert (feature_matrix_4.index == [2, 3, 4]).all()
# Test case for leap years
feature_matrix_5, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-2-29 04:00"),
training_window=Timedelta("1 year"),
include_cutoff_time=True)
assert (feature_matrix_5.index == [2]).all()
feature_matrix_5, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-2-29 04:00"),
training_window=Timedelta("1 year"),
include_cutoff_time=False)
assert (feature_matrix_5.index == [1, 2]).all()
def test_accepts_pd_timedelta_training_window(datetime_es):
# TODO: Update to use Dask dataframes when issue #882 is closed
feature_matrix, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-3-31 04:00"),
training_window=pd.Timedelta(61, "D"))
assert (feature_matrix.index == [2, 3, 4]).all()
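# Worked aside (cutoff taken from the test above, everything else assumed):
# pd.Timedelta(61, "D") is a fixed 61-day span, so
# pd.Timestamp("2012-3-31 04:00") - pd.Timedelta(61, "D") falls on 2012-01-30 04:00,
# whereas the calendar-aware pd.DateOffset(months=2) in the next test lands on 2012-01-31 04:00.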
def test_accepts_pd_dateoffset_training_window(datetime_es):
# TODO: Update to use Dask dataframes when issue #882 is closed
feature_matrix, _ = dfs(entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-3-31 04:00"),
training_window= | pd.DateOffset(months=2) | pandas.DateOffset |
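# A minimal sketch of the calendar-aware arithmetic behind the test above; the cutoff
# value comes from that test, everything else is assumed.
import pandas as pd

cutoff = pd.Timestamp("2012-3-31 04:00")
print(cutoff - pd.DateOffset(months=2))  # 2012-01-31 04:00:00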
import geopandas as gpd
# import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# from osgeo import osr, gdal
import gdal
import osr
from rasterstats import zonal_stats
from scipy.spatial import KDTree
class IDWGenerator:
"""
https://stackoverflow.com/a/3119544/8947008
inverse-distance-weighted interpolation using KDTree:
invdisttree = Invdisttree( X, z ) -- data points, values
interpol = invdisttree( q, nnear=3, eps=0, p=1, weights=None, stat=0 )
interpolates z from the 3 points nearest each query point q;
For example, interpol[ a query point q ]
finds the 3 data points nearest q, at distances d1 d2 d3
and returns the IDW average of the values z1 z2 z3
(z1/d1 + z2/d2 + z3/d3)
/ (1/d1 + 1/d2 + 1/d3)
= .55 z1 + .27 z2 + .18 z3 for distances 1 2 3
q may be one point, or a batch of points.
eps: approximate nearest, dist <= (1 + eps) * true nearest
p: use 1 / distance**p
weights: optional multipliers for 1 / distance**p, of the same shape as q
stat: accumulate wsum, wn for average weights
How many nearest neighbors should one take ?
a) start with 8 11 14 .. 28 in 2d 3d 4d .. 10d; see Wendel's formula
b) make 3 runs with nnear= e.g. 6 8 10, and look at the results --
|interpol 6 - interpol 8| etc., or |f - interpol*| if you have f(q).
I find that runtimes don't increase much at all with nnear -- ymmv.
p=1, p=2 ?
p=2 weights nearer points more, farther points less.
In 2d, the circles around query points have areas ~ distance**2,
so p=2 is inverse-area weighting. For example,
(z1/area1 + z2/area2 + z3/area3)
/ (1/area1 + 1/area2 + 1/area3)
= .74 z1 + .18 z2 + .08 z3 for distances 1 2 3
Similarly, in 3d, p=3 is inverse-volume weighting.
Scaling:
if different X coordinates measure different things, Euclidean distance
can be way off. For example, if X0 is in the range 0 to 1
but X1 0 to 1000, the X1 distances will swamp X0;
rescale the data, i.e. make X0.std() ~= X1.std() .
A nice property of IDW is that it's scale-free around query points:
if I have values z1 z2 z3 from 3 points at distances d1 d2 d3,
the IDW average
(z1/d1 + z2/d2 + z3/d3)
/ (1/d1 + 1/d2 + 1/d3)
is the same for distances 1 2 3, or 10 20 30 -- only the ratios matter.
In contrast, the commonly-used Gaussian kernel exp( - (distance/h)**2 )
is exceedingly sensitive to distance and to h.
"""
def __init__( self, X, z, bounds, leafsize=10, stat=1 ):
assert len(X) == len(z), "len(X) %d != len(z) %d" % (len(X), len(z))
self.tree = KDTree( X, leafsize=leafsize ) # build the tree
self.z = z
self.stat = stat
self.wn = 0
self.wsum = None
self.bounds = bounds
def interpolate( self, q, nnear=6, eps=0.1, p=1, weights=None ):
# nnear nearest neighbours of each query point --
# eps is approximate nearest, dist <= (1 + eps) * true nearest
q = np.asarray(q)
qdim = q.ndim
if qdim == 1:
q = np.array([q])
if self.wsum is None:
self.wsum = np.zeros(nnear)
self.distances, self.ix = self.tree.query( q, k=nnear, eps=eps )
interpol = np.zeros( (len(self.distances),) + np.shape(self.z[0]) )
jinterpol = 0
if nnear == 1:
for dist, ix in zip( self.distances, self.ix ):
wz = self.z[ix]
interpol[jinterpol] = wz
jinterpol += 1
else:
for dist, ix in zip( self.distances, self.ix ):
if dist[0] < 1e-10:
wz = self.z[ix[0]]
else: # weight z s by 1/dist --
w = 1 / dist**p
if weights is not None:
w *= weights[ix] # >= 0
w /= np.sum(w)
wz = np.dot( w, self.z[ix] )
if self.stat:
self.wn += 1
self.wsum += w
interpol[jinterpol] = wz
jinterpol += 1
return interpol if qdim > 1 else interpol[0]
# def plot(self, tif_file):
# # plot
# dataset = gdal.Open(tif_file)
# band1 = dataset.GetRasterBand(1)
# plt.imshow(band1.ReadAsArray())
# dataset=None
def generate_query_points(self, resolution: int):
# assumes extent is a perfect square, but that isn't necessarily true
xmin, ymin, xmax, ymax = self.bounds
# nx = ny = resolution
print('resolution', resolution)
xi = np.linspace(xmin, xmax, resolution)
yi = np.linspace(ymin, ymax, resolution)
mesh = np.meshgrid(xi, yi)
flattened = zip(*(i.flat for i in mesh))
return np.asarray(list(flattened))
def export_to_tif(self, grid, output_tif):
# export to tif
# https://gis.stackexchange.com/a/37431/78614
nrows,ncols = np.shape(grid)
xmin, ymin, xmax, ymax = self.bounds
xres = (xmax-xmin)/float(ncols)
yres = (ymax-ymin)/float(nrows)
geotransform=(xmin,xres,0,ymax,0, -yres)
output_raster = gdal.GetDriverByName('GTiff')\
.Create(output_tif,ncols, nrows, 1 ,gdal.GDT_Float32) # open file
output_raster.SetGeoTransform(geotransform)
# set SRS to EPSG 4326 (WGS84)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
output_raster.SetProjection( srs.ExportToWkt() )
# write array to raster
output_raster.GetRasterBand(1).WriteArray(grid)
# make sure there is no lock on the file
output_raster.FlushCache()
output_raster = None
def calculate_zonal_stats(self, vector_file, raster_file, output_file):
tracts = gpd.read_file(vector_file)
stats = zonal_stats(vector_file, raster_file)
df = | pd.DataFrame(stats) | pandas.DataFrame |
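# A short sketch (assumed stats values) of building a pandas.DataFrame from a list
# of dicts, which is the shape rasterstats.zonal_stats returns above.
import pandas as pd

stats = [{'min': 1.0, 'max': 4.2, 'mean': 2.3}, {'min': 0.5, 'max': 3.1, 'mean': 1.8}]
df = pd.DataFrame(stats)
print(df.columns.tolist())  # ['min', 'max', 'mean']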
from openpyxl import Workbook
from openpyxl import load_workbook
import pandas as pd
wb = load_workbook('/home/nikon-cook/Documents/МИТМО/веб-прога/minos_db.xlsx')
print(wb.get_sheet_names())
Publishers_data = wb.get_sheet_by_name('Publishers')
#print(Publishers_data.dimensions, Publishers_data.max_row, Publishers_data.max_column)
Authors_data = wb.get_sheet_by_name('Authors')
Books_data = wb.get_sheet_by_name('Books')
Directors_data = wb.get_sheet_by_name('Directors')
'''
p_df = pd.DataFrame()
p_rows = Publishers_data.iter_rows()
p_first_row = next(p_rows)
p_sec_row = next(p_rows)
p_headings = [c.value for c in p_first_row]
p_row1 = [c.value for c in p_sec_row]
p_headings
print(p_headings, p_row1)
'''
'''
df = []
for i in range(Publishers_data.max_row):
df.append([c.value for c in next(p_rows)])
publisher_df = pd.DataFrame(df[1:])
publisher_df.columns = df[0]
print(publisher_df)
'''
'''
a_rows = Authors_data.iter_rows()
a_first_row = next(a_rows)
a_headings = [c.value for c in a_first_row]
a_headings
'''
def make_dataframe(data_sheet_name):
df = []
rows = data_sheet_name.iter_rows()
for i in range(data_sheet_name.max_row):
df.append([c.value for c in next(rows)])
res_df = pd.DataFrame(df[1:])
res_df.columns = df[0]
return res_df
publishers_df = make_dataframe(Publishers_data)
authors_df = make_dataframe(Authors_data)
books_df = make_dataframe(Books_data)
directors_df = make_dataframe(Directors_data)
print(publishers_df.shape[0])
print(publishers_df.index, '\n')
#print(publishers_df.iloc[0])
def create_comands_txt(str_model_class, df, str_file_name, foreign):
f = open(str_file_name, 'w')
counter = 0
#comands = ''
headings = list(df.columns)
for i in range(df.shape[0]):
one_command_str = str_model_class + '.objects.create('
#print(df.iloc[i][headings[0]])
for j in range(df.shape[1]):
#print(' ',headings[j],' - ', str(df.iloc[i][headings[j]]).lstrip().rstrip())
if df.iloc[i][headings[j]] != None and df.iloc[i][headings[j]] != "None":
if headings[j] not in foreign:
one_command_str = one_command_str + headings[j] + '=\"' + str(df.iloc[i][headings[j]]).lstrip().rstrip() + '\",'
else:
#print('else')
comma_pos = str(df.iloc[i][headings[j]]).find(',')
if comma_pos > 0 and headings[j][-1]=='s':
str2 = headings[j][:-1].capitalize() + '.objects.get(sort_name=\"'
str_with_comma = str(df.iloc[i][headings[j]]).lstrip().rstrip()
one_command_str = one_command_str + headings[j] + '=[' + str2 +\
str_with_comma[0:comma_pos] + ')\",' + str2 + str_with_comma[comma_pos:] + '\")],'
else:
one_command_str = one_command_str + headings[j] + '=' +\
headings[j].capitalize() + '.objects.get(name=\"' + \
str(df.iloc[i][headings[j]]).lstrip().rstrip() + '\"),'
one_command_str = one_command_str[:-1] + ')\n'
#comands += one_command_str
f.write(one_command_str)
counter += 1
f.close()
print('generated '+str(counter)+' commands')
return 0
#create_comands_txt('Publisher', publishers_df, 'publisher_comands.txt')
#create_comands_txt('Author', authors_df, 'author_comands.txt')
#create_comands_txt('Book', books_df, 'book_comands.txt')
#create_comands_txt('Director', directors_df, 'director_comands.txt')
count = books_df.shape[0]
n = pd.Series([str(i) for i in range(190)])
type_cover = list(books_df.type_cover)
name = list(books_df.orig_lang_name)
res = []
for i in range(count):
str1 = ''
if type_cover[i]!=None:
str1 += type_cover[i] + '_'
str1 += n[i] + '_'
str1 += name[i].replace(' ','_')
#print(str1)
res.append(str1)
books_df['b_slug'] = | pd.Series(res) | pandas.Series |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIF.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from pyqtgraph import PlotWidget
import pyqtgraph
from PyQt5 import QtCore, QtGui, QtWidgets
#from matplotlib.pyplot import draw
#from pyqtgraph.widgets.PlotWidget import PlotWidget
import pandas as pd
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QApplication, QFileDialog, QWidget, QInputDialog, QLineEdit
import os
import sys
import numpy as np
from PyQt5.QtWidgets import QMessageBox
class Ui_mainWindow(object):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.resize(847, 674)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
#channel1
self.graphCh1 = PlotWidget(self.centralwidget)
self.graphCh1.setGeometry(QtCore.QRect(10, 40, 491, 151))
self.graphCh1.setObjectName("graphCh1")
#channel2
self.graphCh2 = PlotWidget(self.centralwidget)
self.graphCh2.setGeometry(QtCore.QRect(10, 320, 491, 161))
self.graphCh2.setObjectName("graphCh2")
#sprctogram
self.spectrogram = QtWidgets.QWidget(self.centralwidget)
self.spectrogram.setGeometry(QtCore.QRect(550, 40, 211, 371))
self.spectrogram.setObjectName("spectrogram")
self.playPauseButtonCh1 = QtWidgets.QPushButton(self.centralwidget)
self.playPauseButtonCh1.setGeometry(QtCore.QRect(10, 220, 75, 23))
self.playPauseButtonCh1.setObjectName("playPauseButtonCh1")
self.playPauseButtonCh1.clicked.connect(lambda:self.Pause1()) #pausing 1st
self.zoomInButtonCh1 = QtWidgets.QPushButton(self.centralwidget)
self.zoomInButtonCh1.setGeometry(QtCore.QRect(150, 220, 75, 23))
self.zoomInButtonCh1.setObjectName("zoomInButtonCh1")
self.zoomOutButtonCh1 = QtWidgets.QPushButton(self.centralwidget)
self.zoomOutButtonCh1.setGeometry(QtCore.QRect(290, 220, 75, 23))
self.zoomOutButtonCh1.setObjectName("zoomOutButtonCh1")
self.stopButtonCh1 = QtWidgets.QPushButton(self.centralwidget)
self.stopButtonCh1.setGeometry(QtCore.QRect(430, 220, 75, 23))
self.stopButtonCh1.setObjectName("stopButtonCh1")
self.stopButtonCh1.clicked.connect(lambda:self.clear('ch_one')) #clear 1st
self.playPauseButtonCh2 = QtWidgets.QPushButton(self.centralwidget)
self.playPauseButtonCh2.setGeometry(QtCore.QRect(10, 520, 75, 23))
self.playPauseButtonCh2.setObjectName("playPauseButtonCh2")
self.playPauseButtonCh2.clicked.connect(lambda:self.Pause2()) #pausing 2nd
self.zoomInButtonCh2 = QtWidgets.QPushButton(self.centralwidget)
self.zoomInButtonCh2.setGeometry(QtCore.QRect(150, 520, 75, 23))
self.zoomInButtonCh2.setObjectName("zoomInButtonCh2")
self.zoomOutButtonCh2 = QtWidgets.QPushButton(self.centralwidget)
self.zoomOutButtonCh2.setGeometry(QtCore.QRect(290, 520, 75, 23))
self.zoomOutButtonCh2.setObjectName("zoomOutButtonCh2")
self.stopButtonCh2 = QtWidgets.QPushButton(self.centralwidget)
self.stopButtonCh2.setGeometry(QtCore.QRect(430, 520, 75, 23))
self.stopButtonCh2.setObjectName("stopButtonCh2")
self.stopButtonCh2.clicked.connect(lambda:self.clear('ch_two')) #clear 2nd
self.spectroRun = QtWidgets.QPushButton(self.centralwidget)
self.spectroRun.setGeometry(QtCore.QRect(620, 450, 75, 23))
self.spectroRun.setObjectName("spectroRun")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 5, 191, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 275, 131, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(590, 20, 161, 16))
font = QtGui.QFont()
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(620, 520, 75, 23))
self.pushButton.setObjectName("pushButton")
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 847, 26))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuOpen = QtWidgets.QMenu(self.menuFile)
self.menuOpen.setObjectName("menuOpen")
self.menuSave_as = QtWidgets.QMenu(self.menuFile)
self.menuSave_as.setObjectName("menuSave_as")
self.menuActions = QtWidgets.QMenu(self.menubar)
self.menuActions.setObjectName("menuActions")
self.menuChannel_1 = QtWidgets.QMenu(self.menuActions)
self.menuChannel_1.setObjectName("menuChannel_1")
self.menuChannel_2 = QtWidgets.QMenu(self.menuActions)
self.menuChannel_2.setObjectName("menuChannel_2")
self.menuAbout = QtWidgets.QMenu(self.menubar)
self.menuAbout.setObjectName("menuAbout")
self.menuSpectrogram_Actions = QtWidgets.QMenu(self.menubar)
self.menuSpectrogram_Actions.setObjectName("menuSpectrogram_Actions")
self.menuColor_Palettes = QtWidgets.QMenu(self.menuSpectrogram_Actions)
self.menuColor_Palettes.setObjectName("menuColor_Palettes")
mainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mainWindow)
self.statusbar.setObjectName("statusbar")
mainWindow.setStatusBar(self.statusbar)
self.menuOpen_Ch1 = QtWidgets.QAction(mainWindow)
self.menuOpen_Ch1.setObjectName("menuOpen_Ch1")
self.menuOpen_Ch2 = QtWidgets.QAction(mainWindow)
self.menuOpen_Ch2.setObjectName("menuOpen_Ch2")
self.actionSpectro_Ch_1 = QtWidgets.QAction(mainWindow)
self.actionSpectro_Ch_1.setObjectName("actionSpectro_Ch_1")
self.actionSpectro_Ch2 = QtWidgets.QAction(mainWindow)
self.actionSpectro_Ch2.setObjectName("actionSpectro_Ch2")
self.menuSave_asImg = QtWidgets.QAction(mainWindow)
self.menuSave_asImg.setObjectName("menuSave_asImg")
self.menuSave_asMp4 = QtWidgets.QAction(mainWindow)
self.menuSave_asMp4.setObjectName("menuSave_asMp4")
self.actionPlay_Pause = QtWidgets.QAction(mainWindow)
self.actionPlay_Pause.setObjectName("actionPlay_Pause")
self.actionZoom_in = QtWidgets.QAction(mainWindow)
self.actionZoom_in.setObjectName("actionZoom_in")
self.actionZoom_out = QtWidgets.QAction(mainWindow)
self.actionZoom_out.setObjectName("actionZoom_out")
self.actionPlay_Pause_2 = QtWidgets.QAction(mainWindow)
self.actionPlay_Pause_2.setObjectName("actionPlay_Pause_2")
self.actionZoom_in_2 = QtWidgets.QAction(mainWindow)
self.actionZoom_in_2.setObjectName("actionZoom_in_2")
self.actionZoom_out_2 = QtWidgets.QAction(mainWindow)
self.actionZoom_out_2.setObjectName("actionZoom_out_2")
self.actionStop = QtWidgets.QAction(mainWindow)
self.actionStop.setObjectName("actionStop")
self.actionSpectro = QtWidgets.QAction(mainWindow)
self.actionSpectro.setObjectName("actionSpectro")
self.actionPlay_Pause_3 = QtWidgets.QAction(mainWindow)
self.actionPlay_Pause_3.setObjectName("actionPlay_Pause_3")
self.actionZoom_in_3 = QtWidgets.QAction(mainWindow)
self.actionZoom_in_3.setObjectName("actionZoom_in_3")
self.actionZoom_out_3 = QtWidgets.QAction(mainWindow)
self.actionZoom_out_3.setObjectName("actionZoom_out_3")
self.actionStop_2 = QtWidgets.QAction(mainWindow)
self.actionStop_2.setObjectName("actionStop_2")
self.actionSpectro_2 = QtWidgets.QAction(mainWindow)
self.actionSpectro_2.setObjectName("actionSpectro_2")
self.actionExit_App = QtWidgets.QAction(mainWindow)
self.actionExit_App.setObjectName("actionExit_App")
self.actionExit = QtWidgets.QAction(mainWindow)
self.actionExit.setObjectName("actionExit")
self.actionShow_Hide = QtWidgets.QAction(mainWindow)
self.actionShow_Hide.setObjectName("actionShow_Hide")
self.actionChange_Color = QtWidgets.QAction(mainWindow)
self.actionChange_Color.setObjectName("actionChange_Color")
self.actionShow_Hide_2 = QtWidgets.QAction(mainWindow)
self.actionShow_Hide_2.setObjectName("actionShow_Hide_2")
self.actionChange_Color_2 = QtWidgets.QAction(mainWindow)
self.actionChange_Color_2.setObjectName("actionChange_Color_2")
self.actionSpeed_Options = QtWidgets.QAction(mainWindow)
self.actionSpeed_Options.setObjectName("actionSpeed_Options")
self.actionFull_Window = QtWidgets.QAction(mainWindow)
self.actionFull_Window.setObjectName("actionFull_Window")
self.actionPalette_1 = QtWidgets.QAction(mainWindow)
self.actionPalette_1.setObjectName("actionPalette_1")
self.actionPalette_2 = QtWidgets.QAction(mainWindow)
self.actionPalette_2.setObjectName("actionPalette_2")
self.actionPalette_3 = QtWidgets.QAction(mainWindow)
self.actionPalette_3.setObjectName("actionPalette_3")
self.actionPalette_4 = QtWidgets.QAction(mainWindow)
self.actionPalette_4.setObjectName("actionPalette_4")
self.actionPalette_5 = QtWidgets.QAction(mainWindow)
self.actionPalette_5.setObjectName("actionPalette_5")
self.actionPDF = QtWidgets.QAction(mainWindow)
self.actionPDF.setObjectName("actionPDF")
self.actionSpeed_Options_2 = QtWidgets.QAction(mainWindow)
self.actionSpeed_Options_2.setObjectName("actionSpeed_Options_2")
self.actionAdd_Title = QtWidgets.QAction(mainWindow)
self.actionAdd_Title.setObjectName("actionAdd_Title")
self.actionAdd_Title_2 = QtWidgets.QAction(mainWindow)
self.actionAdd_Title_2.setObjectName("actionAdd_Title_2")
self.menuOpen.addAction(self.menuOpen_Ch1)
self.menuOpen.addAction(self.menuOpen_Ch2)
self.menuOpen.addAction(self.actionSpectro_Ch_1)
self.menuOpen.addAction(self.actionSpectro_Ch2)
self.menuSave_as.addAction(self.actionPDF)
self.menuFile.addAction(self.menuOpen.menuAction())
self.menuFile.addAction(self.menuSave_as.menuAction())
self.menuFile.addAction(self.actionExit)
self.menuChannel_1.addAction(self.actionShow_Hide)
self.menuChannel_1.addAction(self.actionChange_Color)
self.menuChannel_1.addAction(self.actionSpeed_Options_2)
self.menuChannel_1.addAction(self.actionAdd_Title)
self.menuChannel_2.addAction(self.actionShow_Hide_2)
self.menuChannel_2.addAction(self.actionChange_Color_2)
self.menuChannel_2.addAction(self.actionSpeed_Options)
self.menuChannel_2.addAction(self.actionAdd_Title_2)
self.menuActions.addAction(self.menuChannel_1.menuAction())
self.menuActions.addAction(self.menuChannel_2.menuAction())
self.menuColor_Palettes.addAction(self.actionPalette_1)
self.menuColor_Palettes.addAction(self.actionPalette_2)
self.menuColor_Palettes.addAction(self.actionPalette_3)
self.menuColor_Palettes.addAction(self.actionPalette_4)
self.menuColor_Palettes.addAction(self.actionPalette_5)
self.menuSpectrogram_Actions.addAction(self.menuColor_Palettes.menuAction())
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuActions.menuAction())
self.menubar.addAction(self.menuSpectrogram_Actions.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.timer1 = QtCore.QTimer()
self.timer2 = QtCore.QTimer()
self.retranslateUi(mainWindow)
QtCore.QMetaObject.connectSlotsByName(mainWindow)
def retranslateUi(self, mainWindow):
_translate = QtCore.QCoreApplication.translate
mainWindow.setWindowTitle(_translate("mainWindow", "BioMedical Signal Viewer"))
self.playPauseButtonCh1.setText(_translate("mainWindow", "Play/Pause"))
self.zoomInButtonCh1.setText(_translate("mainWindow", "Zoom in"))
self.zoomOutButtonCh1.setText(_translate("mainWindow", "Zoom out"))
self.stopButtonCh1.setText(_translate("mainWindow", "Clear"))
self.playPauseButtonCh2.setText(_translate("mainWindow", "Play/Pause"))
self.zoomInButtonCh2.setText(_translate("mainWindow", "Zoom in"))
self.zoomOutButtonCh2.setText(_translate("mainWindow", "Zoom out"))
self.stopButtonCh2.setText(_translate("mainWindow", "Clear"))
self.spectroRun.setText(_translate("mainWindow", "Spectro 1"))
self.label.setText(_translate("mainWindow", "CHANNEL 1"))
self.label_2.setText(_translate("mainWindow", "CHANNEL 2"))
self.label_3.setText(_translate("mainWindow", "SPECTROGRAM"))
self.pushButton.setText(_translate("mainWindow", "Spectro 2"))
self.menuFile.setTitle(_translate("mainWindow", "File"))
self.menuOpen.setTitle(_translate("mainWindow", "Open"))
self.menuSave_as.setTitle(_translate("mainWindow", "Save as"))
self.menuActions.setTitle(_translate("mainWindow", "Actions"))
self.menuChannel_1.setTitle(_translate("mainWindow", "Channel 1"))
self.menuChannel_2.setTitle(_translate("mainWindow", "Channel 2"))
self.menuAbout.setTitle(_translate("mainWindow", "About"))
self.menuSpectrogram_Actions.setTitle(_translate("mainWindow", "Spectrogram Actions"))
self.menuColor_Palettes.setTitle(_translate("mainWindow", "Color Palettes"))
self.menuOpen_Ch1.setText(_translate("mainWindow", "Channel 1"))
self.menuOpen_Ch2.setText(_translate("mainWindow", "Channel 2"))
self.actionSpectro_Ch_1.setText(_translate("mainWindow", "Spectro-Ch 1"))
self.actionSpectro_Ch2.setText(_translate("mainWindow", "Spectro-Ch2"))
self.menuSave_asImg.setText(_translate("mainWindow", "Img"))
self.menuSave_asMp4.setText(_translate("mainWindow", "mp4"))
self.actionPlay_Pause.setText(_translate("mainWindow", "Play/Pause"))
self.actionZoom_in.setText(_translate("mainWindow", "Zoom in"))
self.actionZoom_out.setText(_translate("mainWindow", "Zoom out"))
self.actionPlay_Pause_2.setText(_translate("mainWindow", "Play/Pause"))
self.actionZoom_in_2.setText(_translate("mainWindow", "Zoom in"))
self.actionZoom_out_2.setText(_translate("mainWindow", "Zoom out"))
self.actionStop.setText(_translate("mainWindow", "Clear"))
self.actionSpectro.setText(_translate("mainWindow", "Spectro"))
self.actionPlay_Pause_3.setText(_translate("mainWindow", "Play/Pause"))
self.actionZoom_in_3.setText(_translate("mainWindow", "Zoom in"))
self.actionZoom_out_3.setText(_translate("mainWindow", "Zoom out"))
self.actionStop_2.setText(_translate("mainWindow", "Clear"))
self.actionSpectro_2.setText(_translate("mainWindow", "Spectro"))
self.actionExit_App.setText(_translate("mainWindow", "Exit App"))
self.actionExit.setText(_translate("mainWindow", "Exit"))
self.actionShow_Hide.setText(_translate("mainWindow", "Show/Hide"))
self.actionChange_Color.setText(_translate("mainWindow", "Change Color"))
self.actionShow_Hide_2.setText(_translate("mainWindow", "Show/Hide"))
self.actionChange_Color_2.setText(_translate("mainWindow", "Change Color"))
self.actionSpeed_Options.setText(_translate("mainWindow", "Speed Options"))
self.actionFull_Window.setText(_translate("mainWindow", "Full Window"))
self.actionPalette_1.setText(_translate("mainWindow", "Palette 1"))
self.actionPalette_2.setText(_translate("mainWindow", "Palette 2"))
self.actionPalette_3.setText(_translate("mainWindow", "Palette 3"))
self.actionPalette_4.setText(_translate("mainWindow", "Palette 4"))
self.actionPalette_5.setText(_translate("mainWindow", "Palette 5"))
self.actionPDF.setText(_translate("mainWindow", "PDF"))
self.actionSpeed_Options_2.setText(_translate("mainWindow", "Speed Options"))
self.actionAdd_Title.setText(_translate("mainWindow", "Add Title"))
self.actionAdd_Title_2.setText(_translate("mainWindow", "Add Title"))
self.actionExit.triggered.connect(lambda: self.exitApp())
self.menuOpen_Ch1.triggered.connect(lambda: self.openFile("ch1"))
self.menuOpen_Ch2.triggered.connect(lambda: self.openFile("ch2"))
def exitApp(self):
sys.exit()
def openFile(self,text):
"""opens a file from the brower """
file_path=QFileDialog.getOpenFileName()
file_name=file_path[0].split('/')[-1]
self.read_data(file_name,text)
def read_data(self,file_name,text):
"""loads the data from chosen file"""
if text=="ch1":
df1=pd.read_csv(file_name)
time1=list(pd.to_numeric(df1['time'],downcast="float"))
amp1=list(pd.to_numeric(df1['amplitude'],downcast="float"))
self.draw(time1,amp1,text)
else:
df2= | pd.read_csv(file_name) | pandas.read_csv |
import copy
from datetime import datetime
from decimal import Decimal
from unittest import TestCase
import numpy as np
import pandas as pd
import pytz
from pandas import DataFrame
from src.constants import UTC, US_EASTERN, NAN
from src.dao.dao import DAO
from src.dao.intraday_dao import IntradayDAO
from src.entity.configuration_entity import ConfigurationEntity
from src.entity.evaluation_entity import EvaluationEntity
from src.entity.forward_entity import ForwardEntity
from src.entity.intraday_entity import IntradayEntity
from src.entity.stock_entity import StockEntity
class Utils:
@staticmethod
def create_table_frame() -> DataFrame:
dates = | pd.date_range('1/1/2000', periods=150, tz=UTC) | pandas.date_range |
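# A minimal sketch of the timezone-aware index built above; the 'UTC' literal stands
# in for the UTC constant imported by that module.
import pandas as pd

dates = pd.date_range('1/1/2000', periods=5, tz='UTC')
print(dates[0], len(dates))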
import torch
import pandas as pd
import os
import bisect
class ConcatDataset(torch.utils.data.ConcatDataset):
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
# obtain bounding boxes
defect_details = pd.DataFrame(columns=['label', 'defect_1', 'defect_2', 'shape', 'x_1', 'y_1', 'x_2', 'y_2'])
for defect in ['chipping', 'lead_defect', 'foreign_material', 'pocket_damage', 'lead_glue', 'marking_defect', 'scratch']:
file_name = os.path.join('/home/kaiyihuang/nexperia/new_data/20191129_Labeled_Image', defect,
'ImageLabel.csv')
table = pd.read_csv(file_name, names=['label', 'defect_2', 'shape', 'x_1', 'y_1', 'x_2', 'y_2'])
table['defect_1'] = defect
defect_details = defect_details.append(table)
defect_details = defect_details.set_index(['defect_1', 'label'])
self.defect_details = defect_details
self.classes = ['chipping', 'device_flip', 'empty_pocket', 'foreign_material', 'good',
'lead_defect', 'lead_glue', 'marking_defect', 'pocket_damage', 'scratch']
self.class_to_idx = {'chipping': 0,
'device_flip': 1,
'empty_pocket': 2,
'foreign_material': 3,
'good': 4,
'lead_defect': 5,
'lead_glue': 6,
'marking_defect': 7,
'pocket_damage': 8,
'scratch': 9}
self.image_index_id = | pd.read_csv('/home/kaiyihuang/nexperia/image_id_index.csv', index_col=0) | pandas.read_csv |