| prompt | completion | api |
|---|---|---|
| string, lengths 19–1.03M | string, lengths 4–2.12k | string, lengths 8–90 |
###############################################################################
from pandas import DataFrame, Series
from sklearn.metrics import adjusted_rand_score
from io_helper import IOHelper
class MetricsHelper():
# Class for updating, calculating and storing metrics
# Best objective score, rand index and other execution information
metrics = DataFrame([10e9], columns=["objective_score"])
# Best partition
partition = | DataFrame(columns=["partition"]) | pandas.DataFrame |
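For orientation, a minimal self-contained sketch of the pattern the masked pandas.DataFrame completion above belongs to: a sentinel "worst" objective score and an empty partition table that get overwritten when a better score arrives. The update_best helper and its arguments are invented for illustration and are not part of MetricsHelper.

import pandas as pd

# Illustrative only: sentinel best score and empty partition table.
metrics = pd.DataFrame([10e9], columns=["objective_score"])
partition = pd.DataFrame(columns=["partition"])

def update_best(score, labels):
    """Keep the lowest objective score seen so far and its partition (hypothetical helper)."""
    global metrics, partition
    if score < metrics["objective_score"].iloc[0]:
        metrics = pd.DataFrame([score], columns=["objective_score"])
        partition = pd.DataFrame({"partition": labels})

update_best(42.0, [0, 1, 1, 0])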
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 160)
pd.set_option('expand_frame_repr', False)
pd.set_option('precision', 4)
# constructing binary features
def process_embarked():
global df_titanic_data
# replacing the missing values with the most common value in the variable
df_titanic_data.Embarked[df_titanic_data.Embarked.isnull()] = df_titanic_data.Embarked.dropna().mode().values
# converting the values into numbers
df_titanic_data['Embarked'] = pd.factorize(df_titanic_data['Embarked'])[0]
# binarizing the constructed features
if keep_binary:
df_titanic_data = pd.concat([df_titanic_data, | pd.get_dummies(df_titanic_data['Embarked']) | pandas.get_dummies |
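A short illustrative sketch of the masked pd.get_dummies call in context, using an invented toy 'Embarked' column rather than the actual Titanic data (the prefix argument is added here only for readability):

import pandas as pd

# Invented toy column standing in for the Titanic 'Embarked' variable.
df = pd.DataFrame({"Embarked": ["S", "C", None, "Q", "S"]})
df["Embarked"] = df["Embarked"].fillna(df["Embarked"].dropna().mode()[0])
df["Embarked"] = pd.factorize(df["Embarked"])[0]

# One indicator column per factor level, then appended to the frame,
# mirroring the masked pd.get_dummies / pd.concat pair above.
dummies = pd.get_dummies(df["Embarked"], prefix="Embarked")
df = pd.concat([df, dummies], axis=1)
print(df)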
import math
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn import svm
def get_past_midfielders():
data = pd.read_csv('../resources/merged.csv', sep=',', encoding='utf-8', index_col=0)
model = data[['player_id', 'name', 'season', 'pos', 'round', 'team_rank', 'opponent_team_rank', 'team_pot', 'opp_pot',
'concede_pot', 'opp_concede_pot', 'prev_points', 'form_points', 'total_points',
'long_form', 'ict_form']]
MidfielderModal = model.loc[model['pos'] == 'Defender']
MidfielderModal.drop('pos', axis=1, inplace=True)
MidfielderModal.sort_values(['season', 'round'], ascending=True, inplace=True)
MidfielderModal.to_csv('../resources/predictions/MIDFIELDERS.csv', sep=',', encoding='utf-8')
players = MidfielderModal[8587:]
keys = MidfielderModal['round']
values = pd.cut(MidfielderModal['round'], 3, labels=[1, 2, 3])
dictionary = dict(zip(keys, values))
MidfielderModal['round'] = values
X = MidfielderModal.drop(['total_points', 'season', 'player_id', 'name'], axis=1)
y = MidfielderModal[['total_points']]
X_train = X[:8586]
X_test = X[8587:]
y_train = y[:8586]
y_test = y[8587:]
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
score = regression_model.score(X_test, y_test)
y_pred = regression_model.predict(X_test)
testing = | pd.concat([X_test, y_test], 1) | pandas.concat |
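A compact sketch of the masked pd.concat call: with axis=1 (the bare positional 1 above), the target column is aligned on the index and placed next to the test features. The toy X_test / y_test frames below are assumptions.

import pandas as pd

# Assumed stand-ins for the X_test / y_test splits above.
X_test = pd.DataFrame({"round": [1, 2], "form_points": [3.5, 4.0]}, index=[8587, 8588])
y_test = pd.DataFrame({"total_points": [2, 6]}, index=[8587, 8588])

# axis=1 aligns on the index and places the target next to the features.
testing = pd.concat([X_test, y_test], axis=1)
testing["predicted"] = [2.4, 5.1]  # e.g. the output of regression_model.predict(X_test)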
"""
***************************************************************************************
Description: This module is designed to perform 'LE' summary calculations and store
them for future use
***********************************************************************************
Input Parameters: | N/A
Output Parameters: | N/A
Tables Accessed: | All of LE Schema
Tables Affected: | N/A
----------------------------------------------------------------------------------
Version Control
----------------------------------------------------------------------------------
Version Developer Date Change
------- --------- ---------- ------------------------------------------------
1.0 <NAME> 08/01/2019 Initial Creation
***************************************************************************************
"""
import sys
sys.path.append('../')
def CalculateSummaryInfo(LEName, ForecastName, SummaryName, SummaryDate, Update_User):
from Model import ModelLayer as m
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
import numpy as np
import pandas as pd
from datetime import datetime
from datetime import timedelta
import calendar
from Controller import DataInterface as di
Success = True
Messages = []
try:
#Get netting factor for each well
#Calculate the Monthly Average MBOED
print('Gathering wedge information...')
LEHeaderObj = m.LEHeader('', [], [], [LEName], [])
Rows, Success, Message = LEHeaderObj.ReadTable()
LEDataRowObj = m.LEData('', [LEName], [], [])
DataRows, Success, Message = LEDataRowObj.ReadTable()
if not Success or len(DataRows) == 0:
if not Message:
Message = 'No LE found in the database.'
Messages.append(Message)
else:
well_list = []
for row in Rows:
well_list.append(row.WellName)
NettingObj = m.GasNetting('', well_list, [])
NettingRows, Success, Message = NettingObj.ReadTable()
if isinstance(SummaryDate, str):
today = pd.to_datetime(SummaryDate)
elif isinstance(SummaryDate, datetime):
today = SummaryDate
else:
today = datetime.today()
first_of_month = datetime(today.year, today.month, 1)
first_of_year = datetime(today.year, 1, 1)
quarter_start = pd.to_datetime(today - pd.tseries.offsets.QuarterBegin(startingMonth = 1)).date()
quarter_end = pd.to_datetime(today + pd.tseries.offsets.QuarterEnd(startingMonth = 3)).date()
next_quarter_start = quarter_end + timedelta(days = 1)
month_end = pd.to_datetime(today + | pd.tseries.offsets.MonthEnd(1) | pandas.tseries.offsets.MonthEnd |
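The pandas offsets used above can be hard to read at a glance; this standalone sketch shows how QuarterBegin, QuarterEnd and MonthEnd anchor an assumed SummaryDate:

import pandas as pd
from datetime import datetime, timedelta

today = pd.Timestamp("2019-08-14")  # assumed SummaryDate
first_of_month = datetime(today.year, today.month, 1)                               # 2019-08-01
quarter_start = (today - pd.tseries.offsets.QuarterBegin(startingMonth=1)).date()   # 2019-07-01
quarter_end = (today + pd.tseries.offsets.QuarterEnd(startingMonth=3)).date()       # 2019-09-30
next_quarter_start = quarter_end + timedelta(days=1)                                # 2019-10-01
month_end = (today + pd.tseries.offsets.MonthEnd(1)).date()                         # 2019-08-31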
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 13:45:37 2018
@author: <NAME>
"""
import os
import pandas as pd
import sys
import csv
import datetime
def read_meta_data():
"""
Reads in the meta data and returns it as a dataframe.
:return: meta_df
"""
# Set the cwd to the directory of the file
os.chdir(os.path.dirname(sys.argv[0]))
# Read in the files
list_ = []
os.chdir(os.pardir + os.sep + "data" + os.sep + "camels_attributes_v2.0")
for file in os.listdir(os.getcwd()):
if file.endswith(".txt"):
temp_df = pd.read_table(file, sep=";", index_col=0)
list_.append(temp_df)
return pd.concat(list_, axis=1)
def read_streamflow_data():
"""
Reads in the streamflow data and returns it as a dataframe.
:return: stream_df: Column names are the stream ids. Index is the date.
"""
# Set the cwd to the directory of the file
os.chdir(os.path.dirname(sys.argv[0]))
# Read in the files
list_ = []
os.chdir(os.pardir + os.sep + "data" + os.sep + "usgs_streamflow")
temp_cwd = os.getcwd()
# Go through the directory order of the CAMELS data set
for name in os.listdir(temp_cwd):
print("Directory = " + name)
os.chdir(temp_cwd + os.sep + name)
for file in os.listdir(os.getcwd()):
if file.endswith(".txt"):
# Use a regular expression to separate by different
# amounts of whitespace
temp_df = pd.read_table(file, header=None, sep=r"\s+",
engine="python",
na_values=-999.00)
# Get the date as index
temp_df.index = temp_df.apply(
lambda x:
datetime.datetime.strptime(
"{0} {1} {2} 00:00:00".format(
x[1], x[2], x[3]),
"%Y %m %d %H:%M:%S"), axis=1)
name = temp_df[0].unique()
temp_df = pd.Series(temp_df[4])
temp_df.name = name[0]
list_.append(temp_df)
# Combine all separate streams into one dataframe.
return pd.concat(list_, axis=1)
def calculate_signatures(stream_df: pd.DataFrame, transform_to_mm=True,
meta_df=None):
"""
Calculates the signatures mean_annual_discharge, mean_winter_discharge and
mean_summer_discharge
:param stream_df:
:param transform_to_mm: bool
:param meta_df:
:return: Returns a dataframe for all hydrological signatures for all
catchments, with the catchment name as index and the signatures as
column.
"""
# Convert to a unit which is normalized to the catchment area. This
# avoids a clustering by catchment area
if transform_to_mm:
stream_df = cubic_feet_to_mm(stream_df, meta_df)
sig_df_list = []
# Get each stream separately to calculate the signatures
for stream in stream_df.columns:
print("Calculating signatures for stream: " + str(stream))
single_stream = pd.Series(stream_df[stream])
# Save the mean discharge for the river
yearly_dis = []
summer_dis = []
winter_dis = []
# Separate the stream by years
for year in set(single_stream.index.year):
stream_year = single_stream.loc[single_stream.index.year == year]
# skip years without data
if stream_year.isnull().values.any():
continue
sum_year = stream_year.mean()
yearly_dis.append(sum_year)
sum_winter = stream_year[stream_year.index.month.isin([11, 12, 1, 2, 3, 4])].mean()
winter_dis.append(sum_winter)
sum_summer = stream_year[stream_year.index.month.isin([5, 6, 7, 8, 9, 10])].mean()
summer_dis.append(sum_summer)
# Combine the signatures
stream_sigs = pd.DataFrame({"mean_ann_dis":(sum(yearly_dis) / len(yearly_dis)),
"mean_sum_dis":(sum(summer_dis) / len(summer_dis)),
"mean_win_dis":(sum(winter_dis) / len(winter_dis))},
index=[stream])
sig_df_list.append(stream_sigs)
# Combine all streams into one df
return | pd.concat(sig_df_list, axis=0) | pandas.concat |
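As a side note on the per-year loop above, the same seasonal means can be sketched with groupby. This is an illustrative alternative with invented daily data, not the author's code, and it omits the skip-years-with-missing-data guard and the cubic-feet-to-mm conversion of the original:

import numpy as np
import pandas as pd

# Invented daily discharge for one gauge over two complete years.
idx = pd.date_range("2000-01-01", "2001-12-31", freq="D")
flow = pd.Series(np.random.rand(len(idx)), index=idx)

winter = flow.index.month.isin([11, 12, 1, 2, 3, 4])
stream_sigs = pd.DataFrame({
    "mean_ann_dis": [flow.groupby(flow.index.year).mean().mean()],
    "mean_sum_dis": [flow[~winter].groupby(flow[~winter].index.year).mean().mean()],
    "mean_win_dis": [flow[winter].groupby(flow[winter].index.year).mean().mean()],
}, index=["stream_01"])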
"""Spatial operations for demand allocation."""
import itertools
import warnings
from typing import Callable, Iterable, Literal, Union
import geopandas as gpd
import pandas as pd
import shapely.ops
from shapely.geometry import GeometryCollection, MultiPolygon, Polygon
from shapely.geometry.base import BaseGeometry
def check_gdf(gdf: gpd.GeoDataFrame) -> None:
"""Check that GeoDataFrame contains (Multi)Polygon geometries with non-zero area.
Args:
gdf: GeoDataFrame.
Raises:
TypeError: Object is not a GeoDataFrame.
AttributeError: GeoDataFrame has no geometry.
TypeError: Geometry is not a GeoSeries.
ValueError: Geometry contains null geometries.
ValueError: Geometry contains non-(Multi)Polygon geometries.
ValueError: Geometry contains (Multi)Polygon geometries with zero area.
ValueError: MultiPolygon contains Polygon geometries with zero area.
"""
if not isinstance(gdf, gpd.GeoDataFrame):
raise TypeError("Object is not a GeoDataFrame")
if not hasattr(gdf, "geometry"):
raise AttributeError("GeoDataFrame has no geometry")
if not isinstance(gdf.geometry, gpd.GeoSeries):
raise TypeError("Geometry is not a GeoSeries")
warnings.filterwarnings("ignore", "GeoSeries.isna", UserWarning)
if gdf.geometry.isna().any():
raise ValueError("Geometry contains null geometries")
if not gdf.geometry.geom_type.isin(["Polygon", "MultiPolygon"]).all():
raise ValueError("Geometry contains non-(Multi)Polygon geometries")
if not gdf.geometry.area.all():
raise ValueError("Geometry contains (Multi)Polygon geometries with zero area")
is_mpoly = gdf.geometry.geom_type == "MultiPolygon"
for mpoly in gdf.geometry[is_mpoly]:
for poly in mpoly:
if not poly.area:
raise ValueError(
"MultiPolygon contains Polygon geometries with zero area"
)
def polygonize(geom: BaseGeometry) -> Union[Polygon, MultiPolygon]:
"""Convert geometry to (Multi)Polygon.
Args:
geom: Geometry to convert to (Multi)Polygon.
Returns:
Geometry converted to (Multi)Polygon, with all zero-area components removed.
Raises:
ValueError: Geometry has zero area.
"""
polys = []
# Explode geometries to polygons
if isinstance(geom, GeometryCollection):
for g in geom:
if isinstance(g, Polygon):
polys.append(g)
elif isinstance(g, MultiPolygon):
polys.extend(g)
elif isinstance(geom, MultiPolygon):
polys.extend(geom)
elif isinstance(geom, Polygon):
polys.append(geom)
# Remove zero-area polygons
polys = [p for p in polys if p.area]
if not polys:
raise ValueError("Geometry has zero area")
if len(polys) == 1:
return polys[0]
return MultiPolygon(polys)
def explode(gdf: gpd.GeoDataFrame, ratios: Iterable[str] = None) -> gpd.GeoDataFrame:
"""Explode MultiPolygon to multiple Polygon geometries.
Args:
gdf: GeoDataFrame with non-zero-area (Multi)Polygon geometries.
ratios: Names of columns to rescale by the area fraction of the Polygon
relative to the MultiPolygon.
If provided, MultiPolygon cannot self-intersect.
By default, the original value is used unchanged.
Raises:
ValueError: Geometry contains self-intersecting MultiPolygon.
Returns:
GeoDataFrame with each Polygon as a separate row in the GeoDataFrame.
The index is the number of the source row in the input GeoDataFrame.
"""
check_gdf(gdf)
gdf = gdf.reset_index(drop=True)
is_mpoly = gdf.geometry.geom_type == "MultiPolygon"
if ratios and is_mpoly.any():
union_area = gdf.geometry[is_mpoly].apply(shapely.ops.unary_union).area
if (union_area != gdf.geometry[is_mpoly].area).any():
raise ValueError("Geometry contains self-intersecting MultiPolygon")
result = gdf.explode(index_parts=False)
if ratios:
fraction = result.geometry.area.values / gdf.geometry.area[result.index].values
result[ratios] = result[ratios].multiply(fraction, axis="index")
return result[gdf.columns]
def self_union(gdf: gpd.GeoDataFrame, ratios: Iterable[str] = None) -> gpd.GeoDataFrame:
"""Calculate the geometric union of a feature layer with itself.
Areas of overlap are split into two or more geometrically-identical features:
one for each of the original overlapping features.
Each split feature contains the attributes of the original feature.
Args:
gdf: GeoDataFrame with non-zero-area MultiPolygon geometries.
ratios: Names of columns to rescale by the area fraction of the split feature
relative to the original. By default, the original value is used unchanged.
Returns:
GeoDataFrame representing the union of the input features with themselves.
Its index contains tuples of the index of the original overlapping features.
Raises:
NotImplementedError: MultiPolygon geometries are not yet supported.
"""
check_gdf(gdf)
gdf = gdf.reset_index(drop=True)
is_mpoly = gdf.geometry.geom_type == "MultiPolygon"
if is_mpoly.any():
raise NotImplementedError("MultiPolygon geometries are not yet supported")
# Calculate all pairwise intersections
# https://nbviewer.jupyter.org/gist/jorisvandenbossche/3a55a16fda9b3c37e0fb48b1d4019e65
pairs = itertools.combinations(gdf.geometry, 2)
intersections = gpd.GeoSeries([a.intersection(b) for a, b in pairs])
# Form polygons from the boundaries of the original polygons and their intersections
boundaries = pd.concat([gdf.geometry, intersections]).boundary.unary_union
polygons = gpd.GeoSeries(shapely.ops.polygonize(boundaries))
# Determine origin of each polygon by a spatial join on representative points
points = gpd.GeoDataFrame(geometry=polygons.representative_point())
oids = gpd.sjoin(
points,
gdf[["geometry"]],
how="left",
predicate="within",
)["index_right"]
# Build new dataframe
columns = get_data_columns(gdf)
df = gpd.GeoDataFrame(
data=gdf.loc[oids, columns].reset_index(drop=True),
geometry=polygons[oids.index].values,
)
if ratios:
fraction = df.area.values / gdf.area[oids].values
df[ratios] = df[ratios].multiply(fraction, axis="index")
# Add original row indices to index
df.index = oids.groupby(oids.index).agg(tuple)[oids.index]
df.index.name = None
# Return with original column order
return df[gdf.columns]
def dissolve(
gdf: gpd.GeoDataFrame,
by: Iterable[str],
func: Union[Callable, str, list, dict],
how: Union[
Literal["union", "first"], Callable[[gpd.GeoSeries], BaseGeometry]
] = "union",
) -> gpd.GeoDataFrame:
"""Dissolve layer by aggregating features based on common attributes.
Args:
gdf: GeoDataFrame with non-empty (Multi)Polygon geometries.
by: Names of columns to group features by.
func: Aggregation function for data columns (see :meth:`pd.DataFrame.groupby`).
how: Aggregation function for geometry column.
Either 'union' (:meth:`gpd.GeoSeries.unary_union`),
'first' (first geometry in group),
or a function aggregating multiple geometries into one.
Returns:
GeoDataFrame with dissolved geometry and data columns,
and grouping columns set as the index.
"""
check_gdf(gdf)
merges = {"union": lambda x: x.unary_union, "first": lambda x: x.iloc[0]}
data = gdf.drop(columns=gdf.geometry.name).groupby(by=by).aggregate(func)
geometry = gdf.groupby(by=by, group_keys=False)[gdf.geometry.name].aggregate(
merges.get(how, how)
)
return gpd.GeoDataFrame(geometry, geometry=gdf.geometry.name, crs=gdf.crs).join(
data
)
def overlay(
*gdfs: gpd.GeoDataFrame,
how: Literal[
"intersection", "union", "identity", "symmetric_difference", "difference"
] = "intersection",
ratios: Iterable[str] = None,
) -> gpd.GeoDataFrame:
"""Overlay multiple layers incrementally.
When a feature from one layer overlaps the feature of another layer,
the area of overlap is split into two geometrically-identical features:
one for each of the original overlapping features.
Each split feature contains the attributes of the original feature.
TODO: To identify the source of output features, the user can ensure that each
layer contains a column to index by.
Alternatively, tuples of indices of the overlapping
feature from each layer (null if none) could be returned as the index.
Args:
gdfs: GeoDataFrames with non-empty (Multi)Polygon geometries
assumed to contain no self-overlaps (see :func:`self_union`).
Names of (non-geometry) columns cannot be used more than once.
Any index columns are ignored.
how: Spatial overlay method (see :func:`gpd.overlay`).
ratios: Names of columns to rescale by the area fraction of the split feature
relative to the original. By default, the original value is used unchanged.
Raises:
ValueError: Duplicate column names in layers.
Returns:
GeoDataFrame with the geometries and attributes resulting from the overlay.
"""
for gdf in gdfs:
check_gdf(gdf)
if ratios is None:
ratios = []
# Check for duplicate non-geometry column names
seen = set()
duplicates = set(
c for df in gdfs for c in get_data_columns(df) if c in seen or seen.add(c)
)
if duplicates:
raise ValueError(f"Duplicate column names in layers: {duplicates}")
# Drop index columns and replace with default index of known name
# NOTE: Assumes that the generated index names are not already column names
keys = [f"__id{i}__" for i in range(len(gdfs))]
gdfs = [df.reset_index(drop=True).rename_axis(k) for df, k in zip(gdfs, keys)]
overlay = None
for i in range(len(gdfs) - 1):
a, b = overlay if i else gdfs[i], gdfs[i + 1]
# Perform overlay with geometry and constant fields
constants = [
[c for c in df.columns if c == df.geometry.name or c not in ratios]
for df in (a, b)
]
overlay = gpd.overlay(
a[constants[0]].reset_index(), b[constants[1]].reset_index(), how=how
)
# For uniform fields, compute area fraction of originals and merge by index
# new_value = (old_value / old_area) * new_area
dfs = []
for j, df in enumerate((a, b)):
df_ratios = [c for c in df.columns if c != df.geometry.name and c in ratios]
if df_ratios:
dfs.append(
df[df_ratios]
.div(df.area, axis="index")
.reindex(overlay[keys[j]])
.reset_index(drop=True)
.mul(overlay.area, axis="index")
)
if dfs:
# Assumed to be faster than incremental concat
overlay = | pd.concat([overlay] + dfs, axis="columns") | pandas.concat |
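The comment above, new_value = (old_value / old_area) * new_area, is the heart of the area-weighted allocation; here is a minimal standalone sketch with made-up rectangles, independent of this module's check_gdf machinery:

import geopandas as gpd
from shapely.geometry import box

# Made-up layers: a parcel carrying a population count and a zone covering half of it.
parcels = gpd.GeoDataFrame({"population": [100.0]}, geometry=[box(0, 0, 2, 1)])
zones = gpd.GeoDataFrame({"zone": ["A"]}, geometry=[box(1, 0, 3, 1)])

pieces = gpd.overlay(parcels, zones, how="intersection")
# new_value = (old_value / old_area) * new_area
fraction = pieces.area.values / parcels.area.values    # 1.0 / 2.0 in this toy case
pieces["population"] = pieces["population"] * fraction  # -> 50.0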
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import pandas as pd
import numpy as np
import re
import click
from ...exceptions import InvalidFileExtension
def concat_command(files, output, **kwargs):
verbose = kwargs.pop("verbose", False)
# make sure the extension is csv or feather
output = Path(output)
if output.suffix not in (".csv", ".feather"):
raise InvalidFileExtension("Invalid file extension")
save_as_csv = True if output.suffix == ".csv" else False
# concat everything in filepath
if verbose:
click.secho("Files to read: {}".format(files), fg='green')
# read all files
data = []
with click.progressbar(files, label="Parsing files") as bar:
for f in bar:
tmp = pd.read_csv(f, nrows=1, header=None)
# hack to support new products with a different file format
if tmp.iloc[0, 0] == 'deviceModel':
tmp = pd.read_csv(f, skiprows=3)
elif tmp.shape[1] == 2:
tmp = pd.read_csv(f, skiprows=1)
else:
tmp = pd.read_csv(f)
data.append(tmp)
# concat all of the files together
df = pd.concat(data, sort=False)
# try sorting based on a time column
if "timestamp_iso" in df.columns:
df = df.sort_values(by='timestamp_iso')
elif "timestamp" in df.columns:
df = df.sort_values(by='timestamp')
if df.empty:
raise Exception("No data")
# save the file
if verbose:
click.secho("Saving file to {}".format(output), fg='green')
if save_as_csv:
df.to_csv(output)
else:
df.reset_index().to_feather(output)
def concat_logs_command(files, output, **kwargs):
verbose = kwargs.pop("verbose", False)
# make sure the extension is csv or feather
output = Path(output)
if output.suffix not in (".csv", ".feather"):
raise InvalidFileExtension("Invalid file extension")
save_as_csv = True if output.suffix == ".csv" else False
# concat everything in filepath
if verbose:
click.secho("Files to read: {}".format(files), fg='green')
# read all files
data = []
with click.progressbar(files, label="Parsing files") as bar:
for each in bar:
with open(each, "r", errors='ignore') as f:
for line in f:
line = line.split(":")
x, y = line[0], line[1:]
y = ":".join(y).strip()
try:
millis, location, level = re.split("[ ]", x)
except:
continue
location = location[1:-1]
data.append(dict(millis=int(millis), location=location, level=level, message=y))
# concat all results
data = pd.DataFrame(data)
# group data
data["group"] = (data.millis - data.millis.shift(1) < -1000).cumsum()
data["timestamp_iso"] = np.nan
rv = []
for _, grp in data.groupby("group"):
t0 = None
idx = grp[grp["message"].str.contains("Current time", regex=False)]
if not idx.empty:
t0 = pd.to_datetime(":".join(idx.message.values[0].split(":")[1:]))
else:
idx = grp[grp["message"].str.contains("Time from RTC", regex=False)]
if not idx.empty:
t0 = pd.to_datetime(idx.message.values[0][18:])
if t0:
millis0 = idx["millis"].values[0]
grp.loc[:, "timestamp_iso"] = grp["millis"].apply(lambda x: t0 + pd.Timedelta(x - millis0, unit='millis'))
rv.append(grp)
rv = | pd.concat(rv) | pandas.concat |
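The millis-to-timestamp reconstruction above hinges on pd.Timedelta; a toy sketch with an assumed "Current time" anchor:

import pandas as pd

# Assumed anchor: the log prints "Current time: ..." when its millis counter reads 5000.
t0 = pd.Timestamp("2021-03-01 12:00:00")
millis0 = 5000
millis = pd.Series([5000, 6500, 9000])

timestamps = millis.apply(lambda m: t0 + pd.Timedelta(m - millis0, unit="millis"))
# 12:00:00.000, 12:00:01.500, 12:00:04.000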
"""Data Analysis
================
"""
from os.path import exists
import nixio as nix
import numpy as np
import numpy.linalg
from typing import Dict, List, Tuple, Type, Union, Set, Any, Optional, Iterator
import pandas as pd
from collections import defaultdict
from kivy_garden.collider import Collide2DPoly, CollideEllipse
from kivy_garden.painter import PaintCircle, PaintEllipse, PaintPolygon, \
PaintFreeformPolygon, PaintPoint, PaintShape
from glitter2.storage.data_file import DataFile
__all__ = (
'default_value', 'not_cached', 'AnalysisFactory', 'AnalysisSpec',
'FileDataAnalysis', 'AnalysisChannel', 'TemporalAnalysisChannel',
'EventAnalysisChannel', 'PosAnalysisChannel', 'ZoneAnalysisChannel',
'get_variable_type_optional')
def _sort_dict(d: dict) -> List[tuple]:
return list(sorted(d.items(), key=lambda x: x[0]))
def _get_flat_types(type_hint: Type) -> Tuple[Type]:
if hasattr(type_hint, '__origin__') and type_hint.__origin__ is Union:
return type_hint.__args__
return type_hint,
def _filter_default(type_hint: Type) -> List[Type]:
types = _get_flat_types(type_hint)
return [t for t in types if t != DefaultType]
known_arg_types = {
int, float, str, List[int], List[float], List[str], type(None)}
known_ret_types = {
int, float, str, List[int], List[float], List[str], Tuple[int],
Tuple[float], Tuple[str]}
def is_type_unknown(known_types, query):
return set(query) - known_types
def get_variable_type_optional(type_hint: List[Type]) -> Tuple[Type, bool]:
if len(type_hint) == 1:
return type_hint[0], False
if type(None) not in type_hint:
raise ValueError('Expected to contain none type if more than one type')
type_hint.remove(type(None))
if len(type_hint) == 1:
return type_hint[0], True
raise ValueError('Expected only one type')
class default_value(int):
pass
DefaultType = Type[default_value]
DefaultFloat = Union[float, DefaultType]
DefaultStr = Union[str, DefaultType]
not_cached = object()
class AnalysisFactory:
analysis_classes: Set[Type['AnalysisChannel']] = set()
by_name: Dict[str, Type['AnalysisChannel']] = {}
@classmethod
def register_analysis_class(cls, analysis_class: Type['AnalysisChannel']):
cls.analysis_classes.add(analysis_class)
name = f'{analysis_class.__module__}\0{analysis_class.__qualname__}'
cls.by_name[name] = analysis_class
@classmethod
def get_class_from_method(
cls, method) -> Tuple[Type['AnalysisChannel'], str]:
mod = method.__module__
cls_name, method_name = method.__qualname__.rsplit('.', maxsplit=1)
name = f'{mod}\0{cls_name}'
if name not in cls.by_name:
raise ValueError(
f'Unrecognized class {cls_name} of method {method}')
return cls.by_name[name], method_name
@classmethod
def get_classes_from_type(
cls, analysis_type: str) -> List[Type['AnalysisChannel']]:
return [c for c in cls.analysis_classes
if c.analysis_type == analysis_type]
@classmethod
def get_variables(
cls, global_vars=True, local_vars=True
) -> Dict[
str,
Tuple[List[Type['AnalysisChannel']], str, Tuple[Type, bool], Any]]:
variables = {}
all_variables = {}
for c in cls.analysis_classes:
special_args = c.spec_get_special_arg_type()
for key, (doc, tp) in c.spec_get_compute_variables().items():
if key in all_variables:
doc_, tp_ = all_variables[key]
# we allow empty doc, in which case non-empty is used
if doc and doc_ and doc != doc_ or tp != tp_:
raise ValueError(
f'Variable "{key}" of class {c} was previously '
f'defined with type "{tp_}" and doc "{doc_}", but '
f'we now got type "{tp}" and doc "{doc}"')
if doc:
all_variables[key] = doc, tp
else:
all_variables[key] = doc, tp
is_global = c.spec_get_is_global_arg(key)
if is_global and global_vars or not is_global and local_vars:
if key not in variables:
special_arg = special_args.get(key, None)
variables[key] = [c], doc, tp, special_arg
else:
classes, doc_, tp_, special_arg = variables[key]
classes.append(c)
# just in case previously we had empty doc
if doc:
variables[key] = classes, doc, tp, special_arg
return variables
@classmethod
def _get_methods_from_type(
cls, analysis_type: str, creating_methods
) -> Dict[str, Tuple[Type['AnalysisChannel'], str, Type]]:
methods = {}
for c in cls.analysis_classes:
if c.analysis_type != analysis_type:
continue
special_type = c.spec_get_channel_creating_methods()
for key, (doc, tp) in c.spec_get_compute_methods().items():
if creating_methods:
if key in special_type:
methods[key] = c, doc, tp
else:
if key not in special_type:
methods[key] = c, doc, tp
return methods
@classmethod
def get_channel_creating_methods_from_type(
cls, analysis_type: str
) -> Dict[str, Tuple[Type['AnalysisChannel'], str, Type]]:
return cls._get_methods_from_type(analysis_type, True)
@classmethod
def get_compute_methods_from_type(
cls, analysis_type: str
) -> Dict[str, Tuple[Type['AnalysisChannel'], str, Type]]:
return cls._get_methods_from_type(analysis_type, False)
@classmethod
def get_channel_creating_method_spec(
cls, analysis_cls: Type['AnalysisChannel'], name: str
) -> Tuple[str, Type, str, Dict[str, Tuple[Tuple[Type, bool], str]]]:
create_type = analysis_cls.spec_get_channel_creating_methods()[name]
doc, ret_type = analysis_cls.spec_get_compute_methods()[name]
special_args = analysis_cls.spec_get_special_arg_type()
variables = {}
for var, (_, tp) in analysis_cls.spec_get_compute_method_args(
name).items():
variables[var] = tp, special_args.get(var, None)
return doc, ret_type, create_type, variables
@classmethod
def get_compute_method_spec(
cls, analysis_cls: Type['AnalysisChannel'], name: str
) -> Tuple[str, Type, Dict[str, Tuple[Tuple[Type, bool], str]]]:
doc, ret_type = analysis_cls.spec_get_compute_methods()[name]
special_args = analysis_cls.spec_get_special_arg_type()
variables = {}
for var, (_, tp) in analysis_cls.spec_get_compute_method_args(
name).items():
variables[var] = tp, special_args.get(var, None)
return doc, ret_type, variables
class AnalysisSpec:
_default_args: Dict[Type['AnalysisChannel'], Dict[str, Any]] = {}
_new_channels: List[
Tuple[str, str, Type['AnalysisChannel'], str, tuple, dict]] = []
_computations: List[
Tuple[Optional[List[str]], str, Type['AnalysisChannel'], str, tuple,
dict]] = []
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._default_args = defaultdict(dict)
self._new_channels = []
self._computations = []
def add_arg_default(
self, cls: Type['AnalysisChannel'], name: str, value: Any):
self._default_args[cls][name] = value
def add_new_channel_computation(
self, channel: str, new_channel_name: str, compute_method,
*args, **kwargs):
cls, method_name = AnalysisFactory.get_class_from_method(
compute_method)
self._new_channels.append(
(channel, new_channel_name, cls, method_name, args, kwargs))
def add_computation(
self, channels: List[str], compute_method, *args,
compute_key: str = '', **kwargs):
cls, method_name = AnalysisFactory.get_class_from_method(
compute_method)
self._computations.append(
(channels, compute_key, cls, method_name, args, kwargs))
def compute_create_channels(self, analysis_object: 'FileDataAnalysis'):
default_args = self._default_args
cls_cache = {}
for channel, new_name, cls, method_name, args, kwargs in \
self._new_channels:
cache_key = cls, channel
if cache_key not in cls_cache:
obj = cls_cache[cache_key] = cls(
name=channel, analysis_object=analysis_object)
for name, value in default_args.get(cls, {}).items():
setattr(obj, name, value)
analysis_channel = cls_cache[cache_key]
brief_name = method_name
if brief_name.startswith('compute_'):
brief_name = brief_name[8:]
# get the type of channel created
create_map = \
analysis_channel.spec_get_channel_creating_methods()
ret_type = create_map[brief_name]
f = getattr(analysis_channel, method_name)
res = f(*args, **kwargs)
# add the channel to the data analysis object
add = getattr(analysis_object, f'add_{ret_type}_channel')
add(new_name, *res)
def compute(self, analysis_object: 'FileDataAnalysis') -> list:
output = []
default_args = self._default_args
cls_cache = {}
for channels, compute_key, cls, method_name, args, kwargs in \
self._computations:
if not channels:
if cls.analysis_type == 'event':
channels = analysis_object.event_channels_data.keys()
elif cls.analysis_type == 'pos':
channels = analysis_object.pos_channels_data.keys()
elif cls.analysis_type == 'zone':
channels = analysis_object.zone_channels_shapes.keys()
for channel in channels:
cache_key = cls, channel
if cache_key not in cls_cache:
obj = cls_cache[cache_key] = cls(
name=channel, analysis_object=analysis_object)
for name, value in default_args.get(cls, {}).items():
setattr(obj, name, value)
analysis_channel = cls_cache[cache_key]
brief_name = method_name
if brief_name.startswith('compute_'):
brief_name = brief_name[8:]
f = getattr(analysis_channel, method_name)
res = f(*args, **kwargs)
output.append(
(analysis_channel.analysis_type, channel, brief_name,
compute_key, res))
return output
def clear_arg_defaults(self):
self._default_args = defaultdict(dict)
def clear_new_channel_computation(self):
self._new_channels = []
def clear_computation(self):
self._computations = []
class FileDataAnalysis:
filename: str = ''
data_file: DataFile = None
nix_file: Optional[nix.File] = None
metadata: Dict = {}
video_metadata: Dict = {}
timestamps: np.ndarray = None
event_channels_data: Dict[str, Optional[np.ndarray]] = {}
pos_channels_data: Dict[str, Optional[np.ndarray]] = {}
zone_channels_shapes: Dict[str, Optional[PaintShape]] = {}
channels_metadata: Dict[str, dict] = {}
normalized_names_map: Dict[str, str] = {}
missed_timestamps = False
missing_timestamp_values = []
pixels_per_meter = 0
def __init__(self, filename, **kwargs):
super(FileDataAnalysis, self).__init__(**kwargs)
self.filename = filename
self.event_channels_data = {}
self.pos_channels_data = {}
self.zone_channels_shapes = {}
self.channels_metadata = {}
self.normalized_names_map = {}
def flatten_data(self, data_arrays) -> np.ndarray:
ordered_indices = self.data_file.timestamp_intervals_ordered_keys
if len(data_arrays) > 1:
data = [data_arrays[i] for i in ordered_indices]
return np.concatenate(data)
else:
return np.array(data_arrays[0])
def __enter__(self):
self.open_data_file()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close_data_file()
def open_data_file(self):
self.nix_file = nix.File.open(self.filename, nix.FileMode.ReadOnly)
self.data_file = DataFile(nix_file=self.nix_file)
def load_file_metadata(self, channels: Set[str] = None):
data_file = self.data_file
data_file.open_file()
self.video_metadata = data_file.video_metadata_dict
self.metadata = metadata = {}
metadata['saw_all_timestamps'] = data_file.saw_all_timestamps
metadata['glitter2_version'] = data_file.glitter2_version
metadata['ffpyplayer_version'] = data_file.ffpyplayer_version
metadata['pixels_per_meter'] = data_file.pixels_per_meter
self.pixels_per_meter = data_file.pixels_per_meter
self.missed_timestamps = not data_file.saw_all_timestamps
if self.missed_timestamps:
data_arrays_order = data_file.timestamp_intervals_ordered_keys
data = [data_file.timestamps_arrays[i] for i in data_arrays_order]
if not data:
raise ValueError('No data found in the file')
missing = [float(item[-1]) for item in data[:-1]]
if not data_file._saw_first_timestamp:
missing.insert(0, float(data[0][0]))
if not data_file._saw_last_timestamp:
missing.append(float(data[-1][-1]))
self.missing_timestamp_values = missing
else:
self.missing_timestamp_values = []
metadata = self.channels_metadata
normalized_names_map = self.normalized_names_map
for channels_data, src_channels in (
(self.event_channels_data, data_file.event_channels),
(self.pos_channels_data, data_file.pos_channels),
(self.zone_channels_shapes, data_file.zone_channels)):
for _, channel in _sort_dict(src_channels):
m = channel.channel_config_dict
name = m['name']
if channels and name not in channels:
continue
normalized_names_map[name.lower()] = name
metadata[name] = m
channels_data[name] = None
def load_file_data(self, channels: Set[str] = None):
self.load_file_metadata(channels)
data_file = self.data_file
self.timestamps = self.flatten_data(data_file.timestamps_arrays)
zone_channels_shapes = self.zone_channels_shapes
shape_cls_map = {
'PaintCircle': PaintCircle, 'PaintEllipse': PaintEllipse,
'PaintPolygon': PaintPolygon,
'PaintFreeformPolygon': PaintFreeformPolygon,
'PaintPoint': PaintPoint
}
for channels_data, src_channels in (
(self.event_channels_data, data_file.event_channels),
(self.pos_channels_data, data_file.pos_channels),
(None, data_file.zone_channels)):
for _, channel in _sort_dict(src_channels):
m = channel.channel_config_dict
name = m['name']
if channels and name not in channels:
continue
if channels_data is None:
state = m['shape_config']
cls = shape_cls_map[state['cls']]
shape = cls.create_shape_from_state(state)
zone_channels_shapes[name] = shape
else:
channels_data[name] = self.flatten_data(
channel.data_arrays)
def close_data_file(self):
if self.nix_file is None:
return
self.nix_file.close()
self.nix_file = None
def compute_data_summary(self, spec: AnalysisSpec) -> list:
# export_computed_statistics provides the header
rows = []
filename = self.filename
video_head = self.video_metadata['filename_head']
video_tail = self.video_metadata['filename_tail']
missed_timestamps = self.missed_timestamps
row = [filename, video_head, video_tail, missed_timestamps]
# first create all new data channels
spec.compute_create_channels(self)
# now compute any stats
for stat in spec.compute(self):
rows.append(row + list(stat))
return rows
@staticmethod
def export_computed_data_summary(filename: str, data: list):
"""Adds .xlsx to the name.
:param filename:
:param data:
:return:
"""
if not filename.endswith('.xlsx'):
filename += '.xlsx'
if exists(filename):
raise ValueError('"{}" already exists'.format(filename))
excel_writer = pd.ExcelWriter(filename, engine='xlsxwriter')
header = [
'data file', 'video path', 'video filename', 'missed timestamps',
'channel_type', 'channel', 'measure', 'measure_key', 'value']
df = pd.DataFrame(data, columns=header)
df.to_excel(excel_writer, sheet_name='statistics', index=False)
excel_writer.save()
def export_raw_data_to_excel(self, filename, dump_zone_collider=False):
if not filename.endswith('.xlsx'):
filename += '.xlsx'
if exists(filename):
raise ValueError('"{}" already exists'.format(filename))
excel_writer = pd.ExcelWriter(filename, engine='xlsxwriter')
if self.missed_timestamps:
# if we have timestamp discontinuities, indicate it
data = [
'Not all video frames were watched - timestamps are missing']
if self.missing_timestamp_values:
data.append('timestamps around where frames are missing:')
data.extend(self.missing_timestamp_values)
df = pd.DataFrame(data)
df.to_excel(
excel_writer, sheet_name='missing_timestamps', index=False)
file_metadata = dict(self.metadata)
file_metadata.update(self.video_metadata)
file_metadata = _sort_dict(file_metadata)
df = pd.DataFrame(file_metadata, columns=['Property', 'Value'])
df.to_excel(excel_writer, sheet_name='file_metadata', index=False)
# add sheet for all the channels metadata
metadata = []
channels_metadata = self.channels_metadata
for channel_name in self.event_channels_data:
metadata.append(('event_channel', channel_name))
metadata.extend(_sort_dict(channels_metadata[channel_name]))
for channel_name in self.pos_channels_data:
metadata.append(('pos_channel', channel_name))
metadata.extend(_sort_dict(channels_metadata[channel_name]))
for channel_name in self.zone_channels_shapes:
metadata.append(('zone_channel', channel_name))
# shape info is saved in the zone channels sheet
d = dict(channels_metadata[channel_name])
d.pop('shape_config', None)
metadata.extend(_sort_dict(d))
df = pd.DataFrame(metadata, columns=['Property', 'Value'])
df.to_excel(excel_writer, sheet_name='channels_metadata', index=False)
# add timestamps
df = | pd.DataFrame(self.timestamps, columns=['timestamp']) | pandas.DataFrame |
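A compact sketch of the multi-sheet export pattern used throughout export_raw_data_to_excel, with invented frames and file name; it assumes xlsxwriter is installed and uses the context-manager form of pd.ExcelWriter instead of an explicit save():

import pandas as pd

# Invented frames standing in for the metadata and timestamp tables above.
file_metadata = pd.DataFrame([("fps", 30), ("duration", 12.5)], columns=["Property", "Value"])
timestamps = pd.DataFrame([0.0, 0.033, 0.066], columns=["timestamp"])

# Context-manager form, so the file is closed even if a sheet write fails.
with pd.ExcelWriter("example_export.xlsx", engine="xlsxwriter") as writer:
    file_metadata.to_excel(writer, sheet_name="file_metadata", index=False)
    timestamps.to_excel(writer, sheet_name="timestamps", index=False)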
import logging
import numpy as np
import pandas as pd
from xarray import concat, open_dataset, Dataset
from benedict.dicts import benedict as Benedict
from mosartwmpy.config.parameters import Parameters
def load_reservoirs(self, config: Benedict, parameters: Parameters) -> None:
"""Loads the reservoir information from file onto the grid.
Args:
config (Benedict): the model configuration
parameters (Parameters): the model parameters
"""
logging.info('Loading reservoir file.')
# reservoir parameter file
reservoirs = open_dataset(config.get('water_management.reservoirs.path'))
# load reservoir variables
for key, value in config.get('water_management.reservoirs.variables').items():
setattr(self, key, np.array(reservoirs[value]).flatten())
# correct the fields with different units
# surface area from km^2 to m^2
self.reservoir_surface_area = self.reservoir_surface_area * 1.0e6
# capacity from millions m^3 to m^3
self.reservoir_storage_capacity = self.reservoir_storage_capacity * 1.0e6
# map dams to all their dependent grid cells
# this will be a table of many to many relationship of grid cell ids to reservoir ids
self.reservoir_to_grid_mapping = reservoirs[
config.get('water_management.reservoirs.grid_to_reservoir')
].to_dataframe().reset_index()[[
config.get('water_management.reservoirs.grid_to_reservoir_reservoir_dimension'),
config.get('water_management.reservoirs.grid_to_reservoir')
]].rename(columns={
config.get('water_management.reservoirs.grid_to_reservoir_reservoir_dimension'): 'reservoir_id',
config.get('water_management.reservoirs.grid_to_reservoir'): 'grid_cell_id'
})
# drop nan grid ids
self.reservoir_to_grid_mapping = self.reservoir_to_grid_mapping[self.reservoir_to_grid_mapping.grid_cell_id.notna()]
# correct to zero-based grid indexing for grid cell
self.reservoir_to_grid_mapping.loc[:, self.reservoir_to_grid_mapping.grid_cell_id.name] = self.reservoir_to_grid_mapping.grid_cell_id.values - 1
# set to integer
self.reservoir_to_grid_mapping = self.reservoir_to_grid_mapping.astype(int)
# count of the number of reservoirs that can supply each grid cell
self.reservoir_count = np.array(pd.DataFrame(self.id).join(
self.reservoir_to_grid_mapping.groupby('grid_cell_id').count().rename(columns={'reservoir_id': 'reservoir_count'}),
how='left'
).reservoir_count)
# index by grid cell
self.reservoir_to_grid_mapping = self.reservoir_to_grid_mapping.set_index('grid_cell_id')
# prepare the month or epiweek based reservoir schedules mapped to the domain
prepare_reservoir_schedule(self, config, parameters, reservoirs)
reservoirs.close()
def prepare_reservoir_schedule(self, config: Benedict, parameters: Parameters, reservoirs: Dataset) -> None:
"""Establishes the reservoir schedule and flow.
Args:
config (Benedict): the model configuration
parameters (Parameters): the model parameters
reservoirs (Dataset): the reservoir dataset loaded from file
"""
# the reservoir streamflow and demand are specified by the time resolution and reservoir id
# so let's remap those to the actual mosart domain for ease of use
# TODO i had wanted to convert these all to epiweeks no matter what format provided, but we don't know what year all the data came from
# streamflow flux
streamflow_time_name = config.get('water_management.reservoirs.streamflow_time_resolution')
streamflow = reservoirs[config.get('water_management.reservoirs.streamflow')]
schedule = None
for t in np.arange(streamflow.shape[0]):
flow = streamflow[t, :].to_pandas().to_frame('streamflow')
sched = | pd.DataFrame(self.reservoir_id, columns=['reservoir_id']) | pandas.DataFrame |
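The reservoir_count computation earlier in this snippet (join a groupby count back onto the full cell list) is a common left-join pattern; an isolated sketch with invented ids:

import numpy as np
import pandas as pd

# Invented ids: four grid cells, three reservoir-to-cell dependencies.
cells = pd.DataFrame({"grid_cell_id": [0, 1, 2, 3]})
mapping = pd.DataFrame({"grid_cell_id": [0, 0, 2], "reservoir_id": [10, 11, 12]})

counts = (mapping.groupby("grid_cell_id").count()
          .rename(columns={"reservoir_id": "reservoir_count"}))
reservoir_count = np.array(
    cells.set_index("grid_cell_id").join(counts, how="left").reservoir_count
)
# -> [2., nan, 1., nan]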
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
#import pybedtools #not used
import pandas as pd
import numpy as np
import time
import argparse
import sys
from matplotlib import pyplot as plt
# In[ ]:
# %matplotlib inline
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_name = 'repeat_masker.mapable.k50.Umap.hg38'
# genome_GC_frequency = '/fh/fast/ha_g/user/adoebley/projects/griffin_paper/genome_GC_frequency/results'
# out_dir = 'tmp'
# size_range = [15,500]
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_name', help='name of mapable regions file (with .bed removed)', required=True)
parser.add_argument('--genome_GC_frequency',help='folder containing GC counts in the reference sequence (made by generate_reference_files.snakemake)',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
args = parser.parse_args()
bam_file_name = args.bam_file_name
mapable_name=args.mapable_name
genome_GC_frequency = args.genome_GC_frequency
out_dir = args.out_dir
size_range = args.size_range
# In[ ]:
print('arguments provided:')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_name = "'+mapable_name+'"')
print('\tgenome_GC_frequency = "'+genome_GC_frequency+'"')
out_dir = out_dir.rstrip('/')
print('\tout_dir = "'+out_dir+'"')
print('\tsize_range = '+str(size_range))
# In[ ]:
#For now I'm going to keep the smoothing bin size as a set variable
GC_smoothing_step = 20
# In[ ]:
#input is the out_file from the previous step
in_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('in_file:',in_file)
#output is smoothed version
smoothed_out_file = out_dir +'/'+mapable_name+'/GC_bias/'+ bam_file_name+'.GC_bias.txt'
#plot files
plot_file1 = out_dir +'/'+mapable_name+'/GC_plots/'+ bam_file_name+'.GC_bias.pdf'
plot_file2 = out_dir +'/'+mapable_name+'/GC_plots/'+ bam_file_name+'.GC_bias.summary.pdf'
plot_file3 = out_dir +'/'+mapable_name+'/GC_plots/'+ bam_file_name+'.GC_bias.key_lengths.pdf'
print('out_file:',smoothed_out_file)
sys.stdout.flush()
# In[ ]:
#create output folders if needed
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_plots/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_plots/')
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_bias/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_bias/')
# In[ ]:
#import the GC info from the genome
frequency_prefix = genome_GC_frequency+'/'+mapable_name+'.'
GC_freq = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
biome = pd.read_table('cold/biome.txt', index_col=0, squeeze=True)
mapstats_fr12 = pd.read_table('cold/profiles/freeze12only.mapstats.txt', index_col=0, comment='#').T
computed = pd.read_table('cold/sample.computed.tsv', index_col=0)
input_fr12 = | pd.read_table('../../profiles-all/freeze12only.input-stats.parsed.txt', index_col=0) | pandas.read_table |
from __future__ import print_function, unicode_literals
if __name__ == "__main__":
# Import Matplotlib
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 20, 'figure.figsize': (10, 8)}) # set font and plot size to be larger
# Import pandas as "pd"
import pandas as pd
# Import PyInquirer to use as user input library
from PyInquirer import style_from_dict, Token, prompt, Separator, print_json
# Import plotly for graphs
import plotly.graph_objects as go
round_data = pd.read_csv("data/round_data.csv", index_col="round_name")
popular_armor_data = | pd.read_csv("data/popular_armor_stats.csv", index_col="armor_class") | pandas.read_csv |
import pandas as pd
from bsoclinicaltrials.server.main.strings import normalize
from bsoclinicaltrials.server.main.utils import chunks, get_dois_info
def tag_sponsor(x):
x_normalized = normalize(x)
for f in ['hopit', 'hosp', 'universi', 'chu ', 'ihu ', 'cmc ', '<NAME>', 'pasteur',
'<NAME>', ' national', 'calmettes', 'curie', 'direction centrale', 'société francaise', 'anrs', 'inserm']:
if f in x_normalized:
return 'academique'
return 'industriel'
def enrich(all_ct):
res = []
dois_to_get = []
for ct in all_ct:
enriched = enrich_ct(ct)
references = enriched.get('references', [])
for r in references:
if r.get('doi') and r.get('ReferenceType') in ['result', 'derived']:
dois_to_get.append(r['doi'])
res.append(enriched)
dois_info_dict = {}
for c in chunks(list(set(dois_to_get)), 1000):
dois_info = get_dois_info([{'doi': doi} for doi in c])
for info in dois_info:
doi = info['doi']
dois_info_dict[doi] = info
for p in res:
has_publication_oa = None
publication_access = []
publications_date = []
for r in p['references']:
if r.get('doi'):
doi = r.get('doi')
if doi in dois_info_dict:
r.update(dois_info_dict[doi])
if r.get('ReferenceType') in ['result']:
if isinstance(r.get('published_date'), str):
publications_date.append(r.get('published_date'))
if has_publication_oa is None:
has_publication_oa = False
oa_details = r.get('oa_details', {})
if len(oa_details) == 0:
continue
last_obs_date = max(r.get('observation_dates', []))
for obs_date in r.get('oa_details', {}):
if obs_date == last_obs_date:
oa_detail = oa_details[obs_date]
is_oa = oa_detail.get('is_oa', False)
publication_access.append(is_oa)
has_publication_oa = has_publication_oa or is_oa # at least one publi is oa
if publications_date:
p['first_publication_date'] = min(publications_date)
if isinstance(p.get('results_first_submit_date'), str) and isinstance(p.get('first_publication_date'), str):
p['first_results_or_publication_date'] = min(p['results_first_submit_date'], p['first_publication_date'])
elif isinstance(p.get('results_first_submit_date'), str):
p['first_results_or_publication_date'] = p['results_first_submit_date']
elif isinstance(p.get('first_publication_date'), str):
p['first_results_or_publication_date'] = p['first_publication_date']
if isinstance(p.get('first_results_or_publication_date'), str) and isinstance(p.get('study_completion_date'),
str):
p['delay_first_results_completion'] = (pd.to_datetime(p['first_results_or_publication_date']) - pd.to_datetime(
p['study_completion_date'])).days
p['has_publication_oa'] = has_publication_oa
p['publication_access'] = publication_access
return res
def enrich_ct(ct):
ct['study_start_year'] = None
if isinstance(ct.get('study_start_date'), str):
ct['study_start_year'] = int(ct['study_start_date'][0:4])
ct['study_completion_year'] = None
if isinstance(ct.get('study_completion_date'), str):
ct['study_completion_year'] = int(ct['study_completion_date'][0:4])
if isinstance(ct.get('study_start_date'), str) and isinstance(ct.get('study_first_submit_date'), str):
delay_submission_start = (
pd.to_datetime(ct['study_first_submit_date']) - pd.to_datetime(ct['study_start_date'])).days
ct['delay_submission_start'] = delay_submission_start
if isinstance(ct.get('study_start_date'), str) and isinstance(ct.get('study_first_submit_date'), str) \
and ct['study_start_date'] > ct['study_first_submit_date']:
ct['submission_temporality'] = 'before_start'
elif isinstance(ct.get('study_first_submit_date'), str) and isinstance(ct.get('study_completion_date'), str) \
and ct['study_completion_date'] >= ct['study_first_submit_date']:
ct['submission_temporality'] = 'during_study'
elif isinstance(ct.get('study_first_submit_date'), str) and isinstance(ct.get('study_completion_date'), str) \
and ct['study_completion_date'] < ct['study_first_submit_date']:
ct['submission_temporality'] = 'after_completion'
else:
ct['submission_temporality'] = None
if isinstance(ct.get('study_completion_date'), str) and isinstance(ct.get('study_start_date'), str):
delay_start_completion = (
| pd.to_datetime(ct['study_completion_date']) | pandas.to_datetime |
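The delay fields above are plain day differences between ISO date strings; a minimal sketch with made-up dates:

import pandas as pd

# Made-up ISO dates standing in for study_start_date / study_completion_date.
start, completion = "2018-05-01", "2020-02-15"
delay_start_completion = (pd.to_datetime(completion) - pd.to_datetime(start)).days
# -> 655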
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize("date_format", ["iso", "epoch"])
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
# GH28156: to_json not correctly formatting Timedelta
data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
if date_format == "iso":
expected = (
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
)
else:
expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
if as_object:
expected = expected.replace("}", ',"a":"a"}')
result = ser.to_json(date_format=date_format)
assert result == expected
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
tm.assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-10 05:00:00Z"),
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
Timestamp("2013-01-10 00:00:00-0500"),
],
)
def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
@pytest.mark.parametrize(
"tz_range",
[
pd.date_range("2013-01-01 05:00:00Z", periods=2),
pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
pd.date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
assert dumps(tz_range, iso_dates=True) == exp
        dti = pd.DatetimeIndex(tz_range)
        assert dumps(dti, iso_dates=True) == exp
        # the DataFrame case uses the dfexp string defined above
        df = DataFrame({"DT": dti})
        result = dumps(df, iso_dates=True)
        assert result == dfexp
import copy
import itertools
import multiprocessing
import string
import traceback
import warnings
from multiprocessing import Pool
from operator import itemgetter
import jellyfish as jf
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy.stats import wasserstein_distance
from simod.configuration import Configuration, Metric
from . import alpha_oracle as ao
from .alpha_oracle import Rel
from ..support_utils import progress_bar_async
class SimilarityEvaluator:
"""Evaluates the similarity of two event-logs."""
def __init__(self, log_data: pd.DataFrame, simulation_data: pd.DataFrame, settings: Configuration, max_cases=500,
dtype='log'):
self.dtype = dtype
self.log_data = copy.deepcopy(log_data)
self.simulation_data = copy.deepcopy(simulation_data)
self.max_cases = max_cases
self.one_timestamp = settings.read_options.one_timestamp
self._preprocess_data(dtype)
def _preprocess_data(self, dtype):
preprocessor = self._get_preprocessor(dtype)
return preprocessor()
def _get_preprocessor(self, dtype):
if dtype == 'log':
return self._preprocess_log
elif dtype == 'serie':
return self._preprocess_serie
else:
raise ValueError(dtype)
def _preprocess_log(self):
self.ramp_io_perc = 0.2
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
data = pd.concat([self.log_data, self.simulation_data], axis=0, ignore_index=True)
if (('processing_time' not in data.columns) or ('waiting_time' not in data.columns)):
data = self.calculate_times(data)
data = self.scaling_data(data)
# save data
self.log_data = data[data.source == 'log']
self.simulation_data = data[data.source == 'simulation']
self.alias = self.create_task_alias(data, 'task')
self.alpha_concurrency = ao.AlphaOracle(self.log_data, self.alias, self.one_timestamp, True)
# reformat and sampling data
self.log_data = self.reformat_events(self.log_data.to_dict('records'), 'task')
self.simulation_data = self.reformat_events(self.simulation_data.to_dict('records'), 'task')
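        # drop the first and last 20% of simulated traces (simulation ramp-up and
        # ramp-down) and sample the same number of real traces for a fair comparison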
num_traces = int(len(self.simulation_data) * self.ramp_io_perc)
self.simulation_data = self.simulation_data[num_traces:-num_traces]
self.log_data = list(map(lambda i: self.log_data[i],
np.random.randint(0, len(self.log_data), len(self.simulation_data))))
def _preprocess_serie(self):
# load data
self.log_data['source'] = 'log'
self.simulation_data['source'] = 'simulation'
def measure_distance(self, metric: Metric, verbose=False):
"""
        Measures the distance between two event logs
        with TSD or DL and MAE distance
Returns
-------
distance : float
"""
self.verbose = verbose
# similarity measurement and matching
evaluator = self._get_evaluator(metric)
if metric in [Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
distance = evaluator(self.log_data, self.simulation_data, criteria=metric)
else:
distance = evaluator(self.log_data, self.simulation_data, metric)
self.similarity = {'metric': metric, 'sim_val': np.mean([x['sim_score'] for x in distance])}
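
    # Usage sketch (hypothetical variable names; assumes both dataframes follow the
    # event-log schema described by ``settings.read_options``):
    #   evaluator = SimilarityEvaluator(log_df, sim_df, settings)
    #   evaluator.measure_distance(Metric.DL)
    #   print(evaluator.similarity)   # {'metric': Metric.DL, 'sim_val': <mean score>}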
def _get_evaluator(self, metric: Metric):
if self.dtype == 'log':
if metric in [Metric.TSD, Metric.DL, Metric.MAE, Metric.DL_MAE]:
return self._evaluate_seq_distance
elif metric is Metric.LOG_MAE:
return self.log_mae_metric
elif metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.log_emd_metric
else:
raise ValueError(metric)
elif self.dtype == 'serie':
if metric in [Metric.HOUR_EMD, Metric.DAY_EMD, Metric.DAY_HOUR_EMD, Metric.CAL_EMD]:
return self.serie_emd_metric
else:
raise ValueError(metric)
else:
raise ValueError(self.dtype)
# =============================================================================
# Timed string distance
# =============================================================================
def _evaluate_seq_distance(self, log_data, simulation_data, metric: Metric):
"""
Timed string distance calculation
Parameters
----------
log_data : Ground truth list
simulation_data : List
Returns
-------
similarity : tsd similarity
"""
similarity = list()
# define the type of processing sequencial or parallel
cases = len(set([x['caseid'] for x in log_data]))
if cases <= self.max_cases:
args = (metric, simulation_data, log_data,
self.alpha_concurrency.oracle,
({'min': 0, 'max': len(simulation_data)},
{'min': 0, 'max': len(log_data)}))
df_matrix = self._compare_traces(args)
else:
cpu_count = multiprocessing.cpu_count()
mx_len = len(log_data)
ranges = self.define_ranges(mx_len, int(np.ceil(cpu_count / 2)))
ranges = list(itertools.product(*[ranges, ranges]))
reps = len(ranges)
pool = Pool(processes=cpu_count)
# Generate
args = [(metric, simulation_data[r[0]['min']:r[0]['max']],
log_data[r[1]['min']:r[1]['max']],
self.alpha_concurrency.oracle,
r) for r in ranges]
p = pool.map_async(self._compare_traces, args)
if self.verbose:
progress_bar_async(p, f'evaluating {metric}:', reps)
pool.close()
# Save results
df_matrix = pd.concat(list(p.get()), axis=0, ignore_index=True)
df_matrix.sort_values(by=['i', 'j'], inplace=True)
df_matrix = df_matrix.reset_index().set_index(['i', 'j'])
if metric == Metric.DL_MAE:
dl_matrix = df_matrix[['dl_distance']].unstack().to_numpy()
mae_matrix = df_matrix[['mae_distance']].unstack().to_numpy()
# MAE normalized
max_mae = mae_matrix.max()
mae_matrix = np.divide(mae_matrix, max_mae)
            # weight both matrices by beta equal to 0.5
            dl_matrix = np.multiply(dl_matrix, 0.5)
            mae_matrix = np.multiply(mae_matrix, 0.5)
            # add the two matrices element-wise
cost_matrix = np.add(dl_matrix, mae_matrix)
else:
cost_matrix = df_matrix[['distance']].unstack().to_numpy()
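        # optimal one-to-one matching between simulated and real traces
        # (Hungarian algorithm on the pairwise cost matrix)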
row_ind, col_ind = linear_sum_assignment(np.array(cost_matrix))
# Create response
for idx, idy in zip(row_ind, col_ind):
similarity.append(dict(caseid=simulation_data[idx]['caseid'],
sim_order=simulation_data[idx]['profile'],
log_order=log_data[idy]['profile'],
sim_score=(cost_matrix[idx][idy]
if metric == Metric.MAE else
(1 - (cost_matrix[idx][idy])))
)
)
return similarity
@staticmethod
def _compare_traces(args):
def ae_distance(et_1, et_2, st_1, st_2):
cicle_time_s1 = (et_1 - st_1).total_seconds()
cicle_time_s2 = (et_2 - st_2).total_seconds()
ae = np.abs(cicle_time_s1 - cicle_time_s2)
return ae
def tsd_alpha(s_1, s_2, p_1, p_2, w_1, w_2, alpha_concurrency):
"""
Compute the Damerau-Levenshtein distance between two given
strings (s_1 and s_2)
Parameters
----------
            s_1, s_2 : lists
                activity sequences to compare
            p_1, p_2, w_1, w_2 : lists
                normalized processing and waiting times per activity
            alpha_concurrency : dict
Returns
-------
Float
"""
def calculate_cost(s1_idx, s2_idx):
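                # weight the processing-time and waiting-time differences by the share
                # of processing time in the total activity time of the first sequence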
t_1 = p_1[s1_idx] + w_1[s1_idx]
if t_1 > 0:
b_1 = (p_1[s1_idx] / t_1)
cost = ((b_1 * np.abs(p_2[s2_idx] - p_1[s1_idx])) +
((1 - b_1) * np.abs(w_2[s2_idx] - w_1[s1_idx])))
else:
cost = 0
return cost
dist = {}
lenstr1 = len(s_1)
lenstr2 = len(s_2)
for i in range(-1, lenstr1 + 1):
dist[(i, -1)] = i + 1
for j in range(-1, lenstr2 + 1):
dist[(-1, j)] = j + 1
for i in range(0, lenstr1):
for j in range(0, lenstr2):
if s_1[i] == s_2[j]:
cost = calculate_cost(i, j)
else:
cost = 1
dist[(i, j)] = min(
dist[(i - 1, j)] + 1, # deletion
dist[(i, j - 1)] + 1, # insertion
dist[(i - 1, j - 1)] + cost # substitution
)
if i and j and s_1[i] == s_2[j - 1] and s_1[i - 1] == s_2[j]:
if alpha_concurrency[(s_1[i], s_2[j])] == Rel.PARALLEL:
cost = calculate_cost(i, j - 1)
dist[(i, j)] = min(dist[(i, j)], dist[i - 2, j - 2] + cost) # transposition
return dist[lenstr1 - 1, lenstr2 - 1]
def gen(metric: Metric, serie1, serie2, oracle, r):
"""Reads the simulation results stats"""
try:
df_matrix = list()
for i, s1_ele in enumerate(serie1):
for j, s2_ele in enumerate(serie2):
element = {'i': r[0]['min'] + i, 'j': r[1]['min'] + j}
if metric in [Metric.TSD, Metric.DL, Metric.DL_MAE]:
element['s_1'] = s1_ele['profile']
element['s_2'] = s2_ele['profile']
element['length'] = max(len(s1_ele['profile']), len(s2_ele['profile']))
if metric is Metric.TSD:
element['p_1'] = s1_ele['proc_act_norm']
element['p_2'] = s2_ele['proc_act_norm']
element['w_1'] = s1_ele['wait_act_norm']
element['w_2'] = s2_ele['wait_act_norm']
if metric in [Metric.MAE, Metric.DL_MAE]:
element['et_1'] = s1_ele['end_time']
element['et_2'] = s2_ele['end_time']
element['st_1'] = s1_ele['start_time']
element['st_2'] = s2_ele['start_time']
df_matrix.append(element)
                df_matrix = pd.DataFrame(df_matrix)
                # The distance columns below are inferred from how they are consumed in
                # _evaluate_seq_distance ('distance', 'dl_distance', 'mae_distance').
                if metric is Metric.TSD:
                    df_matrix['distance'] = df_matrix.apply(
                        lambda x: tsd_alpha(x.s_1, x.s_2, x.p_1, x.p_2,
                                            x.w_1, x.w_2, oracle) / x.length, axis=1)
                elif metric is Metric.DL:
                    df_matrix['distance'] = df_matrix.apply(
                        lambda x: jf.damerau_levenshtein_distance(
                            ''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)
                elif metric is Metric.MAE:
                    df_matrix['distance'] = df_matrix.apply(
                        lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)
                elif metric is Metric.DL_MAE:
                    df_matrix['dl_distance'] = df_matrix.apply(
                        lambda x: jf.damerau_levenshtein_distance(
                            ''.join(x.s_1), ''.join(x.s_2)) / x.length, axis=1)
                    df_matrix['mae_distance'] = df_matrix.apply(
                        lambda x: ae_distance(x.et_1, x.et_2, x.st_1, x.st_2), axis=1)
                else:
                    raise ValueError(metric)
                return df_matrix
            except Exception:
                traceback.print_exc()

        return gen(*args)
""" Normalization methods
This script allows the user to perform NMR spesific methods
This script requires that `pandas` and 'numpy' be installed within the Python
environment you are running this script in.
This file can also be imported as a module and contains the following methods:
* Mean normalizastion - Each spectrum is divided by its mean so that its mean becomes 1
* Median normalization - Each spectrum is divided by its mean so that its median becomes 1
* Quantile normalization - Each spectrum is divided by its mean so that its first quartile becomes 1
* Peak normalization Each spectrum divided by the maximum peak in peak range
* PQN normalization - Perform Probabilistic Quotient Normalization
"""
import pandas as pd
import numpy as np
def mean_normalization(spectrum, verbose=False):
"""
Each spectrum is divided by its mean so that its mean becomes 1
    :param spectrum: dataframe of the spectrum values; the index holds the cases and the columns hold the ppm values
    :type spectrum: pandas dataframe or numpy array
    :param verbose: if set to True, print the scaling factors; defaults to False
    :type verbose: bool
    :return: normalized spectrum
    :rtype: pandas dataframe or numpy array
"""
# Data initialisation and checks
    assert type(verbose) == bool, "verbose must be boolean"
    # Normalization
    # get the factor to divide the data
    factor = spectrum.mean(axis=1)
    # if verbose is True, print the factors
    if verbose == True:
        print("factors : ", factor)
    # create new dataframe to store the data
new_data = pd.DataFrame(index=spectrum.index , columns=spectrum.columns)
    for i in range(spectrum.values.shape[0]):
        new_data.iloc[i] = spectrum.iloc[i] / factor.iloc[i]
return new_data
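
# Usage sketch (hypothetical toy data): the row-wise loop above is equivalent to the
# vectorized form ``spectrum.div(spectrum.mean(axis=1), axis=0)``.
#   example = pd.DataFrame([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]], columns=['0.5', '1.0', '1.5'])
#   mean_normalization(example).mean(axis=1)   # every row now has mean 1.0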
# median normalization
def median_normalization(spectrum, verbose=False):
"""
    Each spectrum is divided by its median so that its median becomes 1
    :param spectrum: dataframe of the spectrum values; the index holds the cases and the columns hold the ppm values
    :type spectrum: pandas dataframe or numpy array
    :param verbose: if set to True, print the scaling factors; defaults to False
    :type verbose: bool
    :return: normalized spectrum
:rtype: pandas dataframe or numpy array
"""
# Data initialisation and checks
assert type(verbose) == bool, "verbose must be boolean"
# Normalization
    # get the factor to divide the data
factor = spectrum.median(axis=1)
if verbose == True:
print("factors : ", factor)
# create new dataframe to store the data
    new_data = pd.DataFrame(index=spectrum.index, columns=spectrum.columns)
    for i in range(spectrum.values.shape[0]):
        new_data.iloc[i] = spectrum.iloc[i] / factor.iloc[i]
    return new_data
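
# The module docstring above also lists PQN (Probabilistic Quotient Normalization),
# which is not shown in this excerpt. The sketch below is an assumption following the
# common PQN recipe, not the author's original implementation: integral-normalize each
# spectrum, compute quotients against a median reference spectrum, then divide each
# spectrum by the median of its quotients.
def pqn_normalization_sketch(spectrum, verbose=False):
    assert type(verbose) == bool, "verbose must be boolean"
    # integral (sum) normalization as the starting point
    normalized = spectrum.div(spectrum.sum(axis=1), axis=0)
    # median spectrum of the normalized data acts as the reference
    reference = normalized.median(axis=0)
    # per-spectrum dilution factor: median of the quotients against the reference
    quotients = normalized.div(reference, axis=1)
    factor = quotients.median(axis=1)
    if verbose:
        print("factors : ", factor)
    return normalized.div(factor, axis=0)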
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
    def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two columns two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
          of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
# Run
transformed = pd.DataFrame({
            'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
            'c': [1, 2],
            '#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
        })
        out = instance.reverse_transform(transformed)

        # Assert
        expected_out = pd.DataFrame({
            'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
            'c': [1, 2],
            'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
        })
        pd.testing.assert_frame_equal(out, expected_out)
import os
import requests
import time
from datetime import datetime
import pandas as pd
import re
from threading import Thread
def get_home_dir():
cwd = os.getcwd()
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == 'crypto_predict']
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
def get_all_currency_pairs(show_mkts=False):
res = requests.get('https://bittrex.com/api/v1.1/public/getmarkets')
if res.json()['success']:
markets = res.json()['result']
market_names = []
for m in markets:
if show_mkts:
print(m['MarketName'])
market_names.append(m['MarketName'])
return market_names
else:
print('error! ', res.json()['message'])
return None
HOME_DIR = get_home_dir()
MARKETS = get_all_currency_pairs()
# just for private stuff, don't run for now
if False:
    import hmac
    import hashlib
    apikey = os.environ.get('btx_key')
    apisecret = os.environ.get('btx_sec')
    nonce = str(time.time())
    uri = 'https://bittrex.com/api/v1.1/market/getopenorders?apikey=' + apikey + '&nonce=' + nonce
    # sign the full request URI with the API secret (HMAC-SHA512) as the Bittrex v1.1 API expects
    sign = hmac.new(apisecret.encode(), uri.encode(), hashlib.sha512).hexdigest()
    r = requests.get(uri, headers={'apisign': sign})
def get_all_summaries():
res = requests.get('https://bittrex.com/api/v1.1/public/getmarketsummaries')
if res.json()['success']:
summary = res.json()['result']
return summary
else:
print('error! ', res.json()['message'])
return None
def get_ticker(m):
res = requests.get('https://bittrex.com/api/v1.1/public/getticker?market=' + m)
if res.json()['success']:
t = res.json()['result']
if t is None:
print('error for', m + '!', 'result was None. Message:', res.json()['message'])
return None
return t
else:
print('error for', m + '!', res.json()['message'])
return None
def get_all_tickers():
tickers = []
for m in MARKETS:
res = requests.get('https://bittrex.com/api/v1.1/public/getticker?market=' + m)
if res.json()['success']:
t = res.json()['result']
if t is None:
print('error for', m + '!', 'result was None. Message:', res.json()['message'])
continue
t['MarketName'] = m
tickers.append(t)
else:
print('error for', m + '!', res.json()['message'])
df = | pd.io.json.json_normalize(tickers) | pandas.io.json.json_normalize |
# -*- coding: utf-8 -*-
"""CICID1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1q-T0VLplhSabpHZXApgXDZsoW7aG3Hnw
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import time
from sklearn.metrics import accuracy_score
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
#for dirname, _, filenames in os.walk('/content/drive/My Drive/Colab Notebooks/kshield_project/dataset'):
# for filename in filenames:
# print(filename)
#print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
#/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv
pd.set_option('display.float_format', '{:.5f}'.format)
df1=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
df2=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv")
df3=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Morning.pcap_ISCX.csv")
df4=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Monday-WorkingHours.pcap_ISCX.csv")
df5=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv")
df6= | pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Thursday-WorkingHours-Morning-WebAttacks.pcap_ISCX.csv") | pandas.read_csv |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.utils import shuffle
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
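# Hedged usage sketch (the file and algorithm names below are illustrative, not from the
# original pipeline): archive_results expects an ../archive directory to exist and copies
# the generating script next to the saved predictions.
def _example_archive_results():
    predictions = pd.DataFrame({'patient_id': [1, 2], 'predict_screener': [0.1, 0.9]})
    archive_results('submission.csv', predictions, 'xgboost', 'cv_script.py')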
###############################################################################################
def preprocess_data(train,test):
y=train['is_screener']
id_test=test['patient_id']
train=train.drop(['patient_id','is_screener'],axis=1)
test=test.drop(['patient_id'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
os.chdir('/home/cuoco/KC/cervical-cancer-screening/src')
trainfile=('../input/patients_train.csv.gz')
testfile=('../input/patients_test.csv.gz')
train=pd.read_csv(trainfile,low_memory=False )
test=pd.read_csv(testfile,low_memory=False )
train_ex_file=('../input/train_patients_to_exclude.csv.gz')
train_ex=pd.read_csv(train_ex_file,low_memory=False)
train=train[train.patient_id.isin(train_ex.patient_id)==False]
test_ex_file=('../input/test_patients_to_exclude.csv.gz')
test_ex=pd.read_csv(test_ex_file,low_memory=False)
test=test[test.patient_id.isin(test_ex.patient_id)==False]
print(train.shape,test.shape)
surgical=pd.read_csv('../features/surgical_pap.csv.gz')
diagnosis=pd.read_csv('../features/diagnosis_hpv.csv.gz')
procedure_cervi=pd.read_csv('../features/procedure_cervi.csv.gz')
procedure_hpv=pd.read_csv('../features/procedure_hpv.csv.gz')
procedure_vaccine= | pd.read_csv('../features/procedure_vaccine.csv.gz') | pandas.read_csv |
"""Utility functions for all of the functions submodule."""
from itertools import chain
import fnmatch
from collections.abc import Callable as dispatch_callable
import re
from typing import Hashable, Iterable, List, Optional, Pattern, Union
import pandas as pd
from janitor.utils import check, _expand_grid
from pandas.api.types import (
union_categoricals,
is_scalar,
is_extension_array_dtype,
is_list_like,
)
import numpy as np
from multipledispatch import dispatch
from janitor.utils import check_column
import functools
def unionize_dataframe_categories(
*dataframes, column_names: Optional[Iterable[pd.CategoricalDtype]] = None
) -> List[pd.DataFrame]:
"""
Given a group of dataframes which contain some categorical columns, for
each categorical column present, find all the possible categories across
all the dataframes which have that column.
Update each dataframes' corresponding column with a new categorical object
that contains the original data
but has labels for all the possible categories from all dataframes.
This is useful when concatenating a list of dataframes which all have the
same categorical columns into one dataframe.
If, for a given categorical column, all input dataframes do not have at
least one instance of all the possible categories,
Pandas will change the output dtype of that column from `category` to
`object`, losing out on dramatic speed gains you get from the former
format.
Usage example for concatenation of categorical column-containing
dataframes:
Instead of:
```python
concatenated_df = pd.concat([df1, df2, df3], ignore_index=True)
```
which in your case has resulted in `category` -> `object` conversion,
use:
```python
unionized_dataframes = unionize_dataframe_categories(df1, df2, df2)
concatenated_df = pd.concat(unionized_dataframes, ignore_index=True)
```
:param dataframes: The dataframes you wish to unionize the categorical
objects for.
:param column_names: If supplied, only unionize this subset of columns.
:returns: A list of the category-unioned dataframes in the same order they
were provided.
:raises TypeError: If any of the inputs are not pandas DataFrames.
"""
if any(not isinstance(df, pd.DataFrame) for df in dataframes):
raise TypeError("Inputs must all be dataframes.")
if column_names is None:
# Find all columns across all dataframes that are categorical
column_names = set()
for dataframe in dataframes:
column_names = column_names.union(
[
column_name
for column_name in dataframe.columns
if isinstance(
dataframe[column_name].dtype, pd.CategoricalDtype
)
]
)
else:
column_names = [column_names]
# For each categorical column, find all possible values across the DFs
category_unions = {
column_name: union_categoricals(
[df[column_name] for df in dataframes if column_name in df.columns]
)
for column_name in column_names
}
# Make a shallow copy of all DFs and modify the categorical columns
# such that they can encode the union of all possible categories for each.
refactored_dfs = []
for df in dataframes:
df = df.copy(deep=False)
for column_name, categorical in category_unions.items():
if column_name in df.columns:
df[column_name] = pd.Categorical(
df[column_name], categories=categorical.categories
)
refactored_dfs.append(df)
return refactored_dfs
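# Minimal illustrative sketch (toy frames, not from the library's docs): after unionizing,
# concatenation keeps the `category` dtype even though the two frames have disjoint labels.
def _example_unionize_dataframe_categories():
    df1 = pd.DataFrame({"col": pd.Categorical(["a", "b"])})
    df2 = pd.DataFrame({"col": pd.Categorical(["c"])})
    unionized = unionize_dataframe_categories(df1, df2)
    combined = pd.concat(unionized, ignore_index=True)
    return combined["col"].dtype  # CategoricalDtype with categories ['a', 'b', 'c']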
def patterns(regex_pattern: Union[str, Pattern]) -> Pattern:
"""
This function converts a string into a compiled regular expression;
it can be used to select columns in the index or columns_names
arguments of `pivot_longer` function.
:param regex_pattern: string to be converted to compiled regular
expression.
    :returns: A compiled regular expression built from the provided
        `regex_pattern`.
"""
check("regular expression", regex_pattern, [str, Pattern])
return re.compile(regex_pattern)
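# Small usage sketch (the column pattern is an assumption): the compiled pattern can be
# handed to selectors such as `pivot_longer`'s index/column_names arguments.
def _example_patterns():
    week_columns = patterns(r"^wk\d+$")
    return bool(week_columns.match("wk10"))  # True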
def _computations_expand_grid(others: dict) -> pd.DataFrame:
"""
Creates a cartesian product of all the inputs in `others`.
Combines NumPy's `mgrid`, with the `take` method in NumPy/pandas
to expand each input to the length of the cumulative product of
all inputs in `others`.
There is a performance penalty for small entries
    (length less than 10)
in using this method, instead of `itertools.product`;
however, there are significant performance benefits
as the size of the data increases.
Another benefit of this approach, in addition to the significant
performance gains, is the preservation of data types.
This is particularly relevant for pandas' extension arrays `dtypes`
(categoricals, nullable integers, ...).
A DataFrame of all possible combinations is returned.
"""
for key in others:
check("key", key, [Hashable])
grid = {}
for key, value in others.items():
if is_scalar(value):
value = pd.DataFrame([value])
elif (not isinstance(value, pd.Series)) and is_extension_array_dtype(
value
):
value = | pd.DataFrame(value) | pandas.DataFrame |
from google.protobuf.symbol_database import Default
import nltk
import random
import pickle
from nltk.corpus.reader.chasen import test
from pandas.core.indexes import period
from statsmodels.tsa.seasonal import _extrapolate_trend
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from numpy.lib.function_base import append, select
lemmatizer = WordNetLemmatizer()
import pandas as pd
import yfinance as yf
import streamlit as st
import statsmodels.api as sm
import datetime as dt
import plotly.graph_objects as go
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import requests
import json
import numpy as np
from keras.models import load_model
from bs4 import BeautifulSoup
import csv
from requests.exceptions import ConnectionError, ReadTimeout
words=pickle.load(open('words.pkl','rb'))
classes=pickle.load(open('classes.pkl','rb'))
model = load_model("stock_model.h5")
intents=json.loads(open('training.json').read())
def calcMovingAverage(data, size):
df = data.copy()
df['sma'] = df['Adj Close'].rolling(size).mean()
df['ema'] = df['Adj Close'].ewm(span=size, min_periods=size).mean()
df.dropna(inplace=True)
return df
def calc_macd(data):
df = data.copy()
df['ema12'] = df['Adj Close'].ewm(span=12, min_periods=12).mean()
df['ema26'] = df['Adj Close'].ewm(span=26, min_periods=26).mean()
df['macd'] = df['ema12'] - df['ema26']
df['signal'] = df['macd'].ewm(span=9, min_periods=9).mean()
df.dropna(inplace=True)
return df
def calcBollinger(data, size):
df = data.copy()
df["sma"] = df['Adj Close'].rolling(size).mean()
df["bolu"] = df["sma"] + 2*df['Adj Close'].rolling(size).std(ddof=0)
df["bold"] = df["sma"] - 2*df['Adj Close'].rolling(size).std(ddof=0)
df["width"] = df["bolu"] - df["bold"]
df.dropna(inplace=True)
return df
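# Hedged usage sketch (ticker and window sizes are illustrative): the three helpers above
# are normally chained on a raw yfinance download that contains an 'Adj Close' column.
def _example_indicator_helpers():
    end = dt.datetime.today()
    start = end - dt.timedelta(365)
    raw = yf.download('AAPL', start, end)
    ma = calcMovingAverage(raw, 20)   # adds 'sma' and 'ema'
    macd = calc_macd(raw)             # adds 'macd' and 'signal'
    boll = calcBollinger(raw, 20)     # adds 'bolu', 'bold' and 'width'
    return ma, macd, boll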
def graphMyStock(finalvar,a,b,col):
stock2 = yf.Ticker(finalvar)
info2=stock2.info
ln2=info2['longName']
opt1b, opt2b = st.beta_columns(2)
with opt1b:
numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
with opt2b:
windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
end2 = dt.datetime.today()
livedata2 = yf.download(finalvar,start2,end2)
df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
df_ma2 = df_ma2.reset_index()
fig2 = go.Figure()
fig2.add_trace(
go.Scatter(
x = df_ma2['Date'],
y = df_ma2['Adj Close'],
name = '('+ finalvar+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col)
)
)
fig2.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig2.update_layout(legend_title_text='Trend')
fig2.update_yaxes(tickprefix="$")
st.plotly_chart(fig2, use_container_width=True)
def graphAllStocks(stocka,stockb,stockc,a,b,col1,col2,col3):
stock2 = yf.Ticker(stocka)
info2=stock2.info
ln2=info2['longName']
st.write('')
st.subheader('**Graph of optimal stocks:** ')
opt1b, opt2b = st.beta_columns(2)
with opt1b:
numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
with opt2b:
windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
end2 = dt.datetime.today()
livedata2 = yf.download(stocka,start2,end2)
df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
df_ma2 = df_ma2.reset_index()
fig2 = go.Figure()
fig2.add_trace(
go.Scatter(
x = df_ma2['Date'],
y = df_ma2['Adj Close'],
name = '('+ stocka+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col1)
)
)
livedata2=yf.download(stockb,start2,end2)
df_ma2= calcMovingAverage(livedata2, windowSizeMAb)
df_ma2= df_ma2.reset_index()
fig2.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ stockb+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col2)
))
livedata3=yf.download(stockc,start2,end2)
df_ma3= calcMovingAverage(livedata3, windowSizeMAb)
df_ma3= df_ma3.reset_index()
fig2.add_trace(
go.Scatter(
x=df_ma3['Date'],
y=df_ma3['Adj Close'],
name = '('+ stockc+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col3)
))
fig2.update_layout(showlegend=True,legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
))
fig2.update_layout(legend_title_text='Trend')
fig2.update_yaxes(tickprefix="$")
st.plotly_chart(fig2, use_container_width=True)
def RootWordGen(lw):
j=nltk.word_tokenize(lw)
j= [lemmatizer.lemmatize(word.lower()) for word in j]
return(j)
def matrix(sentence, words, show_details=True):
    sentence_words= RootWordGen(sentence)
    # bag-of-words vector: one slot per vocabulary word, preset to 0
    bag = [0]*len(words)
    for s in sentence_words:
        # traverse the lemmatized root words of the input sentence
        for i,w in enumerate(words):
            # i is the position of the vocabulary word, w is the unique word itself
            if w == s:
                # assign 1 if the current word sits at this vocabulary position
                bag[i] = 1
                if show_details:
                    # report which vocabulary word the input word matched
                    print ("found in bag: %s" % w)
    # return the bag-of-words vector as a numpy array
    return(np.array(bag))
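# Small sketch (toy vocabulary standing in for the pickled `words` list): `matrix` returns
# a 0/1 bag-of-words row; only 'price' is matched for this sentence.
def _example_matrix():
    toy_words = ['stock', 'price', 'rise']
    return matrix('Prices are rising', toy_words, show_details=False)  # array([0, 1, 0])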
def predict_class(sentence, model):
# filter out predictions below a threshold probability
pred= matrix(sentence, words,show_details=False)
res = model.predict(np.array([pred]))[0]
ERROR_THRESHOLD = 0.25
global results
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
global results1
results1 = [[i,r] for i,r in enumerate(res)]
print(results)
#for guesses above threshold
#f=open('r.txt','w')
#for all guesses
#f1=open('s.txt','w')
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
results1.sort(key=lambda x: x[1], reverse=True)
pr=results1[0]
global pp
pp=pr[1]
print(pp)
global return_list
return_list = []
global return_list1
return_list1=[]
for r in results1:
return_list1.append({"intent": classes[r[0]], "probability": str(r[1])})
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
#for x in return_list1:
# f1.write(str(x))
#for x in return_list:
#print(x)
#f.write(str(x))
return return_list[0]
def getResponse(ints, intents_json):
global tag
tag = ints[0]['intent']
print(tag)
list_of_intents = intents_json['intents']
for i in list_of_intents:
if(i['tag']== tag):
result = random.choice(i['responses'])
break
return result
def FinalPrediction(msg):
ints = predict_class(msg, model)
res = getResponse(ints, intents)
return res
stockdata = pd.read_csv("SP500.csv")
symbols = stockdata['Symbol'].sort_values().tolist()
st.title('Investment Optimizer and Stock Growth Predictor')
#We'll add this when we come up with something
expander=st.beta_expander(label='',expanded=False)
expander.write("This application aims at evaluating stock trends and current news to predict it's future growth. It provides a clean and efficient user interface to view current prices and fluctuation history. It also provides a tool to identify an ideal combination of stocks that one should invest in based on the given budget, using our machine learning and optimization algorithm. We have named our ML model 'ATHENA', which stands for Algorithmic Enhancer")
st.write("")
st.write("")
st.write('**Would you like to know where to invest or understand each Stock?**')
a=st.radio("", ("Invest", "Understand"))
if(a=="Invest"):
budget=st.sidebar.number_input("Enter your budget ($): ")
if(st.sidebar.button("Enter")):
st.header("")
st.header("**Following is the combination of stocks you should invest in: ** ")
st.write("")
st.write('Processing...')
invest=[]
invstock_sym=[]
invstock_name=[]
f= open("SP500.csv",'r')
rd=csv.reader(f)
for x in rd:
if x!=[]:
if x[2]=='badboy':
invstock_sym.append(x[0])
invstock_name.append(x[1])
invstock_price=[]
for ticker in invstock_sym:
ticker_yahoo = yf.Ticker(ticker)
data = ticker_yahoo.history()
last_quote = (data.tail(1)['Close'].iloc[0])
invstock_price.append(float(last_quote))
invstock_conf=[]
st.markdown("""
<style>
.stProgress .st-bo {
background-color: green;
}
</style>
""", unsafe_allow_html=True)
my_bar=st.progress(0)
progresscount=10
for badgirl in invstock_name:
checkerb=0
try:
send="https://www.google.com/search?q=should+you+invest+in+ "+badgirl.lower()+" stock"
res=requests.get(send)
except ReadTimeout:
checkerb=checkerb+1
        except (ConnectionError, ConnectionAbortedError, ConnectionRefusedError):
checkerb=checkerb+1
else:
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
count=0
for i in soup.select("a"):
if count==1:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
count+=1
list1=[]
c=0
for i in all_links:
if c==1:
break
option=requests.get(i)
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select("p")
for j in pageinfo:
m=j.text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
find=predict_class(tex,model)
varun=[]
varun.append(float(find['probability']))
varun.append(find['intent'])
invstock_conf.append(varun)
progresscount=progresscount+10
my_bar.progress(progresscount)
stocks={}
for i in range(len(invstock_name)):
temp=[]
if invstock_conf[i][1]=='up':
temp.append(invstock_conf[i][0])
temp.append(invstock_price[i])
temp.append(invstock_name[i])
temp.append(invstock_sym[i])
length= len(stocks)
stocks[length]=temp
###### NEED TO GET "STOCKS" DICTIONARY DATA FROM ########
all_stocks={}
for i in range(len(stocks)):
if((budget >= stocks[i][1]) and (stocks[i][0]>0.5)):
n=len(all_stocks)
all_stocks[n]=[stocks[i][0], stocks[i][1], stocks[i][2], stocks[i][3]]
if len(all_stocks)>=3:
st.balloons()
quad1={}
quad2={}
quad3={}
quad4={}
for i in range(len(all_stocks)):
if((all_stocks[i][0]>=0.8) and (all_stocks[i][1]<=100)):
quad1[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]>=0.8) and (all_stocks[i][1]>100)):
quad2[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]<0.8) and (all_stocks[i][1]<=100)):
quad3[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
else:
quad4[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
def inputs(quad):
global invest
spq=[]
for i in quad:
spq.append(quad[i][1])
length=len(spq)
for i in range(length):
if(len(invest)==3):
break
minval=min(spq)
for i in quad:
if(quad[i][1]==minval):
invest.append(quad[i])
spq.remove(minval)
inputs(quad1)
if(len(invest)<3):
inputs(quad2)
if(len(invest)<3):
inputs(quad3)
if(len(invest)<3):
inputs(quad4)
#stock1 should get 60%
#stock2 should get 30%
#stock3 should get 10%
s1=budget*0.6
s2=budget*0.3
s3=budget*0.1
n_s1=s1//invest[0][1]
n_s2=s2//invest[1][1]
n_s3=s3//invest[2][1]
left=budget-invest[0][1]*n_s1-invest[1][1]*n_s2-invest[2][1]*n_s3
invest_val=[]
for i in range(3):
invest_val.append(invest[i][1])
a_s1=0
a_s2=0
a_s3=0
a_s3=left//invest[2][1]
left=left-a_s3*invest[2][1]
a_s2=left//invest[1][1]
left=left-a_s2*invest[1][1]
a_s1=left//invest[0][1]
left=left-a_s1*invest[0][1]
t_s1=n_s1+a_s1
t_s2=n_s2+a_s2
t_s3=n_s3+a_s3
st.write("")
st.subheader('**Summary:** ')
summary_table={}
names=[]
prices=[]
nstocks=[]
totalcosts=[]
confidences=[]
for i in range(len(invest)):
names.append(invest[i][2])
prices.append(invest[i][1])
if(i==0):
nstocks.append(t_s1)
tcost=t_s1*invest[i][1]
totalcosts.append(tcost)
if(i==1):
nstocks.append(t_s2)
tcost=t_s2*invest[i][1]
totalcosts.append(tcost)
if(i==2):
nstocks.append(t_s3)
tcost=t_s3*invest[i][1]
totalcosts.append(tcost)
confidences.append(invest[i][0])
summary_table["Stock Name"]=names
summary_table["Cost per Stock"]=prices
summary_table["Number to Purchase"]=nstocks
summary_table["Total Cost"]=totalcosts
summary_table["Our Confidence"]=confidences
column_order=["Stock Name", "Cost per Stock", "Number to Purchase", "Total Cost", "Our Confidence"]
summary_df=pd.DataFrame(data=summary_table)
st.dataframe(summary_df)
st.write("")
bala='**Your balance:** '+ '_$' + str(left) +'_'
st.write(bala)
graphAllStocks(invest[0][3],invest[1][3],invest[2][3],14,15,'royalblue','springgreen','indianred')
st.header('**In depth review:** ')
st.write('')
text1='Your first stock: ' + '_' + str(invest[0][2]) + '_'
st.header(text1)
graphMyStock(invest[0][3],1,2,'royalblue')
text1a='**Price:** '+ '_$'+ str(invest[0][1]) + '_'
st.write(text1a)
text1b='**Number of stocks you should buy:** '+ '_' + str(t_s1) + '_'
st.write(text1b)
text1c="**Athena's confidence: **"+'_'+ str(100*invest[0][0])+'%' + '_'
st.write(text1c)
st.write('')
st.write('')
text2='Your second stock: ' +'_'+ str(invest[1][2])+ '_'
st.header(text2)
graphMyStock(invest[1][3],3,4,'springgreen')
text2a='**Price:** '+ '_$'+ str(invest[1][1])+ '_'
st.write(text2a)
text2b='**Number of stocks you should buy:** '+'_'+ str(t_s2)+ '_'
st.write(text2b)
text2c="**Athena's confidence:** "+'_'+ str(100*invest[1][0]) + '%'+'_'
st.write(text2c)
st.write('')
st.write('')
text3= 'Your third stock: '+'_'+ str(invest[2][2])+ '_'
st.header(text3)
graphMyStock(invest[2][3],5,6,'indianred')
text3a='**Price:** '+ '_$'+ str(invest[2][1])+ '_'
st.write(text3a)
text3b='**Number of stocks you should buy: **'+'_'+ str(t_s3)+'_'
st.write(text3b)
text3c="**Athena's confidence: **"+'_'+ str(100*invest[2][0]) + '%'+'_'
st.write(text3c)
st.write('')
st.write('')
st.header("")
st.header("")
st.write("Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.")
else:
st.write('Budget too low to diversify')
if a=='Understand':
ticker = st.sidebar.selectbox(
'Choose a Stock',symbols)
stock = yf.Ticker(ticker)
info=stock.info
ln=info['longName']
st.title(info['longName'])
st.title(ticker)
opt1, opt2 = st.beta_columns(2)
with opt1:
numYearMA = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=0)
with opt2:
windowSizeMA = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=1)
start = dt.datetime.today()-dt.timedelta(numYearMA * 365)
end = dt.datetime.today()
livedata = yf.download(ticker,start,end)
df_ma = calcMovingAverage(livedata, windowSizeMA)
df_ma = df_ma.reset_index()
fig = go.Figure()
fig.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['Adj Close'],
name = '('+ ticker+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='royalblue')
)
)
compstock2=st.selectbox('Choose stock to compare with: ', symbols)
st.info("If you don't wish to compare, select the same stock again")
livedata2=yf.download(compstock2,start,end)
df_ma2= calcMovingAverage(livedata2, windowSizeMA)
df_ma2= df_ma2.reset_index()
fig.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ compstock2+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='firebrick')
))
fig.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig.update_layout(legend_title_text='Trend')
fig.update_yaxes(tickprefix="$")
st.plotly_chart(fig, use_container_width=True)
livedata3 = yf.download(ticker,start,end)
df_ma3 = calcMovingAverage(livedata3, windowSizeMA)
df_ma3 = df_ma.reset_index()
train_data, test_data = df_ma3[0:int(len(df_ma3)*0.7)], df_ma3[int(len(df_ma3)*0.7):]
training_data = train_data['Adj Close'].values
test_data = test_data['Adj Close'].values
history = [x for x in training_data]
model_predictions = []
N_test_observations = len(test_data)
abcd=0
for time_point in range(N_test_observations):
model = ARIMA(history, order=(4,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
model_predictions.append(yhat[0])
true_test_value = test_data[time_point]
history.append(true_test_value)
abcd=abcd+1
af=time_point
MSE_error = mean_squared_error(test_data, model_predictions)
test_set_range = df_ma3[int(len(df_ma3)*0.7):]
dts=df_ma3.loc[:,['Date']]
new = pd.date_range(test_set_range.Date.iloc[-1], periods=30)
df1 = | pd.DataFrame(new[1:], columns=['Date']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import tableauhyperapi as tab_api
import pantab
def test_basic(df, tmp_hyper, table_name, table_mode):
# Write twice; depending on mode this should either overwrite or duplicate entries
pantab.frame_to_hyper(df, tmp_hyper, table=table_name, table_mode=table_mode)
pantab.frame_to_hyper(df, tmp_hyper, table=table_name, table_mode=table_mode)
result = pantab.frame_from_hyper(tmp_hyper, table=table_name)
expected = df.copy()
if table_mode == "a":
expected = pd.concat([expected, expected]).reset_index(drop=True)
expected["float32"] = expected["float32"].astype(np.float64)
tm.assert_frame_equal(result, expected)
def test_missing_data(tmp_hyper, table_name, table_mode):
df = pd.DataFrame([[np.nan], [1]], columns=list("a"))
df["b"] = pd.Series([None, np.nan], dtype=object) # no inference
df["c"] = | pd.Series([np.nan, "c"]) | pandas.Series |
import pandas as pd
import numpy as np
import random
import logging
import cv2
import sys
class DataProcessor:
@staticmethod
def GetSizeDataFromDataFrame(dataset):
h = int(dataset['h'].values[0])
w = int(dataset['w'].values[0])
c = int(dataset['c'].values[0])
return h, w, c
@staticmethod
def CreateSizeDataFrame(h, w, c):
sizes_df = pd.DataFrame({'c': c, 'w': w, 'h': h}, index=[0])
return sizes_df
@staticmethod
def ProcessTrainData(trainPath):
"""Reads the .pickle file and converts it into a format suitable fot training
Parameters
----------
trainPath : str
The file location of the .pickl
Returns
-------
list
list of video frames and list of labels (poses)
"""
train_set = | pd.read_pickle(trainPath) | pandas.read_pickle |
# -*- coding: utf-8 -*-
import os
import pandas as pd
pd.options.display.max_rows=10
import numpy as np
from indicators import indicators_for_dirverse,indicators_for_crosses
from dateutil.parser import parse
import talib
import sys
from scipy import interpolate
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf-8')
CLOSE_SAMPLE=[ 24.76, 24.28, 25.21, 26.25, 28.88, 28.99, 28.35, 31.19,34.31, 36.49, 35.67, 32.1 , 32.18, 31.7 , 30.8 , 30.77,29.77, 27.7 , 28.76]
LOW_SAMPLE=[ 24.2 , 24.01, 23.41, 24. , 26.37, 27.25, 27.4 , 31.19,33.4 , 33.4 , 35.08, 32.1 , 30.7 , 31.01, 30.27, 30.5 ,29.45, 27.6 , 27.7 ]
GBKM_SAMPLE=[ 75.27683505, 74.16337925, 74.90652869, 77.40264178,81.75542302, 86.66794839, 88.29240889, 86.10675256,84.7067632 , 87.00756837, 90.50308921, 89.76234594,82.57561793, 71.43528003, 59.91510841, 50.53179488,43.08981872, 36.17388661, 29.83802615]
CLOSE_SAMPLE2=[ 20.33, 21.05, 21.49, 20.29, 22.32, 24.55, 27.01, 29.71,32.68, 31.77, 34.95, 38.45, 42.3 , 46.53, 51.18, 50. ,47.5 , 48. , 47. , 42.41, 43.68, 48.05]
LOW_SAMPLE2=[ 19.99, 20.25, 20.68, 20.29, 19.78, 22.81, 25. , 28.36,30.45, 30.7 , 31.18, 35.52, 41.1 , 46.53, 47.65, 46.63,45.5 , 46. , 45.5 , 42.3 , 41.5 , 43.18]
GBKM_SAMPLE2=[ 93.71592611, 91.21636003, 87.46623061, 83.41955066,80.66983087, 81.01571395, 84.73545107, 90.40899863,95.05322187, 96.89845728, 96.5647677 , 95.76976925,96.00042368, 97.37205819, 98.6860291 , 99.1305236 ,98.05462598, 94.43946125, 88.22010362, 79.89313723,70.47144951, 62.78129296]
def indicators_for_dirverse(df):
df=df.dropna()
df=df.sort_index()
df['MACD'],df['MACD_signal'],df['MACD_hist']=talib.MACD(np.array(df['close']))
df['var1']=(2*df['close']+df['high']+df['low']+df['open'])/5.0
df['var2']=talib.MIN(np.array(df['low']),timeperiod=34)
df['var3']=talib.MAX(np.array(df['low']),timeperiod=34)
df['buffer']=(df['var1']-df['var2'])/(df['var3']-df['var2'])*100
df['SK']=talib.EMA(np.array(df['buffer']),timeperiod=13)
df['SD']=talib.EMA(np.array(df['SK']),timeperiod=3)
df['MACD_MIN']=talib.MIN(np.array(df['MACD']),timeperiod=9)
df['PRICE_MIN']=talib.MIN(np.array(df.close),9)
df['PRICE_MAX']=talib.MAX(np.array(df.close),9)
df['RSI'] = talib.RSI(np.array(df.close))
df=df.sort_index(ascending=False)
df=df.dropna()
return df
def indicators_for_crosses(df):
df=df.dropna()
df=df.sort_index()
df['MA5']=talib.MA(np.array(df['close']),5)
df['MA10']=talib.MA(np.array(df['close']),10)
df['MA20']=talib.MA(np.array(df['close']),20)
df['LONG_ARR']=(df['MA5']>df['MA10'])&(df['MA10']>df['MA20'])
df['SHORT_ARR']=(df['MA5']<df['MA10'])&(df['MA10']<df['MA20'])
df['PRICE_MAX']=talib.MAX(np.array(df.close),3)
df=df.sort_index(ascending=False)
df=df.dropna()
return df
def diverse_strategy_buy(df): # strategy
    '''
    Divergence-based buy signal built from PRICE_MIN, MACD and MACD_MIN.
    '''
df['price_gorge']=df['PRICE_MIN']!=df['PRICE_MIN'].shift(-1)
df['MACD_gorge']=df['MACD']>df['MACD_MIN'].shift(-1)
df['SDSKlt20']=(df['SK']<20)&(df['SD']<20)
df['buy_signal']=df['SDSKlt20']&df['MACD_gorge']&df['price_gorge']
df=df.dropna()
return df
def diverse_strategy_sell(df):
    '''Divergence-based sell signal.'''
df['price_peak']=df['PRICE_MAX']!=df['PRICE_MAX'].shift(-1)
df['MACD_peak']=df['MACD']>df['MACD_MIN'].shift(-1)
df['SDSKgt80']=(df['SK']>80)&(df['SD']>80)
#df['quick_sell']=(df['ma5']<df['ma20'])&(df['ma5'].shift(-1)>df['ma20'].shift(-1))
#df['LossLimit']=df['close']<df['PRICE_MAX']*0.92
df['sell_signal']=(df['SDSKgt80']&df['MACD_peak']&df['price_peak'])#|df['LossLimit']#|df['quick_sell']
df=df.dropna()
return df
def golden_cross(df):
df=indicators_for_crosses(df)
df['buy_signal']=df['LONG_ARR']&(df['SHORT_ARR'].shift(-4))
df=df.dropna()
return df
def dead_cross(df):
df=indicators_for_crosses(df)
df['sell_signal']=df['SHORT_ARR']&(df['LONG_ARR'].shift(-4))
df=df.dropna()
return df
def return_similarity(va,vb,ignore_start=True):
'''regardless of where you start'''
va=np.array(va)
vb=np.array(vb)
lena=len(va)
lenb=len(vb)
if lena!=lenb:
if lena>lenb:
sarr=vb
larr=va
if lena<lenb:
sarr=va
larr=vb
xs=np.array(np.linspace(1,len(sarr),len(sarr)))
xl=np.array(np.linspace(1,len(sarr),len(larr)))
f = interpolate.interp1d(xs, sarr)
va = f(xl)
vb = larr
if ignore_start:
va=va-va[0]
vb=vb-vb[0]
num=float(va.T.dot(vb))
denom=np.linalg.norm(va)*np.linalg.norm(vb)
an_cos=num/denom
an_sin=0.5+0.5*an_cos
    # the closer to 1, the higher the similarity
return an_sin
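# Worked sketch (made-up series): a level-shifted copy of the same shape scores about 1.0
# because the comparison ignores the starting level, while a reversed series scores about 0.
def _example_return_similarity():
    base = [1, 2, 3, 4, 5]
    shifted = [11, 12, 13, 14, 15]
    inverted = [5, 4, 3, 2, 1]
    return return_similarity(base, shifted), return_similarity(base, inverted)  # ~ (1.0, 0.0)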
def rs(arr,*args,**kwargs):
arr=list(arr)
results=[]
for sample in args:
results.append(return_similarity(arr,sample,ignore_start=True))
result=np.mean(results)
return result
def rs2(arr,*args,**kwargs):
arr=list(arr)
results=[]
for sample in args:
results.append(return_similarity(arr,sample,ignore_start=False))
result=np.mean(results)
return result
def stra_simi(df,idx,ignore_start=True,*args,**kwargs):
'''
idx:column name youwant to compare with
args should be passed into samples of list or array type
kwargs' keys got 'name'...
'''
if not args:
return
bucket=[]
for sample in args:
bucket.append(df[idx].rolling(center=False,window=len(sample)).apply(func=rs,args=args))
srs= | pd.concat(bucket,axis=1) | pandas.concat |
import sys
import pandas as pd
from sqlalchemy import create_engine
from sklearn.metrics import classification_report
import pickle
import nltk
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'omw-1.4'])
def tokenize(text):
"""Tokenise text with lemmatizer and case normalisation.
Args:
text (str): text required to be tokenized
Returns:
list: tokenised list of strings
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
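# Hedged usage sketch (example message is made up): URLs are masked with a placeholder
# before the lemmatized, lower-cased tokens are returned.
def _example_tokenize():
    return tokenize("Water needed at http://example.com NOW!")
    # -> ['water', 'needed', 'at', 'urlplaceholder', 'now', '!']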
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""Verb extractor using part of speech.
Args:
BaseEstimator (class): Base class for all estimators in scikit-learn.
TransformerMixin (class): class for all transformers in scikit-learn.
"""
def starting_verb(self, text):
"""Perform starting verb feature creation.
Args:
text (str): text that requires analysis
Returns:
binary: returns 1 if starting verb found or 0 if not.
"""
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return 1
return 0
def fit(self, x, y=None):
"""Fit class for sklearn.
Args:
x (df): dataframe to perform class on
y (df, optional): dataframe to perform class on. Defaults to None.
Returns:
self: class handling
"""
return self
def transform(self, X):
"""Transform Class for sklearn.
Args:
X (dataframe): dataframe that requires treatment
Returns:
dataframe: dataframe that has the starting verb performed on.
"""
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
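# Minimal pipeline sketch (layout is an assumption, not the tuned model used downstream):
# the custom extractor is combined with the text features through a FeatureUnion.
def _example_feature_union():
    return FeatureUnion([
        ('text_pipeline', Pipeline([
            ('vect', CountVectorizer(tokenizer=tokenize)),
            ('tfidf', TfidfTransformer()),
        ])),
        ('starting_verb', StartingVerbExtractor()),
    ])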
def load_data(database_filepath):
"""Load data from disaster management db.
Args:
database_filepath (str): filepath to dataframe
Returns:
dataframe, dataframe, str: the two dataframes returned and categories
"""
engine = create_engine(f'sqlite:///{database_filepath}')
df = | pd.read_sql(database_filepath, con=engine) | pandas.read_sql |
import datetime
from collections.abc import Iterable, Mapping, MutableMapping
from numbers import Number
import pandas as pd
from intervaltree import Interval, IntervalTree
from ..diffs import Deletion, Diff, Edit, Insertion
from ..utils import equal
from .base import BaseTree
from .labels import LabelTree
class BaseIntervalTree(BaseTree):
_tree: IntervalTree
@staticmethod
def compatible_keys(keys):
for key in keys:
if not isinstance(key, tuple):
return False
if not len(key) == 2:
return False
if not all([isinstance(x, (int, datetime.datetime)) for x in key]):
return False
return True
@classmethod
def from_dict(cls, d):
ivs = [Interval(*k, v) for k, v in d.items()]
return cls(IntervalTree(ivs))
@classmethod
def from_label_dict(cls, d):
ivs = [Interval(*cls.label_to_key(k), v) for k, v in d.items()]
return cls(IntervalTree(ivs))
def add_group(self, name, group):
self[name] = group
@staticmethod
def filter_keys(pattern):
raise NotImplementedError
@staticmethod
def key_to_label(key):
raise NotImplementedError
@staticmethod
def label_to_key(label):
raise NotImplementedError
@staticmethod
def _validate_itype(begin, end):
raise NotImplementedError
def to_label_dict(self):
return {
self.key_to_label((iv.begin, iv.end)): iv.data
for iv in sorted(self._tree)
}
def to_dict(self):
return {(iv.begin, iv.end): iv.data for iv in sorted(self._tree)}
def __init__(self, tree=None, *args, **kwargs):
if tree is None:
tree = IntervalTree()
elif isinstance(tree, IntervalTree):
tree = tree
elif isinstance(tree, BaseIntervalTree):
tree = tree._tree
else:
raise TypeError("tree must be an instance of IntervalTree.")
self._tree = tree
def __getitem__(self, key):
if isinstance(key, str):
key = self.label_to_key(key)
if isinstance(key, Number):
return self.value(key)
elif isinstance(key, tuple) and len(key) == 2:
return self.overlap_content(*key)
elif isinstance(key, Iterable):
return self.values_at(key)
elif isinstance(key, slice):
start = key.start or self.start
stop = key.stop or self.end
if key.step is None:
return self.overlap(key.start, key.stop)
else:
return self.values_at(range(start, stop, key.step))
raise KeyError('No overlapping data found.')
@property
def start(self):
return self._tree.begin()
@property
def end(self):
return self._tree.end()
def __setitem__(self, key, value):
if isinstance(key, str):
key = self.label_to_key(key)
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
elif isinstance(key, tuple):
if len(key) == 2:
start, stop = key
step = None
elif len(key) == 3:
start, stop, step = key
else:
raise ValueError("Setting intervals with tuple must be \
of form (start, end) or (start, end, step)")
else:
raise TypeError(
"Wrong type. Setting intervals can only be done using a \
slice or tuple of (start, end) or (start, end, step)"
)
if start is None:
start = self.start
if stop is None:
stop = self.end
if step is None:
self.set_interval(start, stop, value)
else:
indices = list(range(start, stop, step))
for begin, end, val in zip(indices[:-1], indices[1:], value):
self.set_interval(begin, end, val)
def __delitem__(self, key):
        if isinstance(key, str):
            key = self.label_to_key(key)
        if isinstance(key, tuple) and len(key) == 2:
            self._tree.chop(*key)
        elif isinstance(key, slice):
            self._tree.chop(key.start, key.stop)
        else:
            raise TypeError("Must pass a label, a tuple of (begin, end) or a slice.")
def keys(self):
for iv in sorted(self._tree):
yield iv.begin, iv.end
def labels(self):
return map(self.key_to_label, self.keys())
def items(self):
for iv in sorted(self._tree):
yield (iv.begin, iv.end), iv.data
def values(self):
for iv in sorted(self._tree):
yield iv.data
def __iter__(self):
return self.keys()
def __len__(self):
return len(self._tree)
def __bool__(self):
return bool(len(self._tree))
def __contains__(self, key):
return bool(self[key])
def __getstate__(self):
return tuple(sorted([tuple(iv) for iv in self._tree]))
def __setstate__(self, d):
ivs = [Interval(*iv) for iv in d]
self._tree = IntervalTree(ivs)
def overlap(self, begin, end):
begin, end = self._validate_itype(begin, end)
hits = sorted(self._tree.overlap(begin, end))
return [
Interval(max(iv.begin, begin), min(iv.end, end), iv.data)
for iv in hits
]
def overlap_content(self, begin, end):
hits = sorted(self._tree.overlap(begin, end))
if len(hits) == 1:
return hits[0].data
return [hit.data for hit in hits]
def value(self, index):
index, = self._validate_itype(index)
hits = sorted(self._tree.at(index))
if not hits:
            raise KeyError(f'No data overlaps {index}')
if len(hits) == 1:
return hits[0].data
return hits
def values_at(self, indices):
indices = self._validate_itype(*indices)
return [self.value(i) for i in indices]
def set_interval(self, begin, end, value):
begin, end = self._validate_itype(begin, end)
self._tree.chop(begin, end)
self._tree.addi(begin, end, value)
def to_df(self, title="tree"):
import pandas as pd
ivs = []
for (begin, end), data in self.items():
if isinstance(data, BaseTree):
data = float("nan")
interval = {
"label": f"{begin}-{end}",
"begin": begin,
"parameter": title,
"mid": (begin + end) / 2,
"end": end,
"data": data
}
ivs.append(interval)
return pd.DataFrame(ivs)
def to_native(self):
ivs = []
for (begin, end), data in self.items():
if isinstance(data, BaseTree):
iv = Interval(begin, end, data.to_native())
else:
iv = Interval(begin, end, data)
ivs.append(iv)
return IntervalTree(ivs)
def explorer(self, title="tree"):
import panel as pn
pn.extension()
from ..visualizations import IntervalTreeExplorer
return IntervalTreeExplorer(tree=self, label=title)
def diff(self, other):
if not isinstance(other, self.__class__):
return Edit(old=self, new=other)
if self == other:
return self.__class__()
u = self._tree.union(other._tree)
u.split_overlaps()
u.merge_equals()
diffs = self.__class__()
for iv in u:
k = iv.begin, iv.end
if k not in self:
diffs[k] = Insertion(new=other[k])
elif k not in other:
diffs[k] = Deletion(old=self[k])
elif isinstance(self[k], BaseTree) and isinstance(
other[k], BaseTree):
d = self[k].diff(other[k])
if len(d):
diffs[k] = d
elif not equal(self[k], other[k]):
diffs[k] = Edit(old=self[k], new=other[k])
return diffs
class IntIntervalTree(BaseIntervalTree):
def filter_keys(self, pattern):
if isinstance(pattern, tuple):
begin, end = pattern
elif isinstance(pattern, slice):
begin, end = pattern.start, pattern.stop
else:
raise TypeError(
'pattern must be an interval defined by tuple or slice.')
return [(iv.begin, iv.end) for iv in self.overlap(begin, end)]
@staticmethod
def key_to_label(key):
return f"{key[0]}-{key[1]}"
@staticmethod
def label_to_key(label):
return tuple(map(int, label.split("-")))
def to_label_dict(self):
return {f"{iv.begin}-{iv.end}": iv.data for iv in sorted(self._tree)}
def _validate_itype(self, *args):
return tuple(int(arg) for arg in args)
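# Hedged usage sketch (toy intervals): integer-keyed intervals can be written with slices
# and read back by point, by "begin-end" label, or by overlapping range.
def _example_int_interval_tree():
    tree = IntIntervalTree()
    tree[0:10] = "a"
    tree[10:20] = "b"
    return tree[5], tree["0-10"], tree[(5, 15)]  # ("a", "a", ["a", "b"])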
class TimeIntervalTree(BaseIntervalTree):
unit: str
def __init__(self, *args, unit='s', **kwargs):
self.unit = unit
super().__init__(*args, **kwargs)
def filter_keys(self, pattern):
if isinstance(pattern, tuple):
begin, end = pattern
elif isinstance(pattern, slice):
begin, end = pattern.start, pattern.stop
else:
raise TypeError(
'pattern must be an interval defined by tuple or slice.')
return [(iv.begin, iv.end) for iv in self.overlap(begin, end)]
@staticmethod
def key_to_label(key):
return f"({key[0]})-({key[1]})"
@staticmethod
def label_to_key(label):
return tuple(map(pd.to_datetime, label.strip('()').split(")-(")))
def _validate_itype(self, *args):
return tuple( | pd.to_datetime(arg, unit=self.unit) | pandas.to_datetime |
# # RFE Selection
# from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFECV
import typer
import datetime
import os
import shutil
from globals import DATA_MODELLING_FOLDER
def identify_top_features(X, y, estimator, step = 10, n_feat_select = 30):
ranks = pd.DataFrame(index = X.columns)
for i in range(10):
print(i)
        rfecv = RFECV(estimator = estimator, step = step, cv = 10, scoring = 'balanced_accuracy', n_jobs = -1)
rfecv.fit(X, y.to_numpy().ravel())
print(rfecv.n_features_)
ranks[str(i)] = rfecv.ranking_
ranks['total_rank'] = np.sum(ranks, axis =1)
    print(ranks.sort_values(by = "total_rank").head(n_feat_select))
    selected_columns = ranks.sort_values(by = "total_rank").head(n_feat_select).index
return selected_columns
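# Hedged usage sketch (file names echo the modelling folder used below but are treated as
# placeholders here): repeated RFECV runs rank the features and the consensus top columns
# are kept.
def _example_identify_top_features():
    X = pd.read_csv("X_train_77.csv").set_index("subjectkey")
    y = pd.read_csv("y_train_77.csv").set_index("subjectkey")
    top_columns = identify_top_features(X, y, RandomForestClassifier(class_weight="balanced"))
    return X[top_columns]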
def main(eval_type : str, rfe_step : int = typer.Argument(10), n_features :int = typer.Argument(30)):
startTime = datetime.datetime.now()
# Assure the type of dataset is correctly established
assert eval_type in ['full', 'over'], "eval_type (1st argument) was not valid. The only 2 options are 'full', 'over'."
if eval_type == "full":
x_train_data = "X_train_77"
y_train_data = "y_train_77"
x_test_data = "X_test_77"
y_test_data = "y_test_77"
elif eval_type == "over":
x_train_data = "X_train_77_over"
y_train_data = "y_train_77"
x_test_data = "X_test_77_over"
y_test_data = "y_test_77"
# Create pathing
x_train_path = os.path.join(DATA_MODELLING_FOLDER, x_train_data)
y_train_path = os.path.join(DATA_MODELLING_FOLDER, y_train_data)
x_test_path = os.path.join(DATA_MODELLING_FOLDER, x_test_data)
X_train = pd.read_csv(x_train_path + ".csv").set_index('subjectkey')
y_train = pd.read_csv(y_train_path + ".csv").set_index('subjectkey')
X_test = | pd.read_csv(x_test_path + ".csv") | pandas.read_csv |
import pandas as pd
def freq(data=None, variable=None, dropna=False):
"""
    Description:
        freq, short for frequency, counts the occurrences of each unique value of the
        variables in a Pandas DataFrame.
    Parameters:
        data: A Pandas DataFrame (required)
        variable: Default None
            A Python list; if not given, the frequency of every variable is returned;
            if supplied, only the specified variables are counted.
        dropna: Boolean, default False
            If False, NaN values are included in the counts; otherwise they are excluded.
    return:
        DataFrame
"""
frequency = pd.DataFrame()
if data is None:
raise TypeError('Please supply a DataFrame')
elif variable is None:
for i in list(data.columns):
tmp = data[i].value_counts(dropna=False).to_frame().reset_index()
tmp.columns = [i,'count of '+i]
frequency = pd.concat([frequency,tmp], axis=1, sort=True)
else:
for i in variable:
tmp = data[i].value_counts(dropna=False).to_frame().reset_index()
tmp.columns = [i,'count of '+i]
frequency = | pd.concat([frequency,tmp], axis=1, sort=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 16:23:14 2020
This script will do the next steps:
1 - create necessary directories (if not exists)
2 - connect or create a sqlite3 database
3 - create the table to store prices in the database (if not exists)
4 - update prices, as follows:
* checks the most recent date of prices in the database
        - if there are no prices in the database, download prices from a formatted csv file on github
          to avoid downloading and processing lots of xml files.
- if there are prices in the database, and the current date is ahead
of the date of the last prices in the database, then proceed as follows:
            - download and process the file with prices from the bvmfbovespa website;
- download and process the file with the number of shares of each ticker
- merge prices and shares
- upload the data to the database
@author: Diego
"""
import pandas as pd
import sqlite3
import os
import zipfile
import xml.etree.ElementTree as ET
import datetime
import wget
cwd = os.getcwd()
if not os.path.exists("data"):
os.makedirs("data")
if not os.path.exists("data\\cotahist"):
os.makedirs("data\\cotahist")
if not os.path.exists("data\\ftp_files"):
os.makedirs("data\\ftp_files")
if not os.path.exists("data\\temp"):
os.makedirs("data\\temp")
conn = sqlite3.connect(cwd + '\\data\\finance.db')
db = conn.cursor()
#%% functions
def create_prices():
query = """
CREATE TABLE IF NOT EXISTS prices
(
date DATE,
ticker TEXT,
preult REAL,
totneg INTEGER,
quatot INTEGER,
voltot INTEGER,
number_shares INTEGER
)"""
db.execute(query)
return
def get_last_database_price_date():
"""
Returns
date of the last available price in the database
"""
query = "SELECT date FROM prices ORDER BY date DESC LIMIT (1)"
x = | pd.read_sql(query, conn) | pandas.read_sql |
from pathlib import Path
from typing import Union, List
import pandas as pd
import pymzml
HERE = Path(__file__).parent
def mzml(file_path: Union[str, Path], scan_low, scan_high, min_intensity: int = 600):
"""Import mzML files derived from applying MSConvert to .raw files."""
headers = ["scanindex", "rettime", "mz", "intensity", "function"]
# Borrowed and modified from https://github.com/rlinington/ms2analyte/blob/master/ms2analyte/converters/waters.py
# Waters data includes the lockspray internal calibrant scans as 'MS1' data. These are differentiated from true
# MS1 data by the 'function' attribute in the spectrum element. Data MS1 scans are function 1. Lockspray scans are
# assigned the highest possible function number (floating, depends on how many DDA scans were permitted during
# acquisition setup). Commonly lockspray function=5. This is always 3 for MSe (DIA) data.
# NOTE: If MS2 functionality is added, this is not an issue, because all MS2 data have ms level = 2, and are
# therefore all legitimate for inclusion.
# Parse mzML file and format appropriate scan data as Dataframe
run = pymzml.run.Reader(str(file_path))
input_data = []
for spec in run:
# Skip over non-MS1 data
if spec.ms_level != 1:
continue
# Skip lockspray or other functions if there are any!
# If not, this is probably not Waters data and should be fine...
fn = spec.id_dict.get("function")
# if fn is not None:
# if fn != 1:
# continue
scan_number = spec.ID
# if scan_number < scan_low:
# continue
# if scan_number > scan_high:
# print("End of useful scans")
# break
retention_time = round(spec.scan_time_in_minutes(), 3)
for peak in spec.peaks("raw"):
mz = round(peak[0], 4)
intensity = int(peak[1])
if intensity < min_intensity:
continue
input_data.append([scan_number, retention_time, mz, intensity, fn])
# Print import progress. Useful because importing large mzML files can be slow.
if spec.index % 100 == 0 and spec.index > 0:
print(f"{file_path.name} - Completed import of scan " + str(spec.ID))
return pd.DataFrame(input_data, columns=headers)
# REPLACE THIS WITH THE PATH OF YOUR MZML FILES
mzml_dir = Path("Z:\Linington\working\isoanalyst_example\generalized\mzmls")
# REPLACE THIS WITH THE PATH OF TEST FEATURES CSV FILES
df = | pd.read_csv(HERE / "RLUS1353_test_features.csv") | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_Create Project.ipynb (unless otherwise specified).
__all__ = ['init_available_projects', 'summarize_h5', 'pgrid_to_run_parameters', 'collect_statistics', 'fix_geojson',
'main']
# Cell
from pathlib import Path
import shutil
import os
import pandas as pd
import geopandas as gpd
import shapely
from shapely.geometry.polygon import Polygon
from shapely.ops import cascaded_union, unary_union
import numpy as np
from time import time
from typing import *
import junevis.path_fixes as pf
import json
import junevis.process_loggers as process_loggers
# Cell
def init_available_projects(project_name: str):
pf.AVAILABLE_PROJECTS.touch()
with open(str(pf.AVAILABLE_PROJECTS), 'r+') as fp:
available_projects = set([p.strip() for p in fp.readlines()])
if project_name in available_projects:
if not force_add_project:
raise ValueError(f"Cannot create project of name '{project_name}': Project already exists in {pf.AVAILABLE_PROJECTS}"
)
else:
shutil.rmtree(outdir) # Delete existing project of that name
fp.truncate(0); fp.seek(0); # Delete file contents
available_projects.remove(project_name)
return available_projects
# Cell
def summarize_h5(record_f, outdir):
"""Dependent on the context variable `output_dir`. The actual summarized output is much smaller than the record file itself"""
start = time()
runId = record_f.stem.split("_")[1]
print(f"Processing {runId}: ")
df = process_loggers.regional_outputs(record_f)
# Add cumulative columns
region_grouped_df = df.groupby(level=0)
df['currently_dead'] = region_grouped_df.deaths.cumsum()
df['currently_recovered'] = region_grouped_df.recovered.cumsum()
# Rename region
df = df.rename_axis(index=["region", "timestamp"])
outfile = outdir / f"summary_{int(runId):03}.csv"
print(f"Saving to {str(outfile)}")
df.to_csv(str(outfile))
print(f"\nTook {time() - start} seconds")
print("\n-------\n")
return df
# Cell
def pgrid_to_run_parameters(parameter_grid: dict) -> dict:
"""Convert parameter_grid dictionary to desired metadata dictionary"""
run_parameters = {}
# Create run_parameters
for k, v in parameter_grid.items():
for i in range(len(v)):
curr = run_parameters.get(str(i), {})
curr[k] = v[i]
run_parameters[str(i)] = curr
params_varied = list(parameter_grid.keys())
return {
"parameters_varied": params_varied,
"run_parameters": run_parameters,
}
# Cell
def collect_statistics(project: Union[str, Path]):
project = Path(project)
csvfs = list(project.glob("summary*.csv"))
dfs = [pd.read_csv(csvf) for csvf in csvfs]
big_df = | pd.concat(dfs, ignore_index=True) | pandas.concat |
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import logging
import itertools
import numpy as np
import pandas as pd
from numpy.core import getlimits
from sklearn.base import TransformerMixin
from sklearn.preprocessing.label import LabelBinarizer
from src.utils import collect_garbage
_logger = logging.getLogger()
########################################################################
# Transformers
# - Input: Pandas DataFrame
# - Output: Either Pandas DataFrame (or scipy.sparse.csr_matrix)
########################################################################
# Handling of NAs
########################################################################
class StringImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return X.fillna("missing")
class ZeroImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
result = X.fillna(0)
result = InfinityImputer().fit_transform(result)
return result
class MinusOneImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
result = X.fillna(-1)
result = InfinityImputer().fit_transform(result)
return result
class MeanImputer(TransformerMixin):
def fit(self, X, y=None):
self.__mean = X.mean()
return self
def transform(self, X):
result = X.fillna(self.__mean)
result = InfinityImputer().fit_transform(result)
return result
class MedianImputer(TransformerMixin):
def fit(self, X, y=None):
self.__median = X.median()
return self
def transform(self, X):
result = X.fillna(self.__median)
result = InfinityImputer().fit_transform(result)
return result
class BooleanImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None, **fit_params):
collect_garbage()
result = X.astype(np.float32)
result = result.fillna(0.5)
return pd.DataFrame(result)
class InfinityImputer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
result = X
for column in X.columns:
datatype = result.loc[:, column].dtype.type
limits = getlimits.finfo(datatype)
result.loc[:, column].replace(np.inf, limits.max, inplace=True)
result.loc[:, column].replace(-np.inf, limits.min, inplace=True)
return result
class NaIndicator(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
result = X.isnull()
result = result.to_frame()
return result
class Float32Transformer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
result = X.astype(np.float32)
return result
########################################################################
# Categories
########################################################################
class CategoryTransformer(TransformerMixin):
def __init__(self, cat_column_names):
self.__cat_column_names = cat_column_names
# Shortcut to gain some speed instead of fit, transform
def fit_transform(self, X, y=None):
for column in self.__cat_column_names:
_logger.debug("Categorizing " + str(column))
collect_garbage()
X[column] = X[column].astype('category')
return X
########################################################################
# Scaling
########################################################################
# computes the formula sign(X)*ceil(log2(|X|+1))
class LogTransformer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
result = X
sign = result.apply(np.sign)
result = result.apply(np.absolute)
result = result + 1
result = result.apply(np.log2)
result = result.apply(np.ceil)
result = sign * result
result = result.fillna(0)
return result
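# Quick sanity check of the sign(X)*ceil(log2(|X|+1)) mapping above (values chosen
# for illustration): X = -3 gives -ceil(log2(4)) = -2, and X = 10 gives
# ceil(log2(11)) = 4, so magnitudes are binned onto a coarse log scale while the
# sign is preserved.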
class SqrtTransformer(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
result = X
result = result + 1
result = result.apply(np.sqrt)
result = result.apply(np.ceil)
return result
########################################################################
# Handling of Strings
########################################################################
class LengthTransformer(TransformerMixin):
"""Computes the length of a string (both in bytes as well as in words)."""
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if len(X.columns) > 1:
raise Exception("Only one column supported")
else:
X = X.iloc[:, 0]
collect_garbage()
X = X.astype(str) # sometimes fails with MemoryError
result = | pd.DataFrame() | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from deep_sentinel.dataset import utils
@pytest.mark.parametrize(
"continuous,discrete", [
({'a': [1.0, 2.0, 3.0]}, {}),
({'a': [1.0, 2.0, 3.0]}, {'b': [1, 2, 3]}),
({}, {'b': [1, 2, 3]}),
]
)
def test_split_category_data(continuous, discrete):
discrete_keys = list(discrete.keys())
continuous_keys = list(continuous.keys())
given = pd.concat(
[
pd.DataFrame(continuous) if len(continuous) != 0 else pd.DataFrame(),
pd.DataFrame(discrete).astype("category") if len(discrete) != 0 else pd.DataFrame(),
], axis=1
)
actual = utils.split_category_data(given)
| pd.testing.assert_frame_equal(actual[0], given[continuous_keys]) | pandas.testing.assert_frame_equal |
import logging
from typing import Optional
import numpy as np
import pandas as pd
from sklearn import utils
from lob_data_utils import lob, model
from sklearn.decomposition import PCA
from sklearn.svm import SVC
logger = logging.getLogger(__name__)
class SvmGdfResults(object):
def __init__(self, stock, r=1.0, s=1.0, data_length=10000, gdf_filename_pattern='',
data_dir='../data/data_gdf', reg_data_dir='../data/prepared'):
self.stock = stock
self.r = r
self.s = s
self.data_length = data_length
self.gdf_filename_pattern = gdf_filename_pattern
self.data_dir = data_dir
self.reg_data_dir = reg_data_dir
self.df, self.df_test = self._read_stock()
all_gdf = ['gdf_{}'.format(i) for i in range(0, 50)]
all_gdf_que = ['gdf_{}'.format(i) for i in range(0, 50)] + ['queue_imbalance']
all_gdf_que_prev = ['gdf_{}'.format(i) for i in range(0, 50)] + ['queue_imbalance', 'prev_queue_imbalance']
feature_columns_dict = {
'que': ['queue_imbalance'],
'que_prev': ['queue_imbalance', 'prev_queue_imbalance'],
'gdf_24_26': ['gdf_24', 'gdf_25'],
'gdf_24-26_que': ['gdf_24', 'gdf_25', 'queue_imbalance'],
'gdf_24-26_que_prev': ['gdf_24', 'gdf_25', 'queue_imbalance', 'prev_queue_imbalance'],
'gdf_23-27': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26'],
'gdf_23-27_que': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26', 'queue_imbalance'],
'gdf_23-27_que_prev': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26', 'queue_imbalance', 'prev_queue_imbalance'],
'gdf_20_30': ['gdf_{}'.format(i) for i in range(20, 30)],
'gdf_20_30_que': ['gdf_{}'.format(i) for i in range(20, 30)] + ['queue_imbalance'],
'gdf_20_30_que_prev': ['gdf_{}'.format(i) for i in range(20, 30)] + ['queue_imbalance', 'prev_queue_imbalance'],
'gdf_0_50': all_gdf,
'gdf_0-50_que': all_gdf_que,
'gdf_0-50_que_prev': all_gdf_que_prev,
'pca_gdf1': all_gdf,
'pca_gdf2': all_gdf,
'pca_gdf3': all_gdf,
'pca_gdf4': all_gdf,
'pca_gdf5': all_gdf,
'pca_gdf6': all_gdf,
'pca_gdf7': all_gdf,
'pca_gdf8': all_gdf,
'pca_gdf9': all_gdf,
'pca_gdf10': all_gdf,
'pca_n_gdf': all_gdf,
'pca_gdf_que1': all_gdf_que,
'pca_gdf_que2': all_gdf_que,
'pca_gdf_que3': all_gdf_que,
'pca_gdf_que4': all_gdf_que,
'pca_gdf_que5': all_gdf_que,
'pca_gdf_que6': all_gdf_que,
'pca_gdf_que7': all_gdf_que,
'pca_gdf_que8': all_gdf_que,
'pca_gdf_que9': all_gdf_que,
'pca_gdf_que10': all_gdf_que,
'pca_n_gdf_que': all_gdf_que,
'pca_gdf_que_prev1': all_gdf_que_prev,
'pca_gdf_que_prev2': all_gdf_que_prev,
'pca_gdf_que_prev3': all_gdf_que_prev,
'pca_gdf_que_prev4': all_gdf_que_prev,
'pca_gdf_que_prev5': all_gdf_que_prev,
'pca_gdf_que_prev6': all_gdf_que_prev,
'pca_gdf_que_prev7': all_gdf_que_prev,
'pca_gdf_que_prev8': all_gdf_que_prev,
'pca_gdf_que_prev9': all_gdf_que_prev,
'pca_gdf_que_prev10': all_gdf_que_prev,
'pca_n_gdf_que_prev': all_gdf_que_prev,
'pca_gdf_que_prev_split10': all_gdf_que_prev
}
def get_score_for_clf(self, clf, df_test, feature_name, pca=None):
x_test = df_test[self.feature_columns_dict[feature_name]]
if pca:
x_test = pca.transform(x_test)
y_test = df_test['mid_price_indicator'].values
return model.test_model(clf, x_test, y_test)
@staticmethod
def get_number_of_pca_components(feature_name: str) -> Optional[int]:
if 'pca_gdf_que_prev_split' in feature_name:
return int(feature_name.replace('pca_gdf_que_prev_split', ''))
if 'pca_gdf_que_prev' in feature_name:
return int(feature_name.replace('pca_gdf_que_prev', ''))
if 'pca_gdf_que' in feature_name:
return int(feature_name.replace('pca_gdf_que', ''))
if 'pca_gdf' in feature_name:
return int(feature_name.replace('pca_gdf', ''))
return None
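    # Examples of the feature-name convention handled above (illustrative):
    #   "pca_gdf5"           -> 5
    #   "pca_gdf_que_prev10" -> 10
    #   "gdf_0_50"           -> None  (no PCA requested)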
@classmethod
def split_sequences(cls, sequences, labels, n_steps):
X, y = list(), list()
for i in range(len(sequences)):
end_ix = i + n_steps
if end_ix > len(sequences):
break
seq_x = sequences[i:end_ix]
lab = labels[end_ix - 1]
X.append(seq_x)
y.append(lab)
return np.array(X), np.array(y)
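    # Shape sketch (assumed 2-D input): for sequences of shape (5, n_features) and
    # n_steps=3, the windows are rows 0:3, 1:4 and 2:5 paired with labels[2:5],
    # giving X of shape (3, 3, n_features) and y of shape (3,), i.e. the
    # (samples, timesteps, features) layout used by the LSTM models here.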
@staticmethod
def get_mean_scores(scores: dict) -> dict:
mean_scores = {}
for k, v in scores.items():
mean_scores[k] = np.mean(v)
return mean_scores
def get_score_for_clf_split_pca(self, clf, df_test, feature_name, pca=None) -> dict:
x_test = df_test[self.feature_columns_dict[feature_name]]
x_test_pca = x_test[[col for col in x_test.columns if 'gdf' in col]]
x_test = x_test[[col for col in x_test.columns if 'gdf' not in col]]
if pca:
x_test_pca = pca.transform(x_test_pca)
for n in range(pca.n_components):
x_test['pca_{}'.format(n)] = x_test_pca[:, n]
y_test = df_test['mid_price_indicator'].values
return model.test_model(clf, x_test, y_test)
def get_pca(self, feature_name) -> Optional[PCA]:
train_x = self.df[self.feature_columns_dict[feature_name]].values
if feature_name in ['pca_n_gdf_que', 'pca_n_gdf_que_prev', 'pca_n_gdf']:
n_components = self.calculate_number_of_components(train_x, threshold=0.99)
else:
n_components = self.get_number_of_pca_components(feature_name)
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_x)
return pca
return None
@classmethod
def calculate_number_of_components(cls, train_x, threshold=0.99) -> int:
pca = PCA(n_components=10)
pca.fit(train_x)
for i in range(1, len(pca.explained_variance_ratio_)):
sum_of_ratio = np.sum(pca.explained_variance_ratio_[0:i])
if sum_of_ratio > threshold:
return i
return 10
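    # Example (ratios assumed): if explained_variance_ratio_ were
    # [0.6, 0.3, 0.095, 0.004, ...], the loop returns 3, the smallest number of
    # leading components whose cumulative ratio exceeds the threshold (0.99 by
    # default in the callers above).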
def get_classes_weights(self):
y_train = self.df['mid_price_indicator'].values
classes = np.unique(y_train)
class_weight_list = utils.class_weight.compute_class_weight('balanced', classes, y_train)
class_weights = {classes[0]: class_weight_list[0], classes[1]: class_weight_list[1]}
return class_weights
def train_clf_with_split_pca(self, clf, feature_name, method=None):
"""
Deprecated
"""
logger.info('Training %s r=%s s=%s:',
self.stock, self.r, self.s)
train_x = self.df[self.feature_columns_dict[feature_name]]
train_pca = train_x[[col for col in train_x.columns if 'gdf' in col]]
train_x = train_x[[col for col in train_x.columns if 'gdf' not in col]]
n_components = self.get_number_of_pca_components(feature_name)
pca = None
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_pca)
train_pca = pca.transform(train_pca)
for n in range(n_components):
train_x['pca_{}'.format(n)] = train_pca[:, n]
scores = model.validate_model(clf, train_x, self.df['mid_price_indicator'])
res = {
**self.get_mean_scores(scores),
'stock': self.stock,
'method': method,
'features': feature_name
}
test_scores = self.get_score_for_clf_split_pca(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def get_train_set(self, feature_name='', n_steps=None):
train_x = self.df[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if n_steps:
train_x, train_y = self.split_sequences(train_x, train_y, n_steps=n_steps)
else:
train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
return train_x, train_y
def get_test_set(self, feature_name='', n_steps=None):
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
test_x = pca.transform(test_x)
if n_steps:
test_x, test_y = self.split_sequences(test_x, test_y, n_steps=n_steps)
return test_x, test_y
def train_mlp(self, clf, feature_name='', should_validate=True, method=None,
fit_kwargs=None, compile_kwargs=None, plot_name=None, class_weight=None,
should_return_model=False):
logger.info('Training %s r=%s s=%s: clf=%s', self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]].values
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
if should_validate:
scores_arrays, m = model.validate_model_lstm(
clf, train_x, train_y, fit_kwargs=fit_kwargs, compile_kwargs=compile_kwargs,
plot_name=plot_name, class_weight=class_weight, print_debug=False)
scores = self.get_mean_scores(scores_arrays)
else:
m = clf()
scores = model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight)
if not method:
method = 'mlp'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
m = clf()
model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight) # to have a clean fitted model
test_scores = model.test_model(m, test_x, test_y, is_lstm=True)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
if should_return_model:
return {**res, **test_scores, 'arch': m.to_json()}, m
else:
return {**res, **test_scores, 'arch': m.to_json()}
def train_lstm(self, clf, feature_name='', should_validate=True, method=None,
fit_kwargs=None, compile_kwargs=None, n_steps=None,
plot_name=None, class_weight=None, should_return_model=False):
logger.info('Training %s r=%s s=%s: clf=%s', self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]].values
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
if n_steps:
train_x, train_y = self.split_sequences(train_x, train_y, n_steps=n_steps)
test_x, test_y = self.split_sequences(test_x, test_y, n_steps=n_steps)
else:
train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
test_x = np.reshape(test_x, (test_x.shape[0], 1, test_x.shape[1]))
if should_validate:
scores_arrays, m = model.validate_model_lstm(
clf, train_x, train_y, fit_kwargs=fit_kwargs, compile_kwargs=compile_kwargs,
plot_name=plot_name, class_weight=class_weight, print_debug=False)
scores = self.get_mean_scores(scores_arrays)
else:
m = clf()
scores = model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight)
if not method:
method = 'lstm'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
# m = clf()
# model.train_model(
# m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
# class_weight=class_weight)
test_scores = model.test_model(m, test_x, test_y, is_lstm=True)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
if should_return_model:
return {**res, **test_scores, 'arch': m.to_json()}, m
else:
return {**res, **test_scores, 'arch': m.to_json()}
def train_clf(self, clf, feature_name='', should_validate=True, method=None, class_weight=None):
logger.info('Training %s r=%s s=%s: clf=%s',
self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]]
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if should_validate:
scores_arrays = model.validate_model(clf, train_x, self.df['mid_price_indicator'],
class_weight=class_weight)
scores = self.get_mean_scores(scores_arrays)
else:
scores = model.train_model(clf, train_x, self.df['mid_price_indicator'], class_weight=class_weight)
if not method:
method = 'logistic'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
test_scores = self.get_score_for_clf(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def train_svm(self, C=np.nan, gamma=np.nan, feature_name='', kernel='rbf', coef0=np.nan, should_validate=True,
class_weight=None):
logger.info('Training %s r=%s s=%s: kernel=%s C=%s gamma=%s coef0=%s',
self.stock, self.r, self.s, kernel, C, gamma, coef0)
if C and gamma and coef0:
clf = SVC(kernel=kernel, C=C, gamma=gamma, coef0=coef0)
elif C and gamma:
clf = SVC(kernel=kernel, C=C, gamma=gamma)
else:
clf = SVC(kernel=kernel)
train_x = self.df[self.feature_columns_dict[feature_name]]
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if should_validate:
scores_arrays = model.validate_model(clf, train_x, self.df['mid_price_indicator'],
class_weight=class_weight)
scores = self.get_mean_scores(scores_arrays)
else:
scores = model.train_model(clf, train_x, self.df['mid_price_indicator'], class_weight=class_weight)
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'C': C,
'gamma': gamma,
'coef0': coef0,
'kernel': kernel,
'features': feature_name,
'pca_components': components_num
}
test_scores = self.get_score_for_clf(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def _read_stock(self):
gdf_filename = self.gdf_filename_pattern.format(self.stock, self.r, self.s)
reg_filename = '{}'.format(self.stock)
logger.debug('Will read %s and %s', gdf_filename, reg_filename)
d = lob.load_prepared_data(
gdf_filename, data_dir=self.data_dir, length=self.data_length)
if len(d) == 2:
df, df_test = d
else:
return pd.DataFrame(), pd.DataFrame()
df_reg, df_reg_test = lob.load_prepared_data(
reg_filename, data_dir=self.reg_data_dir, length=self.data_length)
df['datetime'] = df_reg['Unnamed: 0']
df['bid_price'] = df_reg['bid_price']
df['ask_price'] = df_reg['ask_price']
df['queue_imbalance'] = df_reg['queue_imbalance']
df['prev_queue_imbalance'] = df['queue_imbalance'].shift()
df.index = pd.to_datetime(df['datetime'])
df.dropna(inplace=True)
df_test['datetime'] = df_reg_test['Unnamed: 0']
df_test['bid_price'] = df_reg_test['bid_price']
df_test['ask_price'] = df_reg_test['ask_price']
df_test['queue_imbalance'] = df_reg_test['queue_imbalance']
df_test['prev_queue_imbalance'] = df_test['queue_imbalance'].shift()
df_test.index = pd.to_datetime(df_test['datetime'])
df_test.dropna(inplace=True)
return df, df_test
def features(self, clf) -> pd.DataFrame:
res = []
for feature_name, features in self.feature_columns_dict.items():
logger.debug('Checking feature %s for %s r=%s s=%s', feature_name, self.stock, self.r, self.s)
train_x = self.df[features]
n_components = self.get_number_of_pca_components(feature_name)
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_x)
train_x = pca.transform(train_x)
scores = model.validate_model(clf, train_x, self.df['mid_price_indicator'])
df_score = | pd.DataFrame(scores) | pandas.DataFrame |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
This file performs the renaming of all statistical variables present in the Data
Commons Knowledge Graph. Human-intelligible StatVar names are useful for end
users as they may be pulled from both the Python API and the Google Sheets API by
name.
1) Base Schema: The basic schema for any human readable statistical variable is
mprop_popType_v1_v2_v3... For example, Count_Person_BornInStateOfResidence
2) Optional inclusion of StatType: statType is included when the StatType is not
measuredValue or Unknown. For example, instead of Age_Person, we output
MedianAge_Person
3) Certain data sets are blacklisted: for example, all bio data sets and a few
miscellaneous ones are excluded. This blacklist was created by tjann.
4) Dependent variables are removed. dependent variables are constraints that are
inherently included, but not really necessary. For example, a person earning
an income of 10k to 15k USD may only be measured by the US Census if they are
older than 15 and have an income. For example,
"Count_Person_Years15Onwards_IncomeOfUSDollar10000To14999_WithIncome" becomes
"Count_Person_IncomeOfUSDollar10000To14999" after accounting for the
unnecessary variables. These dependent variables are defined in the textproto
stat vars config file.
5) Boolean constraints are replaced by their populations: for example, p1 =
   isInternetUser and v1=True/False becomes v1=isInternetUser/notInternetUser.
6) Measurement properties are stripped from constraints: for example,
   p1 = employment and v1 = USC_Unemployed becomes v1=Unemployed
7) NAICS Industry codes are replaced by industry names: we have a combination of
   NAICS specific and overview codes. In both cases, we replace the industry
   code (e.g. NAICS/23) with the industry. An example statistical variable is
   WagesAnnual_Establishment_NAICSConstruction
8) Cause of death properties are renamed: e.g., p1 = causeOfDeath and
   v1="ICD10/E00-E89" becomes v1="EndocrineNutritionalMetabolicDiseases". These
   names are generated directly from the ICD10 names stored in BigQuery.
   Exceptionally long or confusing names were manually renamed.
9) DEA drug names are renamed: e.g., p1="drugPrescribed" and v1="drug/dea/9250"
   become v1="Methadone". These are manually renamed. Some drug names are
   intentionally left as their codes. For example, dea/7444 corresponds to
   "4-Hydroxy-3-methoxy-methamphetamine", which does not have a common name.
   Both the codes and drug names will be valid constraints.
10) Certain variables have text prepended or appended to their constraints to
    improve readability: for example p1 = childSchoolEnrollment and
    v1=EnrolledInPublicSchool is changed to v1="ChildEnrolledInPublicSchool".
    These mappings are applied to ~15 variables.
11) Miscellaneous changes: a) MeasuredProp InsuredUnemploymentRate changed to
    Rate_InsuredUnemployment to match the existing formula.
"""
from absl import app
from google.protobuf import text_format
from google.cloud import bigquery
from google.colab import auth
import re
import os
import pandas as pd
import numpy as np
import stat_var_renaming_constants as svrc
import stat_var_renaming_functions as svrf
# Constants
# Max total number of constraints of a variable to include (Dependent
# variables excluded).
_MAX_CONSTRAINTS = 3
_MAX_CONSTRAINTS_WITH_DPV = 6
# If true, no new statistical variables will be introduced.
_ONLY_REGENERATE_EXISTING = False
def authenticate_bq_client():
""" Authenticates and returns a BigQuery client connection. By default this
code assumes it will be run in Google Colab which handles BigQuery
authentication. To run this code elsewhere this method needs to be updated
to properly authenticate a BigQuery client.
Returns:
An authenticated SQL client with a function called query that given a SQL
query returns a response object that can be converted into a dataframe.
"""
# Users should update the authentication method if not using Google CoLab.
auth.authenticate_user()
# Create and return client.
project_id = "google.com:datcom-store-dev"
return bigquery.Client(project=project_id)
def download_stat_vars(client):
""" Queries unique list of statistical variables from BigQuery.
Creates a join across statistical populations and observations to generate
distinct list of statistical variables. Certain datasets like bio are
excluded. The original dpvs are preserved in new columns.
Args:
client: An authenticate BigQuery SQL client.
Returns:
stat_vars: Pandas dataframe containing unique information for all
potential stat vars in the database.
Raises:
Query failure: If improper authentication is given.
"""
# Dynamically create query for constraints in SQL query.
constraint_string = ""
pop_string = ""
for num in range(1, _MAX_CONSTRAINTS_WITH_DPV + 1):
constraint_string += f"SP.v{num} as v{num},\n"
pop_string += f"SP.p{num} as p{num},\n"
    # Dynamically create a list of blacklisted provenances, as a string.
blacklist = [
'"%s"' % prov_id
for prov_id in frozenset().union(*[svrc._MISC_DATASETS,
svrc._BIO_DATASETS])
]
blacklist_str = ', '.join(blacklist) if blacklist else '""'
# Input information into SQL template and perform the query.
query_for_all_stat_vars = (svrc.QUERY_FOR_ALL_STAT_VARS.replace(
"{CONSTRAINTS}",
constraint_string).replace("{POPULATIONS}", pop_string).replace(
"{comma_sep_prov_blacklist}",
blacklist_str).replace("{MAX_CONTRAINTS}", str(_MAX_CONSTRAINTS)))
stat_vars = client.query(query_for_all_stat_vars).to_dataframe()
# Make a pristine copy of constraint names for output MCF.
for c in range(1, _MAX_CONSTRAINTS_WITH_DPV + 1):
stat_vars[f"orig_p{c}"] = stat_vars[f"p{c}"]
stat_vars[f"orig_v{c}"] = stat_vars[f"v{c}"]
stat_vars["orig_populationType"] = stat_vars['populationType']
return stat_vars
### Variable renaming scripts
def addPropertyRemapping(remapper, prop, function):
""" Helper function to add new remapping function to a certain property.
Args:
remapper: Dictionary with mapping from properties to renaming functions.
prop: Property to perform the remapping on.
function: Renaming function that takes three arguments
(prop, constraint, popType) and returns the new name for the constraint.
"""
if prop not in remapper:
remapper[prop] = []
remapper[prop].append(function)
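# Illustrative registration (hypothetical property and lambda, not part of this
# module): the boolean-constraint renaming described in the module docstring could
# be hooked in roughly like so:
#   prop_remap = {}
#   addPropertyRemapping(prop_remap, "isInternetUser",
#                        lambda prop, constraint, pop_type:
#                            prop if constraint == "True" else "not" + prop[2:])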
def remap_constraint_from_prop(row, prop_remap):
""" Helper which applies property remappings to all constraints in a dataset.
Args:
row: Pandas row to apply function to.
prop_remap: Dictionary of renaming functions for each property.
"""
for constraint in range(1, min(_MAX_CONSTRAINTS_WITH_DPV, 1 + row['numConstraints'])):
prop = row[f"p{constraint}"]
if prop in prop_remap:
# May need to apply multiple functions for a single property.
remapper = prop_remap[prop]
for function in remapper:
row[f"v{constraint}"] = function(prop, row[f"v{constraint}"],
row['populationType'])
return row
def generate_dependent_constraint_list():
""" Generates a list of dependent variables.
Using an OS system call, a protobuf definition is compiled. A definition
file is then read in and used to generate a pandas dataframe of dependent
variable definitions.
    Returns:
      Pandas dataframe of population/observation specs that contain at least
      one dependent constraint.
"""
# Generate population observation spec. Creates a new python file.
os.system("protoc -I=. --python_out=. pop_obs_spec_common.proto")
# Load newly created protobuf class definition.
import pop_obs_spec_common_pb2
obs_spec_list = pop_obs_spec_common_pb2.PopObsSpecList()
# Load in PV list from spec proto. Note that covid cases was temporarily
# added as a DPV for display, but shouldn't truly be one.
with open("pop_obs_spec_nocovid.textproto") as f:
counts = f.read()
text_format.Parse(counts, obs_spec_list)
# Create a dataframe that matches the greater stat_vars from DB for merging.
dpvs = pd.DataFrame()
for spec in obs_spec_list.spec:
# Get universal props.
new_row = {}
new_row['populationType'] = spec.pop_type
new_row['measuredProp'] = spec.mprop
new_row['statType'] = spec.stat_type
# Get independent variables.
variables = []
for name in spec.cprop:
variables.append((name, "", False))
# Get dependent variables which depend on the value of the
# constraint.
for name in spec.dpv:
variables.append((name.prop, name.val, True))
# Variables are sorted alphabetically.
variables = sorted(variables)
# Add as a row to entire dataframe.
for index, variable in enumerate(variables):
var_name, constraint, is_dpv_var = variable
new_row[f"orig_p{index + 1}"] = var_name
new_row[f"p{index + 1}_is_dpv"] = is_dpv_var
if is_dpv_var:
new_row[f"orig_v{index + 1}"] = constraint
dpvs = dpvs.append(new_row, ignore_index=True)
# Only return statistical variables with at least one dependent variable.
query_string = ""
for c in range(1, _MAX_CONSTRAINTS + 1):
query_string += f"p{c}_is_dpv == 1 or "
return dpvs.query(f"{query_string} False")
def remove_dependent_constraints(stat_vars):
""" Removes all dependent constraints from list of stat vars.
    Args: stat_vars: Pandas dataframe holding all stat vars.
    Returns: stat_vars with all dependent constraints removed (set to NaN) and
    numConstraints decremented accordingly for each affected row.
"""
# Generate list of dependent constraints from protobuf config.
dpvs = generate_dependent_constraint_list()
# Merge across common columns shared with dependent variable list.
common_cols = (['measuredProp', 'populationType', 'statType'] +
[f"orig_p{x}" for x in range(1, 1 + _MAX_CONSTRAINTS_WITH_DPV)])
stat_vars = pd.merge(stat_vars, dpvs, on=common_cols, how='left')
# Replace any dependent variables and their value with nan.
for c in range(1, _MAX_CONSTRAINTS + 1):
dpv_match = stat_vars.query(f"p{c}_is_dpv == 1")
# Ensure that constraint {c} exists in both tables.
if f"orig_v{c}_x" in dpv_match and f"orig_v{c}_y" in dpv_match:
# Only remove dependent constraints where the value matches.
dpv_match = dpv_match.query(f"orig_v{c}_x == orig_v{c}_y")
stat_vars.loc[dpv_match.index, f"p{c}"] = np.nan
stat_vars.loc[dpv_match.index, f"v{c}"] = np.nan
stat_vars.loc[dpv_match.index, "numConstraints"] = (
stat_vars.loc[dpv_match.index,
"numConstraints"].apply(lambda x: x - 1))
# TODO(REMOVE): Left shift all imputed columns to remove holes.
# stat_vars = stat_vars.apply(left_fill_columns, axis=1)
# Rename constraints from merge.
for c in range(1, _MAX_CONSTRAINTS + 1):
stat_vars = stat_vars.rename({f"orig_v{c}_x": f"orig_v{c}"},
axis=1)
return stat_vars
def left_fill_columns(row):
""" Removes holes in constraints after imputing dependent constraints.
Args:
Row of dataframe with or without holes present between constraints.
Returns:
Row of dataframe without holes present between constraints.
"""
row_constraints = min(row['numConstraints'], _MAX_CONSTRAINTS_WITH_DPV)
# Keep track of the search location to look to pull columns from.
search_location = 2
for base_col in range(1, row_constraints + 1):
# If current population is null then search for the next non-null column.
if pd.isna(row[f"p{base_col}"]):
search_location = max(search_location, base_col + 1)
while search_location <= _MAX_CONSTRAINTS:
# Swap first non-null with the current null location.
if not pd.isna(row[f"p{search_location}"]):
row[f"p{base_col}"] = row[f"p{search_location}"]
row[f"v{base_col}"] = row[f"v{search_location}"]
row[f"p{search_location}"] = np.nan
row[f"v{search_location}"] = np.nan
search_location += 1
break
search_location += 1
return row
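# Illustrative row (column values assumed): after a dependent constraint is dropped,
# a row like p1=age, p2=NaN, p3=incomeStatus is shifted to
# p1=age, p2=incomeStatus, p3=NaN, keeping the constraint columns contiguous.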
def row_to_human_readable(row):
""" Generates a human readable name for a dataframe row.
    Args:
      row: A preprocessed dataframe row with dependent variables removed and
      all constraints, values, and populations renamed.
    Returns:
      Human readable dcid string for the provided row, in the format:
      <?statType>_<mProp>_<popType>_<v1>_<v2>_..._<mQual>_<mDenom>
"""
# Add measured property and population type. e.g. Count_InsuranceClaim.
human_string = (f"{svrc.capitalizeFirst(row['measuredProp'])}" +
f"_{svrc.capitalizeFirst(row['populationType'])}")
# StatType (e.g. median) is prepended if the stat type is not measuredValue.
stat_type = row['statType']
if stat_type != "measuredValue" and stat_type != "Unknown":
human_string = (svrc.capitalizeFirst(stat_type.replace("Value", ""))
+ "_" + human_string)
# Append renamed constraint fields.
row_constraints = min(row['numConstraints'], _MAX_CONSTRAINTS_WITH_DPV)
for num in range(1, row_constraints + 1):
if not | pd.isna(row[f"v{num}"]) | pandas.isna |
"""
Filename: statistical_forecast.py
Author: <NAME>
Version: 14.03.2021
Description: This script uses an ARIMA model to forecast foreign currency
             exchange rates (particularly Indian Rupee / Euro) for the next 3 years.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from datetime import date
from statsmodels.tsa.arima_model import ARIMA
from helper import fit_arima_model, check_stationarity, plot_acf_pacf, ljungbox_test
if __name__ == "__main__":
data = pd.read_excel('Foreign_Exchange_Rates.xlsx')
print('Dataset has {} rows and {} columns'.format(data.shape[0], data.shape[1]))
#print('List all columns of dataset: \n{}'.format(data.columns))
################# Data Cleaning #########################
data = data.rename(columns = {'Time Serie':'Date'})
data.set_index('Date',inplace = True)
data = data.replace('ND', np.nan)
data = data.bfill().ffill() #handling missing values
data = data.astype('float')
data.index = | pd.to_datetime(data.index) | pandas.to_datetime |
import os
import pickle
import numpy as np
import pandas as pd
def load_raw_data(data_dir='.'):
    # read in the training set, the target labels,
    # and the data for which predictions will need to be made
train_path = os.path.join(data_dir, 'orange_small_churn_data.csv')
test_path = os.path.join(data_dir, 'orange_small_churn_test_data.csv.zip')
labels_path = os.path.join(data_dir, 'orange_small_churn_labels.csv')
train = pd.read_csv(train_path)
test = pd.read_csv(test_path, index_col=0)
labels = pd.read_csv(labels_path, names=['target'])
    # from the source data we know which features are numerical
    # and which are categorical;
    # list them out explicitly
numerical = train.columns.to_list()[:190]
categorical = train.columns.to_list()[190:]
    # join the class labels with the training data
train = pd.concat([train, labels], axis=1)
return train, test, numerical, categorical
def load_data(data_dir='.'):
    # read in the training set, the target labels,
    # and the data for which predictions will need to be made
train_path = os.path.join(data_dir, 'train.csv')
test_path = os.path.join(data_dir, 'test.csv')
train = | pd.read_csv(train_path, index_col=0) | pandas.read_csv |
import pandas as pd
import os.path as op
from glob import glob
from ipdb import set_trace
df_generic = pd.read_csv('../stims/stimuli-expressive_selection-all.csv', sep='\t', index_col=0)
tsvs = sorted(glob('data/ratings_complete/*neutral_ratings.tsv'))
dfs = []
for tsv in tsvs:
sub = op.basename(tsv).split("_")[0]
print(f"Processing {sub} ...")
df_generic = pd.read_csv('data/featurespace-AU.tsv', sep='\t', index_col=0)
df = (
| pd.read_csv(tsv, sep='\t', index_col=0) | pandas.read_csv |
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
np.random.seed(1)
# In[5]:
full_labels = pd.read_csv('images.csv')
# In[6]:
full_labels.head()
# In[13]:
grouped = full_labels.groupby('filename')
grouped.apply(lambda x: len(x)).value_counts()
gb = full_labels.groupby('filename')
grouped_list = [gb.get_group(x) for x in gb.groups]
# In[15]:
TRAINING_PERCENTAGE = 0.7
train_index = np.random.choice(len(grouped_list), size=int(TRAINING_PERCENTAGE*len(grouped_list)), replace=False)
test_index = np.setdiff1d(range(len(grouped_list)), train_index)
# In[18]:
train = | pd.concat([grouped_list[i] for i in train_index]) | pandas.concat |
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from .config import config
from .utils import get_in, flatten
from .render import runs_to_html, artifacts_to_html
class RunList(list):
def to_rows(self):
rows = []
head = [
"project",
"uid",
"iter",
"start",
"state",
"name",
"labels",
"inputs",
"parameters",
"results",
"artifacts",
"error",
]
for run in self:
row = [
get_in(run, "metadata.project", config.default_project),
get_in(run, "metadata.uid", ""),
get_in(run, "metadata.iteration", ""),
get_in(run, "status.start_time", ""),
get_in(run, "status.state", ""),
get_in(run, "metadata.name", ""),
get_in(run, "metadata.labels", ""),
get_in(run, "spec.inputs", ""),
get_in(run, "spec.parameters", ""),
get_in(run, "status.results", ""),
get_in(run, "status.artifacts", []),
get_in(run, "status.error", ""),
]
rows.append(row)
return [head] + rows
def to_df(self, flat=False):
rows = self.to_rows()
df = pd.DataFrame(rows[1:], columns=rows[0]) # .set_index('iter')
df["start"] = | pd.to_datetime(df["start"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
import six
import re
import os
from pandas.api.types import is_scalar, is_integer
import numpy as np
import pandas as pd
import h5py
def partition(start, stop, step):
"""Partition an integer interval into equally-sized subintervals.
Like builtin :py:func:`range`, but yields pairs of end points.
Examples
--------
>>> for lo, hi in partition(0, 9, 2):
print(lo, hi)
0 2
2 4
4 6
6 8
8 9
"""
return ((i, min(i + step, stop)) for i in range(start, stop, step))
def parse_cooler_uri(s):
"""
Parse a Cooler URI string
e.g. /path/to/mycoolers.cool::/path/to/cooler
"""
parts = s.split("::")
if len(parts) == 1:
file_path, group_path = parts[0], "/"
elif len(parts) == 2:
file_path, group_path = parts
if not group_path.startswith("/"):
group_path = "/" + group_path
else:
raise ValueError("Invalid Cooler URI string")
return file_path, group_path
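# Examples (illustrative):
#   parse_cooler_uri("test.mcool::/resolutions/1000") -> ("test.mcool", "/resolutions/1000")
#   parse_cooler_uri("test.cool")                     -> ("test.cool", "/")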
def atoi(s):
return int(s.replace(",", ""))
def parse_humanized(s):
_NUMERIC_RE = re.compile("([0-9,.]+)")
_, value, unit = _NUMERIC_RE.split(s.replace(",", ""))
if not len(unit):
return int(value)
value = float(value)
unit = unit.upper().strip()
if unit in ("K", "KB"):
value *= 1000
elif unit in ("M", "MB"):
value *= 1000000
elif unit in ("G", "GB"):
value *= 1000000000
else:
raise ValueError("Unknown unit '{}'".format(unit))
return int(value)
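# Examples (illustrative):
#   parse_humanized("10,000") -> 10000
#   parse_humanized("1.5M")   -> 1500000
#   parse_humanized("30kb")   -> 30000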
def parse_region_string(s):
"""
Parse a UCSC-style genomic region string into a triple.
Parameters
----------
s : str
UCSC-style string, e.g. "chr5:10,100,000-30,000,000". Ensembl and FASTA
style sequence names are allowed. End coordinate must be greater than
or equal to start.
Returns
-------
(str, int or None, int or None)
"""
def _tokenize(s):
token_spec = [
("HYPHEN", r"-"),
("COORD", r"[0-9,]+(\.[0-9]*)?(?:[a-z]+)?"),
("OTHER", r".+"),
]
tok_regex = r"\s*" + r"|\s*".join(r"(?P<%s>%s)" % pair for pair in token_spec)
tok_regex = re.compile(tok_regex, re.IGNORECASE)
for match in tok_regex.finditer(s):
typ = match.lastgroup
yield typ, match.group(typ)
def _check_token(typ, token, expected):
if typ is None:
raise ValueError("Expected {} token missing".format(" or ".join(expected)))
else:
if typ not in expected:
raise ValueError('Unexpected token "{}"'.format(token))
def _expect(tokens):
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["COORD"])
start = parse_humanized(token)
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["HYPHEN"])
typ, token = next(tokens, (None, None))
if typ is None:
return start, None
_check_token(typ, token, ["COORD"])
end = parse_humanized(token)
if end < start:
raise ValueError("End coordinate less than start")
return start, end
parts = s.split(":")
chrom = parts[0].strip()
if not len(chrom):
raise ValueError("Chromosome name cannot be empty")
if len(parts) < 2:
return (chrom, None, None)
start, end = _expect(_tokenize(parts[1]))
return (chrom, start, end)
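# Examples (illustrative):
#   parse_region_string("chr5:10,100,000-30,000,000") -> ("chr5", 10100000, 30000000)
#   parse_region_string("chrX")                       -> ("chrX", None, None)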
def parse_region(reg, chromsizes=None):
"""
Genomic regions are represented as half-open intervals (0-based starts,
1-based ends) along the length coordinate of a contig/scaffold/chromosome.
Parameters
----------
reg : str or tuple
UCSC-style genomic region string, or
Triple (chrom, start, end), where ``start`` or ``end`` may be ``None``.
chromsizes : mapping, optional
Lookup table of scaffold lengths to check against ``chrom`` and the
``end`` coordinate. Required if ``end`` is not supplied.
Returns
-------
A well-formed genomic region triple (str, int, int)
"""
if isinstance(reg, six.string_types):
chrom, start, end = parse_region_string(reg)
else:
chrom, start, end = reg
start = int(start) if start is not None else start
end = int(end) if end is not None else end
try:
clen = chromsizes[chrom] if chromsizes is not None else None
except KeyError:
raise ValueError("Unknown sequence label: {}".format(chrom))
start = 0 if start is None else start
if end is None:
if clen is None: # TODO --- remove?
raise ValueError("Cannot determine end coordinate.")
end = clen
if end < start:
raise ValueError("End cannot be less than start")
if start < 0 or (clen is not None and end > clen):
raise ValueError("Genomic region out of bounds: [{}, {})".format(start, end))
return chrom, start, end
def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)):
return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
def natsorted(iterable):
return sorted(iterable, key=natsort_key)
def argnatsort(array):
array = np.asarray(array)
if not len(array):
return np.array([], dtype=int)
cols = tuple(zip(*(natsort_key(x) for x in array)))
return np.lexsort(cols[::-1])
def read_chromsizes(
filepath_or,
name_patterns=(r"^chr[0-9]+$", r"^chr[XY]$", r"^chrM$"),
all_names=False,
**kwargs
):
"""
Parse a ``<db>.chrom.sizes`` or ``<db>.chromInfo.txt`` file from the UCSC
database, where ``db`` is a genome assembly name.
Parameters
----------
filepath_or : str or file-like
Path or url to text file, or buffer.
name_patterns : sequence, optional
Sequence of regular expressions to capture desired sequence names.
Each corresponding set of records will be sorted in natural order.
all_names : bool, optional
Whether to return all contigs listed in the file. Default is
``False``.
Returns
-------
:py:class:`pandas.Series`
Series of integer bp lengths indexed by sequence name.
References
----------
* `UCSC assembly terminology <http://genome.ucsc.edu/FAQ/FAQdownloads.html#download9>`_
* `GRC assembly terminology <https://www.ncbi.nlm.nih.gov/grc/help/definitions>`_
"""
if isinstance(filepath_or, six.string_types) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
chromtable = pd.read_csv(
filepath_or,
sep="\t",
usecols=[0, 1],
names=["name", "length"],
dtype={"name": str},
**kwargs
)
if not all_names:
parts = []
for pattern in name_patterns:
part = chromtable[chromtable["name"].str.contains(pattern)]
part = part.iloc[argnatsort(part["name"])]
parts.append(part)
chromtable = pd.concat(parts, axis=0)
chromtable.index = chromtable["name"].values
return chromtable["length"]
def fetch_chromsizes(db, **kwargs):
"""
Download chromosome sizes from UCSC as a :py:class:`pandas.Series`, indexed
by chromosome label.
"""
return read_chromsizes(
"http://hgdownload.cse.ucsc.edu/goldenPath/{}/database/chromInfo.txt.gz".format(
db
),
**kwargs
)
def load_fasta(names, *filepaths):
"""
Load lazy FASTA records from one or multiple files without reading them
into memory.
Parameters
----------
names : sequence of str
Names of sequence records in FASTA file or files.
filepaths : str
Paths to one or more FASTA files to gather records from.
Returns
-------
OrderedDict of sequence name -> sequence record
"""
import pyfaidx
if len(filepaths) == 0:
raise ValueError("Need at least one file")
if len(filepaths) == 1:
fa = pyfaidx.Fasta(filepaths[0], as_raw=True)
else:
fa = {}
for filepath in filepaths:
fa.update(pyfaidx.Fasta(filepath, as_raw=True).records)
records = OrderedDict((chrom, fa[chrom]) for chrom in names)
return records
def binnify(chromsizes, binsize):
"""
Divide a genome into evenly sized bins.
Parameters
----------
chromsizes : Series
pandas Series indexed by chromosome name with chromosome lengths in bp.
binsize : int
size of bins in bp
Returns
-------
bins : :py:class:`pandas.DataFrame`
Dataframe with columns: ``chrom``, ``start``, ``end``.
"""
def _each(chrom):
clen = chromsizes[chrom]
n_bins = int(np.ceil(clen / binsize))
binedges = np.arange(0, (n_bins + 1)) * binsize
binedges[-1] = clen
return pd.DataFrame(
{"chrom": [chrom] * n_bins, "start": binedges[:-1], "end": binedges[1:]},
columns=["chrom", "start", "end"],
)
bintable = pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True)
bintable["chrom"] = pd.Categorical(
bintable["chrom"], categories=list(chromsizes.index), ordered=True
)
return bintable
make_bintable = binnify
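# Example (illustrative): binnify(pd.Series({"chr1": 2500}), binsize=1000) yields
# the bins (chr1, 0, 1000), (chr1, 1000, 2000), (chr1, 2000, 2500); every bin is
# `binsize` wide except the clipped final bin of each contig.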
def digest(fasta_records, enzyme):
"""
Divide a genome into restriction fragments.
Parameters
----------
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
enzyme: str
Name of restriction enzyme (e.g., 'DpnII').
Returns
-------
frags : :py:class:`pandas.DataFrame`
Dataframe with columns: ``chrom``, ``start``, ``end``.
"""
try:
import Bio.Restriction as biorst
import Bio.Seq as bioseq
except ImportError:
raise ImportError("Biopython is required to find restriction fragments.")
# http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
chroms = fasta_records.keys()
try:
cut_finder = getattr(biorst, enzyme).search
except AttributeError:
raise ValueError("Unknown enzyme name: {}".format(enzyme))
def _each(chrom):
seq = bioseq.Seq(str(fasta_records[chrom]))
cuts = np.r_[0, np.array(cut_finder(seq)) + 1, len(seq)].astype(int)
n_frags = len(cuts) - 1
frags = pd.DataFrame(
{"chrom": [chrom] * n_frags, "start": cuts[:-1], "end": cuts[1:]},
columns=["chrom", "start", "end"],
)
return frags
return pd.concat(map(_each, chroms), axis=0, ignore_index=True)
def get_binsize(bins):
"""
Infer bin size from a bin DataFrame. Assumes that the last bin of each
contig is allowed to differ in size from the rest.
Returns
-------
int or None if bins are non-uniform
"""
sizes = set()
for chrom, group in bins.groupby("chrom"):
sizes.update((group["end"] - group["start"]).iloc[:-1].unique())
if len(sizes) > 1:
return None
if len(sizes) == 1:
return next(iter(sizes))
else:
return None
def get_chromsizes(bins):
"""
Infer chromsizes Series from a bin DataFrame. Assumes that the last bin of
each contig is allowed to differ in size from the rest.
Returns
-------
int or None if bins are non-uniform
"""
chromtable = (
bins.drop_duplicates(["chrom"], keep="last")[["chrom", "end"]]
.reset_index(drop=True)
.rename(columns={"chrom": "name", "end": "length"})
)
chroms, lengths = list(chromtable["name"]), list(chromtable["length"])
return pd.Series(index=chroms, data=lengths)
def bedslice(grouped, chromsizes, region):
"""
Range query on a BED-like dataframe with non-overlapping intervals.
"""
chrom, start, end = parse_region(region, chromsizes)
result = grouped.get_group(chrom)
if start > 0 or end < chromsizes[chrom]:
lo = result["end"].values.searchsorted(start, side="right")
hi = lo + result["start"].values[lo:].searchsorted(end, side="left")
result = result.iloc[lo:hi]
return result
def asarray_or_dataset(x):
return x if isinstance(x, h5py.Dataset) else np.asarray(x)
def rlencode(array, chunksize=None):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
Parameters
----------
x : 1D array_like
Input array to encode
dropna: bool, optional
Drop all runs of NaNs.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
array = asarray_or_dataset(array)
n = len(array)
if n == 0:
return (
np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=array.dtype),
)
if chunksize is None:
chunksize = n
starts, values = [], []
last_val = np.nan
for i in range(0, n, chunksize):
x = array[i : i + chunksize]
locs = where(x[1:] != x[:-1]) + 1
if x[0] != last_val:
locs = np.r_[0, locs]
starts.append(i + locs)
values.append(x[locs])
last_val = x[-1]
starts = np.concatenate(starts)
lengths = np.diff(np.r_[starts, n])
values = np.concatenate(values)
return starts, lengths, values
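# Example (illustrative):
#   rlencode(np.array([1, 1, 2, 2, 2, 3]))
#   -> (array([0, 2, 5]), array([2, 3, 1]), array([1, 2, 3]))
# i.e. run start offsets, run lengths and the value of each run.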
def cmd_exists(cmd):
return any(
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
def mad(data, axis=None):
return np.median(np.abs(data - np.median(data, axis)), axis)
@contextmanager
def open_hdf5(fp, mode="r", *args, **kwargs):
"""
Context manager like ``h5py.File`` but accepts already open HDF5 file
handles which do not get closed on teardown.
Parameters
----------
fp : str or ``h5py.File`` object
If an open file object is provided, it passes through unchanged,
provided that the requested mode is compatible.
If a filepath is passed, the context manager will close the file on
tear down.
mode : str
* r Readonly, file must exist
* r+ Read/write, file must exist
* a Read/write if exists, create otherwise
* w Truncate if exists, create otherwise
* w- or x Fail if exists, create otherwise
"""
if isinstance(fp, six.string_types):
own_fh = True
fh = h5py.File(fp, mode, *args, **kwargs)
else:
own_fh = False
if mode == "r" and fp.file.mode == "r+":
# warnings.warn("File object provided is writeable but intent is read-only")
pass
elif mode in ("r+", "a") and fp.file.mode == "r":
raise ValueError("File object provided is not writeable")
elif mode == "w":
raise ValueError("Cannot truncate open file")
elif mode in ("w-", "x"):
raise ValueError("File exists")
fh = fp
try:
yield fh
finally:
if own_fh:
fh.close()
class closing_hdf5(h5py.Group):
def __init__(self, grp):
super(closing_hdf5, self).__init__(grp.id)
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.file.close()
def close(self):
self.file.close()
def attrs_to_jsonable(attrs):
out = dict(attrs)
for k, v in attrs.items():
try:
out[k] = v.item()
except ValueError:
out[k] = v.tolist()
except AttributeError:
out[k] = v
return out
def infer_meta(x, index=None): # pragma: no cover
"""
Extracted and modified from dask/dataframe/utils.py :
make_meta (BSD licensed)
Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
    >>> infer_meta([('a', 'i8'), ('b', 'O')])
    Empty DataFrame
    Columns: [a, b]
    Index: []
    >>> infer_meta(('a', 'f8'))
    Series([], Name: a, dtype: float64)
    >>> infer_meta('i8')
    1
"""
_simple_fake_mapping = {
"b": np.bool_(True),
"V": np.void(b" "),
"M": np.datetime64("1970-01-01"),
"m": np.timedelta64(1),
"S": np.str_("foo"),
"a": np.str_("foo"),
"U": np.unicode_("foo"),
"O": "foo",
}
UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__"
def _scalar_from_dtype(dtype):
if dtype.kind in ("i", "f", "u"):
return dtype.type(1)
elif dtype.kind == "c":
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ("m", "M") else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError(
"Can't handle meta of type " "'{0}'".format(type(x).__name__)
)
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == "category":
return pd.Series(
pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index
).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
if hasattr(x, "_meta"):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), " "got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
# because it is implictly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except: # noqa
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def get_meta(
columns, dtype=None, index_columns=None, index_names=None, default_dtype=np.object
): # pragma: no cover
"""
Extracted and modified from pandas/io/parsers.py :
_get_empty_meta (BSD licensed).
"""
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be default_dtype.
dtype = defaultdict(lambda: dtype or default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: default_dtype)
# Convert column indexes to column names.
for k, v in six.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_columns is None or index_columns is False:
index = pd.Index([])
else:
data = [pd.Series([], dtype=dtype[name]) for name in index_names]
if len(data) == 1:
index = pd.Index(data[0], name=index_names[0])
else:
index = pd.MultiIndex.from_arrays(data, names=index_names)
index_columns.sort()
for i, n in enumerate(index_columns):
columns.pop(n - i)
col_dict = {col_name: pd.Series([], dtype=dtype[col_name]) for col_name in columns}
return | pd.DataFrame(col_dict, columns=columns, index=index) | pandas.DataFrame |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
from scipy.special import gamma,gammainc,gammaincc
from scipy.stats import norm
from scipy.optimize import minimize,root_scalar
import networkx as nx
from operator import itemgetter
ep = 1e-80 # Small constant added to norm.cdf terms to avoid division by zero / log(0) when the cdf underflows
tref = pd.to_datetime('2020-01-01') #Reference time for converting dates to numbers
################# FORMATTING ########################
def format_JH(url,drop_list,columns):
data = pd.read_csv(url)
if len(columns) == 2:
data[columns[1]] = data[columns[1]].fillna(value='NaN')
data = data.T.drop(drop_list).T.set_index(columns).T
data.index = pd.to_datetime(data.index,format='%m/%d/%y')
return data
def format_kaggle(folder,metric):
data_full = pd.read_csv(folder+'train.csv')
data = data_full.pivot_table(index='Date',columns=['Country_Region','Province_State'],values=metric)
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
return data
def format_predictions(path):
pred = pd.read_csv(path).fillna(value='NaN').set_index(['Country/Region','Province/State'])
for item in ['Nmax','Nmax_low','Nmax_high','sigma','sigma_low','sigma_high']:
pred[item] = pd.to_numeric(pred[item])
for item in ['th','th_low','th_high']:
pred[item] = pd.to_datetime(pred[item],format='%Y-%m-%d')
return pred
def load_sim(path):
data = pd.read_csv(path,index_col=0,header=[0,1])
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
for item in data.keys():
data[item] = pd.to_numeric(data[item])
return data
################# ESTIMATING PARAMETER VALUES ###############
def cbarr(t):
return 1/(np.sqrt(2*np.pi)*(1-norm.cdf(t)+ep))
def tmean(tf,params):
th,sigma = params
tau = (tf-th)/sigma
return -sigma*cbarr(-tau)*np.exp(-tau**2/2)+th
def tvar(tf,params):
th,sigma = params
tau = (tf-th)/sigma
    # Variance of a normal N(th, sigma^2) truncated to t <= tf; gammainc is the
    # regularized lower incomplete gamma function imported above.
    return sigma**2*(cbarr(-tau)*np.sqrt(np.pi/2)*(1+np.sign(tau)*gammainc(3/2,tau**2/2))-(cbarr(-tau)*np.exp(-tau**2/2))**2)
def cost_init(params,data,tf):
th,sigma = params
tmean_sample = (data.index.values*data.values).sum()/data.values.sum()
tvar_sample = (((data.index.values-tmean_sample)**2)*data.values).sum()/data.values.sum()
return (tmean_sample-tmean(tf,params))**2 + (tvar_sample-tvar(tf,params))**2
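# Interpretation: tmean/tvar give the mean and variance of a normal N(th, sigma^2)
# truncated to t <= tf, so cost_init is a method-of-moments objective: it picks
# (th, sigma) whose truncated moments match the sample mean and variance of the
# daily increments used for initialization.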
################### COST FUNCTIONs #################
def cost_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
penalty = (sigma-mean_sigma)**2/(2*var_sigma)
else:
penalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
return ((np.log(data.values)-prediction)**2).sum()/2 + penalty
def jac_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
dpenalty = (sigma-mean_sigma)/var_sigma
else:
dpenalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*sigma*(norm.cdf(tau)+ep))
return np.asarray([(dlogNdt*err).sum(),-err.sum(),(tau*dlogNdt*err).sum()])+np.asarray([0,0,dpenalty])
def hess_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
d2penalty = 1/var_sigma
else:
d2penalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt_s = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*(norm.cdf(tau)+ep))
dlogNdth = -dlogNdt_s/sigma
dlogNdlogK = np.ones(len(t))
dlogNdsig = -tau*dlogNdt_s/sigma
d2Ndth2_N = -tau*dlogNdt_s/sigma**2
d2Ndsig2_N = 2*tau*(1-tau**2/2)*dlogNdt_s/(sigma**2)
d2Ndsigdth_N = (1-2*tau**2/2)*dlogNdt_s/sigma**2
term1 = np.asarray([[((-d2Ndth2_N+dlogNdth**2)*err).sum(), 0, ((-d2Ndsigdth_N+dlogNdth*dlogNdsig)*err).sum()],
[0, 0, 0],
[((-d2Ndsigdth_N+dlogNdth*dlogNdsig)*err).sum(), 0, ((-d2Ndsig2_N+dlogNdsig**2)*err).sum()]])
term2 = np.asarray([[(dlogNdth**2).sum(), (dlogNdth*dlogNdlogK).sum(), (dlogNdth*dlogNdsig).sum()],
[(dlogNdth*dlogNdlogK).sum(), (dlogNdlogK**2).sum(), (dlogNdsig*dlogNdlogK).sum()],
[(dlogNdth*dlogNdsig).sum(), (dlogNdsig*dlogNdlogK).sum(), (dlogNdsig**2).sum()]])
term3 = np.zeros((3,3))
term3[2,2] = d2penalty
return term1 + term2+ term3
def th_err(th,data,sigma,tf):
tmean_sample = (data.index.values*data.values).sum()/data.values.sum()
tau = (tf-th)/sigma
tmean = -sigma*cbarr(-tau)*np.exp(-tau**2/2)+th
return tmean_sample-tmean
def cost_p_sig(params,data,sigma):
th,logK = params
t = data.index.values
tau = (t-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
return 0.5*((np.log(data.values)-prediction)**2).sum()
def jac_p_sig(params,data,sigma):
th,logK = params
t = data.index.values
tau = (t-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(np.pi*2)*sigma*(norm.cdf(tau)+ep))
return np.asarray([(dlogNdt*err).sum(),
-err.sum()])
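# Optional sanity check (illustrative, uses synthetic data made up here):
# compare the analytic gradient jac_p against a central finite difference of
# cost_p. The two should agree to within O(h**2).
def _check_jacobian(h=1e-6):
    t = np.arange(60, 100)
    data = pd.Series(np.exp(5.0) * (norm.cdf((t - 80) / 7.0) + ep), index=t)
    params = np.array([78.0, 5.2, 6.5])  # arbitrary (th, logK, sigma) test point
    analytic = jac_p(params, data, None)
    numeric = np.zeros(3)
    for i in range(3):
        dp = np.zeros(3)
        dp[i] = h
        numeric[i] = (cost_p(params + dp, data, None)
                      - cost_p(params - dp, data, None)) / (2 * h)
    return analytic, numeric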
################## FITTING #####################
def fit_erf_sig(data,p0=5e2,sigma=7):
#Get initial conditions
train = data.loc[data>0].diff().iloc[1:]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
th0 = (t.values*train.values).sum()/train.values.sum()
out = root_scalar(th_err,args=(train,sigma,t[-1]),x0=th0,x1=th0+10)
th0 = out.root
tau0 = (t[-1]-th0)/sigma
logK0 = np.log(data.iloc[-1]/(norm.cdf(tau0)+ep))
params = [th0,logK0,sigma]
#Train the model
train = data.loc[data>p0]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
out = minimize(cost_p_sig,[th0,logK0],args=(train,sigma),jac=jac_p_sig,method='BFGS')
params = list(out.x)+[sigma,2*out.fun/len(train)]
return params
def fit_erf(data,p0=5e2,verbose=False,prior=None):
#Get initial conditions
train = data.loc[data>0].diff().iloc[1:]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
th0 = (t.values*train.values).sum()/train.values.sum()
sig0 = np.sqrt(((t-th0).values**2*train.values).sum()/train.values.sum())
tf = t[-1]
if prior is not None:
mean_sigma, var_sigma = prior
lb = mean_sigma-2*np.sqrt(var_sigma)
ub = mean_sigma+2*np.sqrt(var_sigma)
else:
lb = None
ub = None
out = minimize(cost_init,[th0,sig0],args=(train,tf),bounds=((None,None),(lb,ub)))
th0,sig0 = out.x
tau0 = (tf-th0)/sig0
logK0 = np.log(data.iloc[-1]/(norm.cdf(tau0)+ep))
#Fit the curve
train = data.loc[data>p0]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
out = minimize(cost_p,[th0,logK0,sig0],args=(train,prior),method='Nelder-Mead')
#Save the parameters and score, and print states
params = list(out.x)+[2*out.fun/len(train)]
if verbose:
print(out)
return params, [th0,logK0,sig0], out.success
def fit_all(data,p0=5e2,plot=False,ylabel=None,prior=None):
params_list = pd.DataFrame(index=data.keys(),columns=['th','logK','sigma','score'])
for item in data.keys():
params_list.loc[item] = [np.nan,np.nan,np.nan,np.nan]
# Only fit regions that have nonzero new cases/fatalities on at least seven days
if (data[item].diff()>1).sum() > 7:
# Only fit regions that have at least five data points after crossing p0
if (data[item]>p0).sum() > 5:
params,params_0,success = fit_erf(data[item],p0=p0,prior=prior)
params_list.loc[item] = params
if plot:
fig,ax,params_good = plot_predictions(data[item],params)
ax.set_title(item)
ax.set_ylabel(ylabel)
ax.set_ylim((10,None))
plt.show()
return params_list.dropna()
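# Illustrative sketch of fitting a single region with fit_erf. The region key
# and threshold are placeholders; `data` is expected to be a DataFrame of
# cumulative counts with a DatetimeIndex, as produced by the loaders above.
def _example_single_fit(data, region=('Italy', 'NaN'), p0=5e2):
    series = data[region]
    params, params_init, converged = fit_erf(series, p0=p0)
    th, logK, sigma, score = params
    return {'peak_date': tref + pd.to_timedelta(th, unit='days'),
            'total_projected': np.exp(logK),
            'sigma_days': sigma,
            'score': score,
            'converged': converged}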
################## CONFIDENCE BOUNDS AND PRIORS ###################
def make_prior(data,params,thresh,plot=False,buffer=0):
params_valid = params.loc[data.iloc[-1]>thresh].replace('NaN',np.nan).dropna().sort_values('sigma')
not_peaked = params_valid['th']>(data.index[-1]-tref+pd.to_timedelta(buffer,unit='days'))/pd.to_timedelta(1,unit='days')
peaked = params_valid['th']<=(data.index[-1]-tref+pd.to_timedelta(buffer,unit='days'))/pd.to_timedelta(1,unit='days')
params_valid = params_valid.loc[peaked]
if plot:
params_valid['sigma'].loc[peaked].plot.hist()
peaked = peaked.loc[peaked].index.tolist()
not_peaked = not_peaked.loc[not_peaked].index.tolist()
return params_valid['sigma'].loc[peaked].mean(), params_valid['sigma'].loc[peaked].var(), peaked, not_peaked
def conf_bounds(t,params,hess_inv):
th,logK,sigma,score = params
lb = []
ub = []
ml = []
for ti in t:
tau = (ti-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*sigma*(norm.cdf(tau)+ep))
dlogNdx = np.asarray([-dlogNdt,1,-tau*dlogNdt])
sigma_pred2 = dlogNdx[np.newaxis,:].dot(hess_inv.dot(dlogNdx)).squeeze()*score
ub.append(np.exp(prediction+2*np.sqrt(sigma_pred2)))
lb.append(np.exp(prediction-2*np.sqrt(sigma_pred2)))
ml.append(np.exp(prediction))
return np.asarray(lb), np.asarray(ml), np.asarray(ub)
def conf_bounds_eig(t,params,hess_inv):
th,logK,sigma,score = params
v,u = np.linalg.eig(hess_inv*score)
sloppy_v = v[0]
sloppy_u = u[:,0]
params_upper = params[:3]+2*sloppy_u*np.sqrt(sloppy_v)
params_lower = params[:3]-2*sloppy_u*np.sqrt(sloppy_v)
tau = (t-th)/sigma
ml = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_lower
tau = (t-th)/sigma
lb = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_upper
tau = (t-th)/sigma
ub = np.exp(logK)*(norm.cdf(tau)+ep)
return lb,ml,ub
def get_sigvar(params,data,p0):
th,logK,sigma0,score0 = params
train = pd.to_numeric(data.loc[data>p0])
train.index=(train.index-tref)/timedelta(days=1)
H = hess_p(params[:-1],train,None)
return np.linalg.inv(H)[2,2]*params[-1]
def sweep_sigma(params,data,p0,sig_bound=30):
th,logK,sigma0,score0 = params
sigvar = get_sigvar(params,data,p0)
if sigvar < 0:
sigvar = 100
params_sweep = []
for sigma in np.logspace(np.log10(np.max([sigma0-4*np.sqrt(sigvar),1])),np.log10(sigma0+sig_bound*np.sqrt(sigvar)),200):
params_sweep.append(fit_erf_sig(data,sigma=sigma,p0=p0))
return np.asarray(params_sweep)
def get_score_thresh(params_sweep,M,c):
sigma = params_sweep[:,2]
dsig = np.diff(sigma)
sigma = sigma[1:]
score = params_sweep[1:,3]
sig_xi2 = np.min(score)
prob = np.exp(-score*M/(2*sig_xi2))/(np.exp(-score*M/(2*sig_xi2))*dsig).sum()
score_set = list(set(score))
score_set.sort()
score_set = np.asarray(score_set)
pcum = np.asarray([(prob[score<=val]*dsig[score<=val]).sum() for val in score_set])
scoremax = score_set[pcum<=c][-1]
return sigma, prob, scoremax
def conf_bounds_sigma(t,params_sweep,M,c):
sigma,prob,scoremax = get_score_thresh(params_sweep,M,c)
params_good = params_sweep[params_sweep[:,3]<=scoremax]
th,logK,sigma = params_good[np.argmin(params_good[:,-1]),:3]
tau = (t-th)/sigma
ml = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_good[0,:3]
tau = (t-th)/sigma
lb = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_good[-1,:3]
tau = (t-th)/sigma
ub = np.exp(logK)*(norm.cdf(tau)+ep)
return lb,ml,ub,params_good
def predict_all(data,params_list,p0=50,c=0.95,verbose=False,th_string=False):
pred_idx = params_list.index.copy()
predictions = []
for item in pred_idx:
if verbose:
print(item[0]+', '+item[1])
#Load the data and best-fit params
train = data[item]
params = params_list.loc[item].copy()
try:
#Fit for a range of sigma values
params_sweep = sweep_sigma(params,train,p0)
sigma,prob,scoremax = get_score_thresh(params_sweep,len(train.loc[train>p0]),c)
params_good = params_sweep[params_sweep[:,3]<=scoremax]
total = np.exp(params_good[:,1])
th = tref+pd.to_timedelta(params_good[:,0],unit='days')
if th_string:
th = [pd.Timestamp.isoformat(th[k])[:10] for k in range(len(params_good))]
sigma = params_good[:,2]
best = np.argmin(params_good[:,-1])
predictions.append([total[best],total[0],total[-1],sigma[best],sigma[0],sigma[-1],th[best],th[0],th[-1]])
        except Exception:
if verbose:
print('---------------Failed---------------')
pred_idx = pred_idx.drop(item)
predictions = pd.DataFrame(predictions,index=pred_idx,columns=['Nmax','Nmax_low','Nmax_high','sigma','sigma_low','sigma_high','th','th_low','th_high'])
return predictions
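# End-to-end sketch (thresholds are placeholders): fit every region, build a
# prior on sigma from regions that appear to have peaked, refit with that
# prior, and generate confidence-bounded predictions.
def _example_pipeline(data, p0=5e2, thresh=1e4):
    params = fit_all(data, p0=p0)
    mean_sig, var_sig, peaked, not_peaked = make_prior(data, params, thresh)
    params_prior = fit_all(data, p0=p0, prior=(mean_sig, var_sig))
    return predict_all(data, params_prior, p0=p0, th_string=True)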
########################### PLOTTING ##########################
def data_collapse(data,params,scale=True,colors=list(sns.color_palette())*10,ax=None,ms=10,
endpoint=False,alpha=1,labels=True):
if ax is None:
fig,ax=plt.subplots(figsize=(4,3))
fig.subplots_adjust(left=0.22,bottom=0.22,right=0.9)
else:
fig = np.nan
k = 0
for item in params.index:
th,logK,sigma = params[['th','logK','sigma']].loc[item]
        if th != 'NaN':
data_plot = data[item].copy()
if scale:
data_plot.index = ((data_plot.index-tref)/pd.to_timedelta(1,unit='days') - th)/sigma
data_plot = data_plot/np.exp(logK)
else:
data_plot.index = (data_plot.index-tref)/ | pd.to_timedelta(1,unit='days') | pandas.to_timedelta |
from typing import List, Tuple, Optional, Union
import numpy as np
import pandas as pd
from chemicalc.reference_spectra import ReferenceSpectra
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import StrMethodFormatter
from matplotlib.lines import Line2D
def plot_gradients(
star: ReferenceSpectra,
inst_name: str,
labels: List[str],
panel_height: float = 3,
panel_width: float = 8,
inset_ylabel: bool = False,
inset_ylabel_xoffset: float = 0,
inset_ylabel_yoffset: float = 0,
xlim: Optional[Tuple[float, float]] = None,
ylim: Optional[Tuple[float, float]] = (-0.11, 0.05),
ylim_spec: Tuple[float, float] = (0.35, 1.15),
yticks: Optional[List[float]] = None,
yticks_spec: Optional[List[float]] = None,
xtick_size: float = 18,
ytick_size: float = 18,
xlabel_size: float = 26,
ylabel_size: float = 26,
ylabel_pad: float = 10,
include_spec: bool = True,
) -> plt.figure:
"""
Plot gradients of a spectrum with respect to its stellar labels.
:param ReferenceSpectra star: Reference star object
:param str inst_name: Instrument name
:param List[str] labels: List of labels
:param float panel_height: Height of each subplot
:param float panel_width: Width of figure
:param bool inset_ylabel: Include label as inset annotation instead of in y-axis (better for large #s of labels)
:param float inset_ylabel_xoffset: Relative x position of label annotation
:param float inset_ylabel_yoffset: Relative y position of label annotation
:param Optional[Tuple[float,float]] xlim: Bounds on the x-axis
:param Optional[Tuple[float,float]] ylim: Bounds on the y-axis (for gradients)
:param Tuple[float,float] ylim_spec: Bounds on the y-axis (for the spetrum if included)
:param Optional[List[float]] yticks: Manual y-axis ticks (for gradients)
:param Optional[List[float]] yticks_spec: Manual y-axis ticks (for the spetrum if included)
:param float xtick_size: Fontsize of x-axis tick labels
:param float ytick_size: Fontsize of y-axis tick labels
:param float xlabel_size: Fontsize of x-axis labels
:param float ylabel_size: Fontsize of y-axis labels
:param float ylabel_pad: Pad between placeholder y-axis label and y-axis when using inset_ylabel
:param bool include_spec: Include spectrum in top panel
:return plt.figure: Matplotlib figure
"""
nlabels = len(labels)
if include_spec:
nfigures = nlabels + 1
else:
nfigures = nlabels
wave = star.wavelength[inst_name]
if xlim is None:
xlim = (np.min(wave), np.max(wave))
fig = plt.figure(figsize=(panel_width, panel_height * nfigures))
gs = GridSpec(nfigures, 1)
gs.update(hspace=0.0)
i = 0
if include_spec: # Plot spectrum in top panel
ax = plt.subplot(gs[0, 0])
ax.plot(wave, star.spectra[inst_name][0], c="k", lw=1)
ax.set_xlim(xlim)
ax.set_ylim(ylim_spec)
ax.set_ylabel(r"$f_\mathrm{norm}$", size=ylabel_size)
ax.tick_params(axis="x", labelsize=0)
if yticks_spec is not None:
ax.set_yticks(yticks_spec)
ax.tick_params(axis="y", labelsize=ytick_size)
i += 1
for label in labels: # Plot gradients in individual panels
ax = plt.subplot(gs[i, 0])
ax.plot(star.gradients[inst_name].loc[label], c="k", lw=1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if inset_ylabel: # Include label names as annotations
ax.set_ylabel(
r"$\frac{\partial f}{\partial X}}$",
size=ylabel_size,
rotation=0,
va="center",
labelpad=ylabel_pad,
)
ax.text(
ax.get_xlim()[0] + inset_ylabel_xoffset,
ax.get_ylim()[0] + inset_ylabel_yoffset,
f"[{label}/H]",
fontsize=18,
)
else: # Include label names in y-axis labels
ylabel = (
"$\\frac{\partial f_\mathrm{norm}}{\partial \mathrm{"
+ f"[{label}/H]"
+ "}}$"
)
ax.set_ylabel(fr"{ylabel}", size=ylabel_size)
if yticks is not None:
ax.set_yticks(yticks)
ax.tick_params(axis="y", labelsize=ytick_size)
ax.tick_params(axis="x", labelsize=xtick_size)
i += 1
if i == nfigures:
ax.set_xlabel(r"Wavelength ($\AA$)", size=xlabel_size)
else:
ax.tick_params(axis="x", labelsize=0)
plt.tight_layout()
return fig
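# Hedged usage sketch: `star` must already contain wavelength, spectra, and
# gradient entries for the instrument name used here (the default name and the
# label list are placeholders, not values required by chemicalc).
def _example_plot_gradients(star: ReferenceSpectra, inst_name: str = "D1200G"):
    fig = plot_gradients(
        star,
        inst_name=inst_name,
        labels=["Fe", "Mg", "Si"],
        inset_ylabel=True,
        include_spec=True,
    )
    fig.savefig("gradient_panels.png", dpi=200)
    return fig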
def plot_crlb(
crlb_list: Union[pd.DataFrame, List[pd.DataFrame]],
cutoff: Optional[float] = None,
labels: Union[str, List[str]] = None,
label_loc: Tuple[float, float] = (0.98, 0.95),
panel_height: float = 3,
panel_width: float = 8,
cutoff_label_xoffset: float = 3,
cutoff_label_yoffset: float = 0.05,
ylim: Optional[Tuple[float, float]] = (0.009, 1.7),
yticks: Optional[List[float]] = None,
ytick_ndecimal: int = 2,
legend_ncol: int = 1,
legend_loc: str = "lower right",
reverse_legend: bool = False,
color_palette: str = "plasma",
) -> plt.figure:
"""
Plots standard presentation of CRLBs
:param Union[pd.DataFrame,List[pd.DataFrame]] crlb_list: CRLB dataframe or list of CRLB dataframes
:param Optional[float] cutoff: Cutoff precision for abundances
:param Union[str,List[str]] labels: List of additional text to include in each panel.
Must be same length as the number of CRLB dataframes
:param Tuple[float,float] label_loc: Location of additional text box
:param float panel_height: Height of each subplot
:param float panel_width: Width of each subplot
:param float cutoff_label_xoffset: Relative x position of cutoff label (increases to the left)
:param float cutoff_label_yoffset: Relative y position of cutoff label
:param Optional[Tuple[float,float]] ylim: Bound on y-axis
:param Optional[List[float]] yticks: Manual y-axis ticks.
Helpful when log-spacing yields only one tick on the y-axis.
:param int ytick_ndecimal: Number of decimal places to include in y-axis ticks.
:param int legend_ncol: Number of legend columns
:param str legend_loc: Location of legend (standard matplotlib inputs)
:param bool reverse_legend: Reverse order of legend items
:param str color_palette: Color palette of lines and markers (standard matplotlib selection)
:return plt.figure: Matplotlib figure
"""
if type(crlb_list) is not list:
crlb_list = [crlb_list]
# Sort sets of CRLBs
# ToDo: Thoroughly check that sorting works as intended
order = np.argsort([-len(crlb.index) for crlb in crlb_list])
sorted_crlb_list = [crlb_list[i] for i in order]
all_crlb = pd.concat(sorted_crlb_list, axis=1, sort=False)
all_labs = all_crlb.index
all_cols = all_crlb.columns
nlabs = all_labs.shape[0]
npanels = len(crlb_list)
# Initialize Figure
fig = plt.figure(figsize=(panel_width, panel_height * npanels))
gs = GridSpec(npanels, 1)
gs.update(hspace=0.0)
# Iterate through panels
for i, crlb in enumerate(crlb_list):
if i == 0:
ax = plt.subplot(gs[i, 0])
else:
ax = plt.subplot(gs[i, 0], sharex=ax)
labs = all_crlb.index
cols = crlb.columns
crlb_sorted = crlb.reindex(labs)
c = plt.cm.get_cmap(color_palette, len(cols))
# Iterate through CRLBs w/in panel
for j, col in enumerate(
all_cols
): # Placeholder to make x-axis match between panels
mask = np.isfinite(all_crlb.iloc[:, j].values)
ax.plot(
all_crlb.iloc[:, j].index[mask],
all_crlb.iloc[:, j].values[mask],
marker="",
markersize=0,
linestyle="",
linewidth=0,
)
for j, col in enumerate(crlb):
mask = np.isfinite(crlb_sorted.loc[:, col].values)
plt.plot(
crlb_sorted.loc[:, col].index[mask],
crlb_sorted.loc[:, col].values[mask],
marker="o",
markersize=8,
markeredgewidth=1,
linestyle="-",
linewidth=1,
color=c(j),
markeredgecolor="k",
label=col,
)
# Plot cutoff line
if cutoff:
ax.axhline(cutoff, ls="--", lw=1, c="k")
plt.text(
s=f"{cutoff:01.1f} dex",
x=nlabs - cutoff_label_xoffset,
y=cutoff + cutoff_label_yoffset,
fontsize=12,
)
# Axes
ax.set_ylabel(r"$\sigma$[X/H]", size=16)
# ToDo: replace StrMethodFormatter with FuncFormatter
ax.yaxis.set_major_formatter(
StrMethodFormatter("{x:." + f"{ytick_ndecimal}" + "f}")
)
ax.set_xlim(-0.5, nlabs - 0.5)
ax.set_ylim(ylim)
ax.set_yscale("log")
plt.grid(True, "both", "both")
if i == npanels - 1:
ax.tick_params(axis="x", which="major", rotation=-45)
else:
ax.tick_params(axis="x", labelsize=0)
for j, label in enumerate(ax.get_xticklabels()):
label.set_horizontalalignment("left")
# ToDo: replace StrMethodFormatter with FuncFormatter
ax.yaxis.set_major_formatter(
StrMethodFormatter("{x:." + f"{ytick_ndecimal}" + "f}")
)
# Add Label
if labels is not None:
if type(labels) is not list:
labels = [labels]
plt.text(
label_loc[0],
label_loc[1],
s=labels[i],
fontsize=10,
horizontalalignment="right",
verticalalignment="top",
transform=ax.transAxes,
bbox=dict(fc="white", ec="black", lw=1, pad=5.0),
)
# Legend
plt.legend(fontsize=10, ncol=legend_ncol, loc=legend_loc)
if reverse_legend:
leg_handles, leg_labels = fig.axes[i].get_legend_handles_labels()
fig.axes[i].legend(
leg_handles[::-1],
leg_labels[::-1],
fontsize=10,
ncol=legend_ncol,
loc=legend_loc,
)
if yticks is not None:
fig.axes[i].set_yticks(yticks)
plt.tight_layout()
return fig
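# Hedged usage sketch: each dataframe is assumed to be indexed by stellar label
# with one column per instrument configuration; the cutoff and panel text are
# placeholders.
def _example_plot_crlb(crlb_tables: List[pd.DataFrame]):
    fig = plot_crlb(
        crlb_tables,
        cutoff=0.3,
        labels=[f"Panel {i + 1}" for i in range(len(crlb_tables))],
    )
    fig.savefig("crlb_panels.png", dpi=200)
    return fig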
def overplot_crlb(
crlb_list: List[pd.DataFrame],
names: List[str],
cutoff: Optional[float] = None,
labels: Union[str, List[str]] = None,
label_loc: Tuple[float, float] = (0.98, 0.95),
panel_height: float = 3,
panel_width: float = 8,
cutoff_label_xoffset: float = 3,
cutoff_label_yoffset: float = 0.05,
ylim: Optional[Tuple[float, float]] = (0.009, 1.7),
yticks: Optional[List[float]] = None,
legend_ncol: int = 1,
legend_loc: str = "lower right",
reverse_legend: bool = False,
legend2_ncol: int = 1,
legend2_loc: Tuple[float, float] = (1, 0.425),
reverse_legend2: bool = False,
color_palette: str = "plasma",
) -> plt.figure:
"""
Overplots two groups of CRLBs using different line styles and marker shapes
:param List[pd.DataFrame] crlb_list: List of CRLB dataframes
:param List[str] names: Labels to show in second legend
:param Optional[float] cutoff: Cutoff precision for abundances
:param Union[str,List[str]] labels: List of additional text to include in each panel
:param Tuple[float,float] label_loc: Location of additional text box
:param float panel_height: Height of each subplot
:param float panel_width: Width of each subplot
:param float cutoff_label_xoffset: Relative x position of cutoff label (increases to the left)
:param float cutoff_label_yoffset: Relative y position of cutoff label
:param Optional[Tuple[float,float]] ylim: Bound on y-axis
:param Optional[List[float]] yticks: Manual y-axis ticks
:param int legend_ncol: Number of legend columns
:param str legend_loc: Location of legend (standard matplotlib inputs)
:param bool reverse_legend: Reverse order of legend items
:param int legend2_ncol: Number of legend columns for second legend
:param Tuple[float, float] legend2_loc: Location of legend for second legend (axis coords)
:param bool reverse_legend2: Reverse order of legend items for second legend
:param str color_palette: Color palette of lines and markers
:return plt.figure: Matplotlib figure
"""
# ToDo: Thoroughly check that sorting works as intended
    # Determine the CRLB set with the most labels
lead_crlb = np.argmax([len(crlb.index) for crlb in crlb_list])
all_labs = crlb_list[lead_crlb].index
nlabs = all_labs.shape[0]
# Initialize Figure
fig = plt.figure(figsize=(panel_width, panel_height))
gs = GridSpec(1, 1)
gs.update(hspace=0.0)
ax = plt.subplot(gs[0, 0])
c = plt.cm.get_cmap(color_palette, np.max([crlb.shape[1] for crlb in crlb_list]))
lines = ["-", "--", ":", "-."]
markers = ["s", "o", "^", "*"]
# Iterate through panels
for i, crlb in enumerate(crlb_list):
labs = crlb.index
cols = crlb_list[i].columns
# Iterate through CRLBs w/in set
for j, col in enumerate(cols):
if i == 0:
label = col
else:
label = "_nolegend_"
mask = | pd.notnull(crlb[col].loc[labs].values) | pandas.notnull |
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
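# Quick illustration of the helpers above (not exercised directly by a test):
# fixed_off represents UTC-07:00, so a wall time localized with it corresponds
# to a UTC value seven hours later, e.g.
#   Timestamp("2011-01-01 00:00", tz=fixed_off).tz_convert("UTC")
#   -> Timestamp("2011-01-01 07:00:00+0000", tz="UTC")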
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
# note: this tz tests that a tz-naive index can be localized
# and de-localized successfully, when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
# cant localize a tz-aware object
with pytest.raises(
TypeError, match="Already tz-aware, use tz_convert to convert"
):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
expected = idx._with_freq(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp._with_freq(None))
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
msg = "Length of ambiguous bool-array must be the same size as vals"
with pytest.raises(Exception, match=msg):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
date_range(
"2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H"
)
times = date_range(
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
[
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
# construction with an nonexistent end-point
with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"):
date_range(
"2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H"
)
times = date_range(
"2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option
)
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range("1/1/2009", "1/1/2010")
dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
@pytest.mark.parametrize(
"method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]]
)
def test_dti_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
if method == "raise":
with pytest.raises(pytz.NonExistentTimeError, match="2015-03-29 02:00:00"):
dti.tz_localize(tz, nonexistent=method)
elif exp == "invalid":
msg = (
"The nonexistent argument must be one of "
"'raise', 'NaT', 'shift_forward', 'shift_backward' "
"or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=method)
else:
result = dti.tz_localize(tz, nonexistent=method)
expected = DatetimeIndex([exp] * n, tz=tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
dti = DatetimeIndex([Timestamp(start_ts)])
result = dti.tz_localize(tz, nonexistent=shift)
expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917
tz = tz_type + "Europe/Warsaw"
dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.parametrize(
"timezone",
[
"US/Pacific",
"US/Eastern",
"UTC",
"Asia/Kolkata",
"Asia/Shanghai",
"Australia/Canberra",
],
)
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
# ------------------------------------------------------------
# DatetimeIndex.__new__
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_constructor_static_tzinfo(self, prefix):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST")
index.hour
index[0]
def test_dti_constructor_with_fixed_tz(self):
off = FixedOffset(420, "+07:00")
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00")
assert (rng.values == rng3.values).all()
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_convert_datetime_list(self, tzstr):
dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo")
dr2 = DatetimeIndex(list(dr), name="foo", freq="D")
tm.assert_index_equal(dr, dr2)
def test_dti_construction_univalent(self):
rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern")
rng2 = DatetimeIndex(data=rng, tz="US/Eastern")
tm.assert_index_equal(rng, rng2)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_from_tzaware_datetime(self, tz):
d = [datetime(2012, 8, 19, tzinfo=tz)]
index = DatetimeIndex(d)
assert timezones.tz_compare(index.tz, tz)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_constructors(self, tzstr):
"""Test different DatetimeIndex constructions with timezone
Follow-up of GH#4229
"""
arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]
idx1 = to_datetime(arr).tz_localize(tzstr)
idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
idx2 = idx2._with_freq(None) # the others all have freq=None
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
# -------------------------------------------------------------
# Unsorted
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_date_accessor(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_time_accessor(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_timetz_accessor(self, tz_naive_fixture):
# GH21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz)
result = index.timetz
tm.assert_numpy_array_equal(result, expected)
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_dti_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H")
assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq
t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T")
assert t4.tz_convert(tz="UTC").freq == t4.freq
def test_drop_dst_boundary(self):
# see gh-18031
tz = "Europe/Brussels"
freq = "15min"
start = Timestamp("201710290100", tz=tz)
end = Timestamp("201710290300", tz=tz)
index = pd.date_range(start=start, end=end, freq=freq)
expected = DatetimeIndex(
[
"201710290115",
"201710290130",
"201710290145",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290300",
],
tz=tz,
freq=freq,
ambiguous=[
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
],
)
result = index.drop(index[0])
tm.assert_index_equal(result, expected)
def test_date_range_localize(self):
rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern")
rng3 = date_range("3/11/2012 03:00", periods=15, freq="H")
rng3 = rng3.tz_localize("US/Eastern")
tm.assert_index_equal(rng._with_freq(None), rng3)
# DST transition time
val = rng[0]
exp = Timestamp("3/11/2012 03:00", tz="US/Eastern")
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(
["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H"
)
tm.assert_index_equal(rng, rng2)
exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp("3/11/2012 01:00", tz="US/Eastern")
assert exp.hour == 1
assert rng[1] == exp
rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern")
assert rng[2].hour == 3
def test_timestamp_equality_different_timezones(self):
utc_range = date_range("1/1/2000", periods=20, tz="UTC")
eastern_range = utc_range.tz_convert("US/Eastern")
berlin_range = utc_range.tz_convert("Europe/Berlin")
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_dti_intersection(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_dti_equals_with_tz(self):
left = date_range("1/1/2011", periods=100, freq="H", tz="utc")
right = date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern")
assert not left.equals(right)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_nat(self, tzstr):
idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT])
assert isna(idx[1])
assert idx[0].tzinfo is not None
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_astype_asobject_tzinfos(self, tzstr):
# GH#1345
# dates around a dst transition
rng = date_range("2/13/2010", "5/6/2010", tz=tzstr)
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_with_timezone_repr(self, tzstr):
rng = date_range("4/13/2010", "5/6/2010")
rng_eastern = rng.tz_localize(tzstr)
rng_repr = repr(rng_eastern)
assert "2010-04-13 00:00:00" in rng_repr
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_take_dont_lose_meta(self, tzstr):
rng = date_range("1/1/2000", periods=20, tz=tzstr)
result = rng.take(range(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_utc_box_timestamp_and_localize(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tzstr)
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
assert stamp == expected
assert stamp.tzinfo == expected.tzinfo
# right tzinfo
rng = date_range("3/13/2012", "3/14/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tzstr)
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr(
rng_eastern[0].tzinfo
)
def test_dti_to_pydatetime(self):
dt = dateutil.parser.parse("2012-06-13T01:39:00Z")
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
    def test_dti_to_pydatetime_fixedtz(self):
dates = np.array(
[
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
)
dti = DatetimeIndex(dates)
result = dti.to_pydatetime()
tm.assert_numpy_array_equal(dates, result)
result = dti._mpl_repr()
tm.assert_numpy_array_equal(dates, result)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")])
def test_with_tz(self, tz):
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=pd.offsets.Hour())
assert dr.tz is pytz.utc
# DateRange with naive datetimes
dr = bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc)
dr = bdate_range("1/1/2005", "1/1/2009", tz=tz)
# normalized
central = dr.tz_convert(tz)
assert central.tz is tz
naive = central[0].to_pydatetime().replace(tzinfo=None)
comp = conversion.localize_pydatetime(naive, tz).tzinfo
assert central[0].tz is comp
# compare vs a localized tz
naive = dr[0].to_pydatetime().replace(tzinfo=None)
comp = conversion.localize_pydatetime(naive, tz).tzinfo
assert central[0].tz is comp
# datetimes with tzinfo set
dr = bdate_range(
datetime(2005, 1, 1, tzinfo=pytz.utc), datetime(2009, 1, 1, tzinfo=pytz.utc)
)
msg = "Start and end cannot both be tz-aware with different timezones"
with pytest.raises(Exception, match=msg):
bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_field_access_localize(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
assert (rng.hour == 0).all()
# a more unusual time zone, #1946
dr = date_range(
"2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan"
)
expected = Index(np.arange(10, dtype=np.int64))
tm.assert_index_equal(dr.hour, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_convert_tz_aware_datetime_datetime(self, tz):
# GH#1581
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]
dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates]
result = DatetimeIndex(dates_aware)
assert timezones.tz_compare(result.tz, tz)
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is pytz.utc
def test_dti_union_aware(self):
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern")
result = rng.union(rng2)
expected = rng.astype("O").union(rng2.astype("O"))
tm.assert_index_equal(result, expected)
assert result[0].tz.zone == "US/Central"
assert result[-1].tz.zone == "US/Eastern"
def test_dti_union_mixed(self):
# GH 21671
rng = DatetimeIndex([ | Timestamp("2011-01-01") | pandas.Timestamp |
#!/usr/bin/env python
import os, sys
import pandas as pd
from pdb import set_trace
strProjectList = sys.argv[1]
#strProjectList = 'Project_list2.txt'
def Summation_all_final_result():
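    # Collects every per-sample merged target result listed in strProjectList
    # (one merged table per sample is expected), coerces the count columns
    # (everything after Sample/Barcode/Ref) to int64, sums them per
    # (Sample, Barcode, Ref) group, and writes one combined table to ./Output/.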
with open(strProjectList) as Input:
listdfResult = []
for i, strSample in enumerate(Input):
#print(strSample)
#if i == 2: break
strSample = strSample.replace('\n','').replace('\r','').strip()
strFinalResultDir = './Output/%s/Summary/Merge_target_result/' % strSample
for j, strFinalResultFile in enumerate(os.listdir(strFinalResultDir)):
if j > 0:
                    print('Expected exactly one merged result file per sample, but found more; check the target base change file.')
sys.exit(1)
print(strFinalResultFile)
strFinalResultPath = './Output/%s/Summary/Merge_target_result/%s' % (strSample, strFinalResultFile)
listdfResult.append(pd.read_table(strFinalResultPath, low_memory=False))
dfAll = pd.concat(listdfResult)
dfForw = dfAll.iloc[:,0:3]
dfReve = dfAll.iloc[:,3:].replace(' ', '0').astype('int64')
dfAllResult = pd.concat([dfForw, dfReve], axis=1).groupby(['Sample','Barcode','Ref']).sum()
dfAllResult.reset_index(inplace=True)
dfAllResult.to_csv('./Output/Summation_'+strProjectList, sep='\t')
#with open('./Output/%s/Summary/Merge_target_result/%s' % (strSample, strFinalResultFile)) as FinalResult:
"""
for strRow in FinalResult:
listCol = strRow.replace('\n','').split('\t')
listSamBarRef = listCol[:3]
= listCol[3:]
"""
def SummationSubIndel():
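    """Aggregate the per-sample substitution/indel ('sub') result files: read counts are
    summed for every (sample, barcode, indel-pattern) combination and written to a single
    tab-separated summary file under ./Output/."""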
with open(strProjectList) as Input,\
open('./Output/Summation_' + strProjectList.replace('.txt','') + '_sub_indel.txt', 'w') as Output:
dictResult = {}
for i, strSample in enumerate(Input):
print(strSample)
#if i == 2: break
strSample = strSample.replace('\n','').replace('\r','').strip()
strSubIndelDir = './Output/%s/result' % strSample
for strSubIndelFile in os.listdir(strSubIndelDir):
if 'sub' in strSubIndelFile:
with open(strSubIndelDir + '/' + strSubIndelFile) as SubIndel:
for strRow in SubIndel:
listCol = strRow.replace('\n','').split('\t')
setIndelPattern = set(listCol[3].split(','))
intCount = int(listCol[2])
strNameBarcodePattern = '-'.join(listCol[0:2])+'-'+''.join(setIndelPattern)
try:
dictResult[strNameBarcodePattern] += intCount
except KeyError:
dictResult[strNameBarcodePattern] = intCount
for strNameBarcodePattern, intCount in dictResult.items():
Output.write('\t'.join(strNameBarcodePattern.split('-')) + '\t' + str(intCount) + '\n')
def ConfirmValidation():
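    """Spot-check helper: reload the merged target-result tables for the first two
    projects so the summed output can be validated against the raw per-sample data."""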
with open(strProjectList) as Input:
listdfResult = []
for i, strSample in enumerate(Input):
if i == 2: break
print(strSample)
strSample = strSample.replace('\n','').replace('\r','').strip()
strFinalResultDir = './Output/%s/Summary/Merge_target_result/' % strSample
for strFinalResultFile in os.listdir(strFinalResultDir):
print(strFinalResultFile)
strFinalResultPath = './Output/%s/Summary/Merge_target_result/%s' % (strSample, strFinalResultFile)
listdfResult.append( | pd.read_table(strFinalResultPath, low_memory=False) | pandas.read_table |
import pandas as pd
import numpy as np
import pytest
import unittest
import datetime
import sys
import context
from fastbt.utils import *
def equation(a,b,c,x,y):
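    """Quadratic helper used as the target function for the multi_args tests."""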
return a*x**2 + b*y + c
def test_multiargs_simple():
seq = pd.Series([equation(1,2,3,4,y) for y in range(20, 30)]).sort_index()
seq.index = range(20,30)
constants = {'a':1, 'b':2, 'c':3, 'x':4}
variables = {'y': range(20, 30)}
par = multi_args(equation, constants=constants, variables=variables).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert (x,) == y
def test_multiargs_product():
seq = []
for x in range(0,10):
for y in range(10,15):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 10), range(10, 15)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 10), 'y': range(10,15)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_multiargs_max_limit():
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
assert len(par) == 1000
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
@pytest.mark.parametrize("maxLimit", [2000, 3000, 5000, 10000])
def test_multiargs_max_limit_adjust(maxLimit):
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True, maxLimit=maxLimit).sort_index()
assert len(par) == min(maxLimit, 5000)
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_tick():
assert tick(112.71) == 112.7
assert tick(112.73) == 112.75
assert tick(1054.85, tick_size=0.1) == 1054.8
assert tick(1054.851, tick_size=0.1) == 1054.9
assert tick(104.73, 1) == 105
assert tick(103.2856, 0.01) == 103.29
assert tick(0.007814, 0.001) == 0.008
assert tick(0.00003562, 0.000001) == 0.000036
assert tick(0.000035617, 0.00000002) == 0.00003562
def test_tick_series():
s = pd.Series([100.43, 200.32, 300.32])
result = [100.45, 200.3, 300.3]
for x,y in zip(tick(s), result):
assert x==y
def test_stop_loss():
assert stop_loss(100, 3) == 97
assert stop_loss(100, 3, order='S') == 103
assert stop_loss(1013, 2.5, order='B', tick_size=0.1) == 987.7
    assert stop_loss(100, -3) == 103 # This behaviour should be deprecated
assert stop_loss(100, -3, order='S') == 97
def test_stop_loss_error():
with pytest.raises(ValueError):
assert stop_loss(100, 3, 'BS')
def test_stop_loss_series():
p = pd.Series([100.75, 150.63, 180.32])
result = [95.71, 143.1, 171.3]
for x,y in zip(stop_loss(p, 5, tick_size=0.01), result):
assert pytest.approx(x, rel=0.001, abs=0.001) == y
# Test for sell
result = [105.79, 158.16, 189.34]
for x,y in zip(stop_loss(p, 5, order='S', tick_size=0.01), result):
assert pytest.approx(x, rel=0.001, abs=0.001) == y
def test_create_orders_simple():
df = pd.DataFrame(np.arange(20).reshape(5,4), columns=list('ABCD'))
orders = create_orders(df, {'A': 'one', 'B': 'two', 'C': 'three', 'D': 'four'},
exchange='NSE', num=range(5))
df['exchange'] = 'NSE'
df['num'] = [0,1,2,3,4]
assert list(orders.columns) == ['one', 'two', 'three', 'four', 'exchange', 'num']
assert list(df.exchange) == ['NSE'] * 5
class TestRecursiveMerge(unittest.TestCase):
def setUp(self):
df1 = pd.DataFrame(np.random.randn(6,3), columns=list('ABC'))
df2 = pd.DataFrame(np.random.randn(10,3), columns=list('DEF'))
df3 = pd.DataFrame(np.random.randn(7,4), columns=list('GHIJ'))
df4 = pd.DataFrame(np.random.randn(10,7), columns=list('AMNDXYZ'))
df1['idx'] = range(100,106)
df2['idx'] = range(100, 110)
df3['idx'] = range(100, 107)
df4['idx'] = range(100, 110)
self.dfs = [df1, df2, df3, df4]
def test_recursive_merge_simple(self):
df = recursive_merge(self.dfs)
assert len(df) == 6
assert df.shape == (6, 21)
assert df.loc[3, 'X'] == self.dfs[3].loc[3, 'X']
assert df.iloc[2, 11] == self.dfs[2].iloc[2, 3]
def test_recursive_on(self):
df = recursive_merge(self.dfs, on=['idx'])
assert df.shape == (6, 18)
assert df.loc[3, 'X'] == self.dfs[3].loc[3, 'X']
assert df.iloc[2, 11] == self.dfs[3].iloc[2, 0]
    def test_recursive_on_columns(self):
dct = {'1': 'D', '2': 'G', '3': 'X'}
df = recursive_merge(self.dfs, on=['idx'], columns=dct)
assert df.shape == (6, 7)
assert list(sorted(df.columns)) == ['A', 'B', 'C', 'D', 'G', 'X', 'idx']
assert df.loc[3, 'X'] == self.dfs[3].loc[3, 'X']
def test_get_nearest_option():
assert get_nearest_option(23120) == [23100]
assert get_nearest_option(23120, opt='P') == [23100]
assert get_nearest_option(28427, n=3) == [28400, 28500, 28600]
assert get_nearest_option(28400, n=3) == [28400, 28500, 28600]
assert get_nearest_option(28495, n=5, opt='P') == [28400, 28300, 28200, 28100, 28000]
assert get_nearest_option(3000, n=3, step=30) == [3000, 3030, 3060]
def test_calendar_simple():
s,e = '2019-01-01', '2019-01-10'
for a,b in zip(calendar(s,e), pd.bdate_range(s,e)):
assert a == b
for a,b in zip(calendar(s,e,alldays=True), pd.date_range(s,e)):
assert a == b
def test_calendar_holidays():
s,e,h = '2019-01-01', '2019-01-07', ['2019-01-03', '2019-01-07']
bdays = [pd.to_datetime(dt) for dt in [
'2019-01-01', '2019-01-02', '2019-01-04'
]]
for a,b in zip(calendar(s,e,h), bdays):
assert a == b
days = [pd.to_datetime(dt) for dt in [
'2019-01-01', '2019-01-02', '2019-01-04', '2019-01-05', '2019-01-06'
]]
for a,b in zip(calendar(s,e,h,True), days):
assert a == b
def test_calendar_bdate_timestamp():
s,e,st,et = '2019-01-01', '2019-01-01', '04:00', '18:00'
for a,b in zip(calendar(s,e,start_time=st, end_time=et),
pd.date_range('2019-01-01 04:00', '2019-01-01 18:00', freq='H')):
assert a == b
def test_calendar_timestamp_length():
s,e,st = '2019-01-01', '2019-01-01', '04:00'
assert len(calendar(s,e,start_time=st, freq='1min')) == 1200
assert len(calendar(s,e,start_time=st, freq='H')) == 20
et = '16:00'
assert len(calendar(s,e,end_time=et, freq='1min')) == 961
assert len(calendar(s,e,end_time=et, freq='H')) == 17
assert len(calendar(s,e,start_time=st, end_time=et, freq='1min')) == 721
assert len(calendar(s,e,start_time=st, end_time=et, freq='H')) == 13
def test_calendar_timestamp_position():
s,e,st,et = '2019-01-01', '2019-01-04', '10:00', '18:00'
ts = calendar(s,e,start_time=st, end_time=et, freq='1min')
assert str(ts[721]) == '2019-01-02 14:00:00'
assert str(ts[1000]) == '2019-01-03 10:38:00'
def test_calendar_multiple_days():
s,e,st,et = '2019-01-01', '2019-01-10', '10:00:00', '21:59:59'
kwargs = {'start': s, 'end': e, 'start_time': st, 'end_time': et}
holidays = ['2019-01-04', '2019-01-05', '2019-01-06']
assert len(calendar(**kwargs)) == 8
assert len(calendar(alldays=True, **kwargs)) == 10
assert len(calendar(holidays=holidays, alldays=True, **kwargs)) == 7
assert len(calendar(holidays=holidays, alldays=True, **kwargs, freq='H')) == 7*12
assert len(calendar(holidays=holidays, alldays=True, **kwargs, freq='10min')) == 7*12*6
assert len(calendar(holidays=holidays, alldays=True, **kwargs, freq='s')) == 7*12*3600
class TestGetOHLCIntraday(unittest.TestCase):
def setUp(self):
timestamp = pd.date_range('2019-01-01', freq='15min', periods=480)
dfs = []
for i,s in zip(range(1,4), ['A', 'B', 'C']):
df = pd.DataFrame()
df['open'] = 100*i + np.arange(480)
df['high'] = df['open'] + 3
df['low'] = df['open'] - 3
df['close'] = df['open'] + 1
df['timestamp'] = timestamp
df['symbol'] = s
dfs.append(df)
self.df = pd.concat(dfs).reset_index(drop=True)
def test_simple(self):
df = get_ohlc_intraday(self.df, '13:00', '20:00').sort_index()
assert(len(df) == 15)
idx = pd.IndexSlice
dt = datetime.date(2019,1,1)
assert(df.loc[idx[dt, 'A'], 'open'] == 152)
assert(df.loc[idx[dt, 'A'], 'close'] == 181)
assert(df.loc[idx[dt, 'A'], 'high'] == 183)
assert(df.loc[idx[dt, 'A'], 'low'] == 149)
def test_date_column(self):
df = self.df
df['date_column'] = df.timestamp.dt.date
df = get_ohlc_intraday(self.df, '10:00', '16:00',
date_col='date_column').sort_index()
idx = pd.IndexSlice
assert(df.loc[idx[datetime.date(2019,1,4), 'C'], 'open'] == 628)
assert(df.loc[idx[datetime.date(2019,1,4), 'C'], 'high'] == 655)
def test_column_mappings(self):
df = self.df
df.columns = list('ABCDEF') # renaming columns
df['date_column'] = df.E.dt.date
mappings = {'A': 'open', 'B': 'high', 'C': 'low', 'D': 'close',
'E': 'timestamp', 'F': 'symbol', 'date_column': 'date'}
df = get_ohlc_intraday(self.df, start_time='10:00', end_time='16:00',
date_col='date', col_mappings=mappings).sort_index()
idx = pd.IndexSlice
assert(df.loc[idx[datetime.date(2019,1,4), 'B'], 'open'] == 528)
assert(df.loc[idx[datetime.date(2019,1,4), 'B'], 'low'] == 525)
def test_sort(self):
df = get_ohlc_intraday(self.df.sort_values(by='timestamp'),
'10:00', '16:00', sort=True)
idx = pd.IndexSlice
assert(df.loc[idx[datetime.date(2019,1,4), 'B'], 'close'] == 553)
class TestGetExpandingOHLC(unittest.TestCase):
def setUp(self):
index = | pd.date_range('2019-01-01', freq='H', periods=720) | pandas.date_range |
# Import Libraries
import pandas as pd
import numpy as np
import yfinance as yf
import time
# Import Libraries
from scipy import stats
# import pandas as pd
# import numpy as np
# import yfinance as yf
import matplotlib.pyplot as plt
# import time
# Import Libraries
from ta.momentum import RSIIndicator
from ta.trend import SMAIndicator
# import numpy as np
# import pandas as pd
# import matplotlib.pyplot as plt
# import yfinance as yf
import math
class YinsFinancialTools:
"""
Yin's Machine Learning Package for Financial Tools
Copyright © W.Y.N. Associates, LLC, 2009 – Present
"""
# Define function: Yins Timer Algorithm
def Yin_Timer(
start_date = '2015-01-01',
end_date = '2021-01-01',
ticker = 'FB',
rescale = True,
figsize = (15,6),
LB = -1,
UB = +1,
pick_SMA = 1,
sma_threshold_1 = 10,
sma_threshold_2 = 30,
sma_threshold_3 = 100,
plotGraph = True,
verbose = True,
printManual = True,
gotoSEC = True):
if printManual:
print("------------------------------------------------------------------------------")
print("MANUAL: ")
print("Try run the following line by line in a Python Notebook.")
print(
"""
MANUAL: To install this python package, please use the following code.
# In a python notebook:
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# In a command line:
# pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# Run
            temp = Yin_Timer(
start_date = '2015-01-01',
end_date = '2021-01-01',
ticker = 'FB',
rescale = True,
figsize = (15,6),
LB = -1,
UB = +1,
pick_SMA = 1,
sma_threshold_1 = 10,
sma_threshold_2 = 30,
sma_threshold_3 = 100,
plotGraph = True,
verbose = True,
printManual = True,
gotoSEC = True)
""" )
print("Manual ends here.")
print("------------------------------------------------------------------------------")
# # Initiate Environment
# import pandas as pd
# import numpy as np
# import yfinance as yf
# import time
# Time
start = time.time()
# Get Data
dta = yf.download(ticker, start_date, end_date)
dta_stock = pd.DataFrame(dta)
# Define Checking Functions:
if LB > 0:
print('Lower Bound (LB) for Signal is not in threshold and is set to default value: -0.01')
LB = -0.01
if UB < 0:
print('Upper Bound (UB) for Signal is not in threshold and is set to default value: +0.01')
UB = +0.01
def chk(row):
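            # keep the averaged SMA distance only when it breaks out of the [LB, UB] band;
            # values inside the band are treated as "no signal" and mapped to zero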
if row['aveDIST'] < LB or row['aveDIST'] > UB:
val = row['aveDIST']
else:
val = 0
return val
# Generate Data
df_stock = dta_stock
close = df_stock['Adj Close']
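        # daily simple return, used later for the expected-return / volatility summary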
df_stock['Normalize Return'] = close / close.shift() - 1
# Generate Signal:
if len(dta_stock) < 200:
data_for_plot = []
basicStats = []
print('Stock went IPO within a year.')
else:
# Create Features
df_stock['SMA12'] = close.rolling(window=12).mean()
df_stock['SMA20'] = close.rolling(window=20).mean()
df_stock['SMA50'] = close.rolling(window=50).mean()
df_stock['SMA100'] = close.rolling(window=100).mean()
df_stock['SMA200'] = close.rolling(window=200).mean()
df_stock['DIST12'] = close / df_stock['SMA12'] - 1
df_stock['DIST20'] = close / df_stock['SMA20'] - 1
df_stock['DIST50'] = close / df_stock['SMA50'] - 1
df_stock['DIST100'] = close / df_stock['SMA100'] - 1
df_stock['DIST200'] = close / df_stock['SMA200'] - 1
df_stock['aveDIST'] = (df_stock['DIST12'] + df_stock['DIST20'] +
df_stock['DIST50'] + df_stock['DIST100'] + df_stock['DIST200'])/5
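            # aveDIST is the mean relative distance of the close from the five moving averages;
            # chk() below turns it into a buy/sell signal when it leaves the [LB, UB] band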
df_stock['Signal'] = df_stock.apply(chk, axis = 1)
# Plot
import matplotlib.pyplot as plt
from ta.trend import sma_indicator
if plotGraph:
tickers = ticker
buy_threshold = LB
sell_threshold = UB
# Get Data
stock = dta
# Scale Data
if rescale == False:
smaData1 = stock['Close'] - sma_indicator(stock['Close'], sma_threshold_1, True)
smaData2 = stock['Close'] - sma_indicator(stock['Close'], sma_threshold_2, True)
smaData3 = stock['Close'] - sma_indicator(stock['Close'], sma_threshold_3, True)
else:
smaData1 = stock['Close'] - sma_indicator(stock['Close'], sma_threshold_1, True)
smaData2 = stock['Close'] - sma_indicator(stock['Close'], sma_threshold_2, True)
smaData3 = stock['Close'] - sma_indicator(stock['Close'], sma_threshold_3, True)
maxDist = max(abs(stock['Close'] - sma_indicator(stock['Close'], sma_threshold_3, True)))
smaData1 = (stock['Close'] - sma_indicator(stock['Close'], sma_threshold_1, True)) / maxDist
smaData2 = (stock['Close'] - sma_indicator(stock['Close'], sma_threshold_2, True)) / maxDist
smaData3 = (stock['Close'] - sma_indicator(stock['Close'], sma_threshold_3, True)) / maxDist
# Conditional Buy/Sell => Signals
conditionalBuy1 = np.where(smaData1 < buy_threshold, stock['Close'], np.nan)
conditionalSell1 = np.where(smaData1 > sell_threshold, stock['Close'], np.nan)
conditionalBuy2 = np.where(smaData2 < buy_threshold, stock['Close'], np.nan)
conditionalSell2 = np.where(smaData2 > sell_threshold, stock['Close'], np.nan)
conditionalBuy3 = np.where(smaData3 < buy_threshold, stock['Close'], np.nan)
conditionalSell3 = np.where(smaData3 > sell_threshold, stock['Close'], np.nan)
# SMA Construction
stock['SMA1'] = smaData1
stock['SMA2'] = smaData2
stock['SMA3'] = smaData3
stock['SMA1_Buy'] = conditionalBuy1
stock['SMA1_Sell'] = conditionalSell1
stock['SMA2_Buy'] = conditionalBuy2
stock['SMA2_Sell'] = conditionalSell2
stock['SMA3_Buy'] = conditionalBuy3
stock['SMA3_Sell'] = conditionalSell3
strategy = "SMA"
title = f'Close Price Buy/Sell Signals using {strategy} {pick_SMA}'
fig, axs = plt.subplots(2, sharex=True, figsize=figsize)
# fig.suptitle(f'Top: {tickers} Stock Price. Bottom: {strategy}')
if pick_SMA == 1:
if not stock['SMA1_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['SMA1_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['SMA1_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['SMA1_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
elif pick_SMA == 2:
if not stock['SMA2_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['SMA2_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['SMA2_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['SMA2_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
elif pick_SMA == 3:
if not stock['SMA3_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['SMA3_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['SMA3_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['SMA3_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
else:
if not stock['SMA1_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['SMA1_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['SMA1_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['SMA1_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
# plt.xticks(rotation=45)
axs[0].set_title(title)
axs[0].set_ylabel('Close Price', fontsize=10)
axs[0].legend(loc='upper left')
axs[0].grid()
axs[1].plot(stock['SMA1'], label='SMA', color = 'green')
axs[1].plot(stock['SMA2'], label='SMA', color = 'blue')
axs[1].plot(stock['SMA3'], label='SMA', color = 'red')
axs[1].set_ylabel('Price Minus SMA (Rescaled to Max=1)', fontsize=10)
axs[1].set_xlabel('Date', fontsize=18)
axs[1].grid()
# Check Statistics:
SIGNAL = df_stock['Signal']
LENGTH = len(SIGNAL)
count_plus = 0
count_minus = 0
for i in range(LENGTH):
if float(SIGNAL.iloc[i,]) > 0:
count_plus += 1
for i in range(LENGTH):
if float(SIGNAL.iloc[i,]) < 0:
count_minus += 1
basicStats = {'AVE_BUY': round(np.sum(count_minus)/LENGTH, 4),
'AVE_SELL': round(np.sum(count_plus)/LENGTH, 4) }
# Print
if verbose:
print("----------------------------------------------------------------------------------------------------")
print(f"Entered Stock has the following information:")
print(f'Ticker: {ticker}')
print("---")
print(f"Expted Return: {round(np.mean(dta_stock['Normalize Return']), 4)}")
print(f"Expted Risk (Volatility): {round(np.std(dta_stock['Normalize Return']), 4)}")
print(f"Reward-Risk Ratio (Daily Data): {round(np.mean(dta_stock['Normalize Return']) / np.std(dta_stock['Normalize Return']), 4)}")
print("---")
print("Tail of the 'Buy/Sell Signal' dataframe:")
print(pd.DataFrame(stock).tail(3))
print("Note: positive values indicate 'sell' and negative values indicate 'buy'.")
print("---")
print(f"Basic Statistics for Buy Sell Signals: {basicStats}")
print("Note: Change LB and UB to ensure average buy sell signals fall beneath 2%.")
print("---")
url_front = "https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="
url_back = "&type=10-K&dateb=&owner=exclude&count=40"
url_all = str(url_front + ticker + url_back)
print("For annual report on SEC site, please go to: ")
print(url_all)
if gotoSEC:
import webbrowser
webbrowser.open(url_all)
print("----------------------------------------------------------------------------------------------------")
# Get More Data:
tck = yf.Ticker(ticker)
ALL_DATA = {
'get stock info': tck.info,
'get historical market data': tck.history(period="max"),
'show actions (dividends, splits)': tck.actions,
'show dividends': tck.dividends,
'show splits': tck.splits,
'show financials': [tck.financials, tck.quarterly_financials],
'show balance sheet': [tck.balance_sheet, tck.quarterly_balance_sheet],
'show cashflow': [tck.cashflow, tck.quarterly_cashflow],
'show earnings': [tck.earnings, tck.quarterly_earnings],
'show sustainability': tck.sustainability,
'show analysts recommendations': tck.recommendations,
'show next event (earnings, etc)': tck.calendar
}
# Time
end = time.time()
if verbose == True:
print('Time Consumption (in sec):', round(end - start, 2))
print('Time Consumption (in min):', round((end - start)/60, 2))
print('Time Consumption (in hr):', round((end - start)/60/60, 2))
# Return
return {
'data': dta_stock,
'resulting matrix': stock,
'basic statistics': basicStats,
'estimatedReturn': np.mean(dta_stock['Normalize Return']),
'estimatedRisk': np.std(dta_stock['Normalize Return']),
'ALL_DATA': ALL_DATA
}
# End function
# Define Function: RSI Timer
def RSI_Timer(
start_date = '2013-01-01',
end_date = '2019-12-6',
tickers = 'AAPL',
pick_RSI = 1,
rsi_threshold_1 = 10,
rsi_threshold_2 = 30,
rsi_threshold_3 = 100,
buy_threshold = 20,
sell_threshold = 80 ):
print("------------------------------------------------------------------------------")
print("MANUAL: ")
print("Try run the following line by line in a Python Notebook.")
print(
"""
MANUAL: To install this python package, please use the following code.
# In a python notebook:
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# In a command line:
# pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# Run
start_date = '2010-01-01'
end_date = '2020-01-18'
ticker = 'FB'
temp = RSI_Timer(
start_date = '2013-01-01',
end_date = '2019-12-6',
tickers = 'AAPL',
pick_RSI = 1,
rsi_threshold_1 = 10,
rsi_threshold_2 = 30,
rsi_threshold_3 = 100,
buy_threshold = 20,
sell_threshold = 80 )
""" )
print("Manual ends here.")
print("------------------------------------------------------------------------------")
# Get Data
stock = yf.download(tickers, start_date, end_date)
rsiData1 = RSIIndicator(stock['Close'], rsi_threshold_1, True)
rsiData2 = RSIIndicator(stock['Close'], rsi_threshold_2, True)
rsiData3 = RSIIndicator(stock['Close'], rsi_threshold_3, True)
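        # note: the rsi_threshold_* arguments are used here as RSI look-back windows,
        # while buy_threshold / sell_threshold are the actual signal levels on the RSI scale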
# Conditional Buy/Sell => Signals
conditionalBuy1 = np.where(rsiData1.rsi() < buy_threshold, stock['Close'], np.nan)
conditionalSell1 = np.where(rsiData1.rsi() > sell_threshold, stock['Close'], np.nan)
conditionalBuy2 = np.where(rsiData2.rsi() < buy_threshold, stock['Close'], np.nan)
conditionalSell2 = np.where(rsiData2.rsi() > sell_threshold, stock['Close'], np.nan)
conditionalBuy3 = np.where(rsiData3.rsi() < buy_threshold, stock['Close'], np.nan)
conditionalSell3 = np.where(rsiData3.rsi() > sell_threshold, stock['Close'], np.nan)
# RSI Construction
stock['RSI1'] = rsiData1.rsi()
stock['RSI2'] = rsiData2.rsi()
stock['RSI3'] = rsiData3.rsi()
stock['RSI1_Buy'] = conditionalBuy1
stock['RSI1_Sell'] = conditionalSell1
stock['RSI2_Buy'] = conditionalBuy2
stock['RSI2_Sell'] = conditionalSell2
stock['RSI3_Buy'] = conditionalBuy3
stock['RSI3_Sell'] = conditionalSell3
strategy = "RSI"
title = f'Close Price Buy/Sell Signals using {strategy}'
fig, axs = plt.subplots(2, sharex=True, figsize=(13,9))
# fig.suptitle(f'Top: {tickers} Stock Price. Bottom: {strategy}')
if pick_RSI == 1:
if not stock['RSI1_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['RSI1_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['RSI1_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['RSI1_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
elif pick_RSI == 2:
if not stock['RSI2_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['RSI2_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['RSI2_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['RSI2_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
elif pick_RSI == 3:
if not stock['RSI3_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['RSI3_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['RSI3_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['RSI3_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
else:
if not stock['RSI1_Buy'].isnull().all():
axs[0].scatter(stock.index, stock['RSI1_Buy'], color='green', label='Buy Signal', marker='^', alpha=1)
if not stock['RSI1_Sell'].isnull().all():
axs[0].scatter(stock.index, stock['RSI1_Sell'], color='red', label='Sell Signal', marker='v', alpha=1)
axs[0].plot(stock['Close'], label='Close Price', color='blue', alpha=0.35)
# plt.xticks(rotation=45)
axs[0].set_title(title)
axs[0].set_xlabel('Date', fontsize=18)
axs[0].set_ylabel('Close Price', fontsize=18)
axs[0].legend(loc='upper left')
axs[0].grid()
axs[1].plot(stock['RSI1'], label='RSI', color = 'green')
axs[1].plot(stock['RSI2'], label='RSI', color = 'blue')
axs[1].plot(stock['RSI3'], label='RSI', color = 'red')
return {
"data": stock
}
# Define Function: Recurrent Neural Network Regressor
def RNN_Regressor(
start_date = '2013-01-01',
end_date = '2019-12-6',
tickers = 'AAPL',
numberOfPastDays = 100,
cutoff = 0.8,
numOfHiddenLayer = 3,
l1_units = 50,
l2_units = 50,
l3_units = 50,
l4_units = 30,
l5_units = 10,
dropOutRate = 0.2,
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = True,
verbose = True ):
if verbose:
print("------------------------------------------------------------------------------")
print(
"""
MANUAL: To install this python package, please use the following code.
# In a python notebook:
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# In a command line:
# pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# Run
tmp = RNN_Regressor(
start_date = '2013-01-01',
end_date = '2021-01-01',
tickers = 'AAPL',
numberOfPastDays = 100,
cutoff = 0.8,
numOfHiddenLayer = 3,
l1_units = 50,
l2_units = 50,
l3_units = 50,
l4_units = 30,
l5_units = 10,
dropOutRate = 0.2,
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = True,
verbose = True )
# Cite
# All Rights Reserved. © <NAME>
""" )
print("------------------------------------------------------------------------------")
# Initiate Environment
from scipy import stats
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
import time
# Define function
def getDatafromYF(ticker, start_date, end_date):
stockData = yf.download(ticker, start_date, end_date)
return stockData
# End function
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
tickers = [tickers]
# Start with Dictionary (this is where data is saved)
stockData = {}
for i in tickers:
stockData[i] = pd.DataFrame(getDatafromYF(str(i), start_date, end_date))
close = stockData[i]['Adj Close']
stockData[i]['Normalize Return'] = close / close.shift() - 1
# Take a look
# print(stockData[tickers[0]].head(2)) # this is desired stock
# print(stockData[tickers[1]].head(2)) # this is benchmark (in this case, it is S&P 500 SPDR Index Fund: SPY)
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
stockData[tickers[0]].iloc[:, 4].head(3)
data = stockData[tickers[0]].iloc[:, 4:5].values
sc = MinMaxScaler(feature_range = (0, 1))
scaled_dta = sc.fit_transform(data)
scaled_dta = pd.DataFrame(scaled_dta)
training_set = scaled_dta.iloc[0:round(scaled_dta.shape[0] * cutoff), :]
testing_set = scaled_dta.iloc[round(cutoff * scaled_dta.shape[0] + 1):scaled_dta.shape[0], :]
# print(training_set.shape, testing_set.shape)
X_train = []
y_train = []
for i in range(numberOfPastDays, training_set.shape[0]):
X_train.append(np.array(training_set)[i-numberOfPastDays:i, 0])
y_train.append(np.array(training_set)[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
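        # LSTM layers expect 3-D input of shape (samples, timesteps, features); each sample
        # is a window of `numberOfPastDays` scaled prices with one feature per time step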
if verbose:
print('--------------------------------------------------------------------')
print('Shape for data frame in training set:')
print('Shape of X:', X_train.shape, '; Shape of Y:', len(y_train))
print('--------------------------------------------------------------------')
X_test = []
y_test = []
for i in range(numberOfPastDays, testing_set.shape[0]):
X_test.append(np.array(testing_set)[i-numberOfPastDays:i, 0])
y_test.append(np.array(testing_set)[i, 0])
X_test, y_test = np.array(X_test), np.array(y_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
if verbose:
print('--------------------------------------------------------------------')
print('Shape for data frame in testing set:')
print('Shape of X:', X_test.shape, ': Shape of Y:', len(y_test))
print('--------------------------------------------------------------------')
### Build RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
import time
# Initialize RNN
begintime = time.time()
regressor = Sequential()
# Design hidden layers
if numOfHiddenLayer == 2:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 3:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 4:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 5:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fifth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l5_units))
regressor.add(Dropout(dropOutRate))
# Adding the output layer
regressor.add(Dense(units = 1))
endtime = time.time()
# Summary
if verbose:
print("--------------------------------------------")
print('Let us investigate the sequential models.')
regressor.summary()
print("--------------------------------------------")
print("Time Consumption (in sec):", endtime - begintime)
print("Time Consumption (in min):", round((endtime - begintime)/60, 2))
print("Time Consumption (in hr):", round((endtime - begintime)/60)/60, 2)
print("--------------------------------------------")
### Train RNN
# Compiling the RNN
start = time.time()
regressor.compile(optimizer = optimizer, loss = loss)
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = epochs, batch_size = batch_size)
end = time.time()
# Time Check
if verbose == True:
print('Time Consumption:', end - start)
### Predictions
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
real_stock_price = np.reshape(y_test, (y_test.shape[0], 1))
real_stock_price = sc.inverse_transform(real_stock_price)
### Performance Visualization
# Visualising the results
import matplotlib.pyplot as plt
if plotGraph:
plt.plot(real_stock_price, color = 'red', label = f'Real {tickers[0]} Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = f'Predicted {tickers[0]} Stock Price')
plt.title(f'{tickers[0]} Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel(f'{tickers[0]} Stock Price')
plt.legend()
plt.show()
import math
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
if verbose:
print(f'---------------------------------------------------------------------------------')
print(f'Root Mean Square Error is {round(rmse,2)} for test set.')
print(f'------------------')
print(f'Interpretation:')
print(f'------------------')
print(f'On the test set, the performance of this LSTM architecture guesses ')
print(f'{tickers[0]} stock price on average within the error of ${round(rmse,2)} dollars.')
print(f'---------------------------------------------------------------------------------')
# Output
return {
'Information': {
'train set shape': training_set.shape,
'test set shape': testing_set.shape
},
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Test Response': {
'predicted_stock_price': predicted_stock_price,
'real_stock_price': real_stock_price
},
'Test Error': rmse
}
# End function
# Define Function: Recurrent Neural Network: Neural Sequence Translation
def Neural_Sequence_Translation(
start_date = '2013-01-01',
end_date = '2021-01-01',
ticker = 'AAPL',
w = 1,
h = 5,
cutoff = 0.8,
numOfHiddenLayer = 3,
numOfDense = 2,
l1_units = 50,
l2_units = 50,
l3_units = 50,
l4_units = 30,
l5_units = 10,
dropOutRate = 0.2,
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = True,
useMPLFinancePlot= True,
verbose = True ):
if verbose:
print("------------------------------------------------------------------------------")
print(
"""
MANUAL: To install this python package, please use the following code.
# In a python notebook:
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# In a command line:
# pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# Run
tmp = Neural_Sequence_Translation(
start_date = '2013-01-01',
end_date = '2021-01-01',
ticker = 'AAPL',
w = 1,
h = 5,
cutoff = 0.8,
numOfHiddenLayer = 3,
numOfDense = 2,
l1_units = 50,
l2_units = 50,
l3_units = 50,
l4_units = 30,
l5_units = 10,
dropOutRate = 0.2,
optimizer = 'adam',
loss = 'mean_squared_error',
useDice = True,
epochs = 50,
batch_size = 64,
plotGraph = True,
useMPLFinancePlot= True,
verbose = True )
# Cite
# All Rights Reserved. © <NAME>
""" )
print("------------------------------------------------------------------------------")
# libraries
import pandas as pd
import numpy as np
import yfinance as yf
# get data
stockData = yf.download(ticker, start_date, end_date)
stockData = stockData.iloc[:,:5] # omit volume
# create data
Y = stockData.iloc[w::, ]
        X = np.zeros((Y.shape[0], w*h))  # float buffer so the flattened prices are not truncated to integers
for i in range(0,int(stockData.shape[0]-w)):
X[i,] = np.array(stockData.iloc[i:(i+w),]).reshape(1, w*h)
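        # each row of X is the previous `w` OHLC(+Adj Close) rows flattened into w*h values;
        # the matching row of Y is the next row that the network learns to translate to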
X_train = X[0:round(X.shape[0]*cutoff), ]
X_test = X[round(X.shape[0]*cutoff):X.shape[0], ]
y_train = Y.iloc[0:round(Y.shape[0]*cutoff), ]
y_test = Y.iloc[round(Y.shape[0]*cutoff):Y.shape[0], ]
X_train = np.array(X_train).reshape(X_train.shape[0], w, h)
X_test = np.array(X_test).reshape(X_test.shape[0], w, h)
if verbose:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
### Build RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
import time
# Initialize RNN
begintime = time.time()
regressor = Sequential()
# Design hidden layers
if numOfHiddenLayer == 2:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 3:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 4:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 5:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fifth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l5_units))
regressor.add(Dropout(dropOutRate))
# Design dense layers
if numOfDense == 1:
regressor.add(Dense(units = l1_units))
elif numOfDense == 2:
regressor.add(Dense(units = l1_units))
regressor.add(Dense(units = l2_units))
elif numOfDense == 3:
regressor.add(Dense(units = l1_units))
regressor.add(Dense(units = l2_units))
regressor.add(Dense(units = l3_units))
else:
if verbose:
print("Options are 1, 2, or 3. Reset to one dense layer.")
regressor.add(Dense(units = l1_units))
# Adding the output layer
regressor.add(Dense(units = y_train.shape[1]))
endtime = time.time()
# Summary
if verbose:
print("--------------------------------------------")
print('Let us investigate the sequential models.')
regressor.summary()
print("--------------------------------------------")
print("Time Consumption (in sec):", endtime - begintime)
print("Time Consumption (in min):", round((endtime - begintime)/60, 2))
print("Time Consumption (in hr):", round((endtime - begintime)/60)/60, 2)
print("--------------------------------------------")
### Train RNN
# Compiling the RNN
start = time.time()
regressor.compile(optimizer = optimizer, loss = loss)
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = epochs, batch_size = batch_size)
end = time.time()
# Time Check
if verbose == True:
print('Time Consumption:', end - start)
### Predictions
predicted_stock_price = regressor.predict(X_test)
real_stock_price = y_test
# Visualising the results
import matplotlib.pyplot as plt
if plotGraph:
fig, axs = plt.subplots(2, figsize = (10,6))
fig.suptitle(f'Real (Up) vs. Estimate (Down) {ticker} Stock Price')
axs[0].plot(real_stock_price, color = 'red', label = f'Real {ticker} Stock Price')
axs[1].plot(predicted_stock_price, color = 'blue', label = f'Predicted {ticker} Stock Price')
if useMPLFinancePlot:
import pandas as pd
import mplfinance as mpf
predicted_stock_price = pd.DataFrame(predicted_stock_price)
predicted_stock_price.columns = real_stock_price.columns
predicted_stock_price.index = real_stock_price.index
s = mpf.make_mpf_style(base_mpf_style='charles', rc={'font.size': 6})
fig = mpf.figure(figsize=(10, 7), style=s) # pass in the self defined style to the whole canvas
ax = fig.add_subplot(2,1,1) # main candle stick chart subplot, you can also pass in the self defined style here only for this subplot
av = fig.add_subplot(2,1,2, sharex=ax) # volume chart subplot
df1 = real_stock_price
mpf.plot(df1, type='candle', style='yahoo', ax=ax, volume=False)
df2 = predicted_stock_price
mpf.plot(df2, type='candle', style='yahoo', ax=av)
# Output
return {
'Information': {
'explanatory matrix X shape': X.shape,
'response matrix Y shape': Y.shape
},
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': {
'neural sequence translation model': regressor
},
'Test Response': {
'predicted_stock_price': predicted_stock_price,
'real_stock_price': real_stock_price
}
}
# End function
# Define Function: Recurrent Neural Network: Neural Sequence Translation
def Autonomous_Neural_Sequence_Translation(
X = 0,
Y = 0,
w = 1,
h = 5,
cutoff = 0.8,
numOfHiddenLayer = 3,
numOfDense = 2,
l1_units = 128,
l2_units = 64,
l3_units = 32,
l4_units = 16,
l5_units = 10,
dropOutRate = 0.2,
layer_activation = 'relu',
final_activation = 'softmax',
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = False,
useMPLFinancePlot = True,
verbose = True ):
if verbose:
print("------------------------------------------------------------------------------")
print(
"""
MANUAL: To install this python package, please use the following code.
# In a python notebook:
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# In a command line:
# pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# Run
tmp = Autonomous_Neural_Sequence_Translation(
X = X, # explanatory data matrix
Y = Y, # response data matrix
w = 1,
h = 5,
cutoff = 0.8, # take a fraction between 0 and 1
numOfHiddenLayer = 3, # take an integer from 1, 2, 3, 4, or 5
numOfDense = 2, # take an integer from 1, 2, or 3
l1_units = 128,
l2_units = 64,
l3_units = 32,
l4_units = 16,
l5_units = 10,
dropOutRate = 0.2,
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = False,
useMPLFinancePlot = True,
verbose = True )
# Cite
# All Rights Reserved. © <NAME>
""" )
print("------------------------------------------------------------------------------")
# libraries
import pandas as pd
import numpy as np
import yfinance as yf
# get data
X_train = X[0:round(X.shape[0]*cutoff), ]
X_test = X[round(X.shape[0]*cutoff):X.shape[0], ]
y_train = Y.iloc[0:round(Y.shape[0]*cutoff), ]
y_test = Y.iloc[round(Y.shape[0]*cutoff):Y.shape[0], ]
X_train = np.array(X_train).reshape(X_train.shape[0], w, h)
X_test = np.array(X_test).reshape(X_test.shape[0], w, h)
if verbose:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
### Build RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
import time
# Initialize RNN
begintime = time.time()
regressor = Sequential()
# Design hidden layers
if numOfHiddenLayer == 2:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 3:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 4:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 5:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w, h)))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units, return_sequences = True))
regressor.add(Dropout(dropOutRate))
# Adding a fifth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l5_units))
regressor.add(Dropout(dropOutRate))
# Design dense layers
if numOfDense == 1:
regressor.add(Dense(units = l1_units, activation = layer_activation))
elif numOfDense == 2:
regressor.add(Dense(units = l1_units, activation = layer_activation))
regressor.add(Dense(units = l2_units, activation = layer_activation))
elif numOfDense == 3:
regressor.add(Dense(units = l1_units, activation = layer_activation))
regressor.add(Dense(units = l2_units, activation = layer_activation))
regressor.add(Dense(units = l3_units, activation = layer_activation))
        elif numOfDense == 4:
            regressor.add(Dense(units = l1_units, activation = layer_activation))
            regressor.add(Dense(units = l2_units, activation = layer_activation))
            regressor.add(Dense(units = l3_units, activation = layer_activation))
            regressor.add(Dense(units = l4_units, activation = layer_activation))
elif numOfDense == 5:
regressor.add(Dense(units = l1_units, activation = layer_activation))
regressor.add(Dense(units = l2_units, activation = layer_activation))
regressor.add(Dense(units = l3_units, activation = layer_activation))
regressor.add(Dense(units = l4_units, activation = layer_activation))
regressor.add(Dense(units = l5_units, activation = layer_activation))
else:
if verbose:
print("Options are 1, 2, 3, 4, or 5. Reset to one dense layer.")
            regressor.add(Dense(units = l1_units, activation = layer_activation))
# Adding the output layer
regressor.add(Dense(units = y_train.shape[1]))
endtime = time.time()
# Summary
if verbose:
print("--------------------------------------------")
print('Let us investigate the sequential models.')
regressor.summary()
print("--------------------------------------------")
print("Time Consumption (in sec):", endtime - begintime)
print("Time Consumption (in min):", round((endtime - begintime)/60, 2))
print("Time Consumption (in hr):", round((endtime - begintime)/60)/60, 2)
print("--------------------------------------------")
### Train RNN
# Compiling the RNN
start = time.time()
regressor.compile(optimizer = optimizer, loss = loss)
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = epochs, batch_size = batch_size)
end = time.time()
# Time Check
if verbose == True:
print('Time Consumption:', end - start)
### Predictions
predicted_stock_price = regressor.predict(X_test)
real_stock_price = y_test
# Visualising the results
import matplotlib.pyplot as plt
if plotGraph:
fig, axs = plt.subplots(2, figsize = (10,6))
            fig.suptitle('Real (Up) vs. Estimate (Down) Stock Price')  # no ticker argument in this function, so use a generic title
            axs[0].plot(real_stock_price, color = 'red', label = 'Real Stock Price')
            axs[1].plot(predicted_stock_price, color = 'blue', label = 'Predicted Stock Price')
if useMPLFinancePlot:
import pandas as pd
import mplfinance as mpf
predicted_stock_price = pd.DataFrame(predicted_stock_price)
predicted_stock_price.columns = real_stock_price.columns
predicted_stock_price.index = real_stock_price.index
s = mpf.make_mpf_style(base_mpf_style='charles', rc={'font.size': 6})
fig = mpf.figure(figsize=(10, 7), style=s) # pass in the self defined style to the whole canvas
ax = fig.add_subplot(2,1,1) # main candle stick chart subplot, you can also pass in the self defined style here only for this subplot
av = fig.add_subplot(2,1,2, sharex=ax) # volume chart subplot
df1 = real_stock_price
mpf.plot(df1, type='candle', style='yahoo', ax=ax, volume=False)
df2 = predicted_stock_price
mpf.plot(df2, type='candle', style='yahoo', ax=av)
# Output
return {
'Information': {
'explanatory matrix X shape': X.shape,
'response matrix Y shape': Y.shape
},
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': {
'neural sequence translation model': regressor
},
'Test Response': {
'predicted_stock_price': predicted_stock_price,
'real_stock_price': real_stock_price
}
}
# End function
# Define Function: Recurrent Neural Network: Neural Sequence Translation
def Embedding_Neural_Sequence_Translation(
X = 0,
Y = 0,
w = 1,
h = 5,
cutoff = 0.8,
max_len = 1000,
output_dim = 5,
numOfHiddenLayer = 3,
numOfDense = 2,
l1_units = 128,
l2_units = 64,
l3_units = 32,
l4_units = 16,
l5_units = 10,
dropOutRate = 0.2,
layer_activation = 'relu',
final_activation = 'softmax',
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = False,
useMPLFinancePlot = True,
verbose = True ):
if verbose:
print("------------------------------------------------------------------------------")
print(
"""
MANUAL: To install this python package, please use the following code.
# In a python notebook:
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# In a command line:
# pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
# Run
tmp = Embedding_Neural_Sequence_Translation(
X = X, # explanatory data matrix
Y = Y, # response data matrix
w = 1,
h = 5,
cutoff = 0.8, # take a fraction between 0 and 1
max_len = 1000,
output_dim = 5,
numOfHiddenLayer = 3, # take an integer from 1, 2, 3, 4, or 5
numOfDense = 2, # take an integer from 1, 2, or 3
l1_units = 128,
l2_units = 64,
l3_units = 32,
l4_units = 16,
l5_units = 10,
dropOutRate = 0.2,
optimizer = 'adam',
loss = 'mean_squared_error',
epochs = 50,
batch_size = 64,
plotGraph = False,
useMPLFinancePlot = True,
verbose = True )
# Cite
# All Rights Reserved. © <NAME>
""" )
print("------------------------------------------------------------------------------")
# libraries
import pandas as pd
import numpy as np
import yfinance as yf
# get data
X_train = X[0:round(X.shape[0]*cutoff), ]
X_test = X[round(X.shape[0]*cutoff):X.shape[0], ]
y_train = Y.iloc[0:round(Y.shape[0]*cutoff), ]
y_test = Y.iloc[round(Y.shape[0]*cutoff):Y.shape[0], ]
X_train = np.array(X_train).reshape(X_train.shape[0], w*h) # dim would be 1, w*h if Embedding is used
X_test = np.array(X_test).reshape(X_test.shape[0], w*h)
if verbose:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
### Build RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import LayerNormalization
import time
# Initialize RNN
begintime = time.time()
regressor = Sequential()
# Embedding
regressor.add(Embedding(input_dim=max_len, output_dim=output_dim, input_length=w*h))
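        # note (assumption): Embedding treats every input value as an integer token id in
        # [0, max_len), so X is expected to be integer-encoded before reaching this model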
# Design hidden layers
if numOfHiddenLayer == 2:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w*h, output_dim)))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 3:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w*h, output_dim)))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 4:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w*h, output_dim)))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
elif numOfHiddenLayer == 5:
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l1_units, return_sequences = True, input_shape = (w*h, output_dim)))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l2_units, return_sequences = True))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l3_units, return_sequences = True))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l4_units, return_sequences = True))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Adding a fifth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = l5_units))
regressor.add(LayerNormalization(axis=1))
regressor.add(Dropout(dropOutRate))
# Design dense layers
if numOfDense == 1:
regressor.add(Dense(units = l1_units, activation = layer_activation))
elif numOfDense == 2:
regressor.add(Dense(units = l1_units, activation = layer_activation))
regressor.add(Dense(units = l2_units, activation = layer_activation))
elif numOfDense == 3:
regressor.add(Dense(units = l1_units, activation = layer_activation))
regressor.add(Dense(units = l2_units, activation = layer_activation))
regressor.add(Dense(units = l3_units, activation = layer_activation))
elif numOfDense == 4:
    regressor.add(Dense(units = l1_units, activation = layer_activation))
    regressor.add(Dense(units = l2_units, activation = layer_activation))
    regressor.add(Dense(units = l3_units, activation = layer_activation))
    regressor.add(Dense(units = l4_units, activation = layer_activation))
elif numOfDense == 5:
regressor.add(Dense(units = l1_units, activation = layer_activation))
regressor.add(Dense(units = l2_units, activation = layer_activation))
regressor.add(Dense(units = l3_units, activation = layer_activation))
regressor.add(Dense(units = l4_units, activation = layer_activation))
regressor.add(Dense(units = l5_units, activation = layer_activation))
else:
if verbose:
print("Options are 1, 2, 3, 4, or 5. Reset to one dense layer.")
regressor.add(Dense(units = l1_units, activation = final_activation))
# Adding the output layer
regressor.add(Dense(units = y_train.shape[1]))
endtime = time.time()
# Summary
if verbose:
print("--------------------------------------------")
print('Let us investigate the sequential models.')
regressor.summary()
print("--------------------------------------------")
print("Time Consumption (in sec):", endtime - begintime)
print("Time Consumption (in min):", round((endtime - begintime)/60, 2))
print("Time Consumption (in hr):", round((endtime - begintime)/60)/60, 2)
print("--------------------------------------------")
### Train RNN
# Compiling the RNN
start = time.time()
regressor.compile(optimizer = optimizer, loss = loss)
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = epochs, batch_size = batch_size)
end = time.time()
# Time Check
if verbose:
print('Time Consumption:', end - start)
### Predictions
predicted_stock_price = regressor.predict(X_test)
real_stock_price = y_test
# Visualising the results
import matplotlib.pyplot as plt
if plotGraph:
fig, axs = plt.subplots(2, figsize = (10,6))
fig.suptitle(f'Real (Up) vs. Estimate (Down) {ticker} Stock Price')
axs[0].plot(real_stock_price, color = 'red', label = f'Real {ticker} Stock Price')
axs[1].plot(predicted_stock_price, color = 'blue', label = f'Predicted {ticker} Stock Price')
if useMPLFinancePlot:
import pandas as pd
import mplfinance as mpf
    predicted_stock_price = pd.DataFrame(predicted_stock_price)
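    # Hedged sketch (assumption, not from the original source): mplfinance expects a
    # DatetimeIndex plus Open/High/Low/Close columns, so build a minimal synthetic
    # OHLC frame from the first predicted column before plotting a candle chart.
    ohlc_df = pd.DataFrame({'Close': predicted_stock_price.iloc[:, 0]})
    ohlc_df['Open'] = ohlc_df['Close'].shift(1).fillna(ohlc_df['Close'])
    ohlc_df['High'] = ohlc_df[['Open', 'Close']].max(axis=1)
    ohlc_df['Low'] = ohlc_df[['Open', 'Close']].min(axis=1)
    ohlc_df.index = pd.date_range(end=pd.Timestamp.today(), periods=len(ohlc_df), freq='D')
    mpf.plot(ohlc_df, type='candle', title=f'Predicted {ticker} Stock Price')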
import pandas as pd
import numpy as np
import ROOT as root
import root_pandas
import os
import math
from array import array
from scipy.stats import binned_statistic
from array import array
# load rootlogon
#root.gROOT.Macro( os.path.expanduser( '/nfs/dust/belle2/user/ferber/git/jupyter_nb/libs/rootlogon.C' ) )
#root.gROOT.Macro( os.path.expanduser( 'rootlogon.C' ) )
# default canvas sizes
kCanvasWidth = 700;
kCanvasHeight1 = 500;
kCanvasHeight2 = 600;
# default palette
root.gStyle.SetPalette(112) #kViridis = 112
# ------------------------------------------------------------
def MakeAndFillTGraph(df, varx, vary, color = root.kBlack, markersize=1.0, markerstyle=20, title='graph'):
x = np.array(df[varx],dtype='float64')
y = np.array(df[vary],dtype='float64')
n = len(x)
g = root.TGraph(n, x, y)
g.SetMarkerColor(color)
g.SetLineColor(color)
g.SetMarkerStyle(markerstyle)
g.SetMarkerSize(markersize)
g.SetTitle(title)
return g
# ------------------------------------------------------------
def ScaleTGraphXY(tg, fx=1, fy=1):
n = tg.GetN();
tg_new = root.TGraph(n);
for i in range(n):
x, y = root.Double(0), root.Double(0)
tg.GetPoint(i, x, y)
tg_new.SetPoint(i, x*fx, y*fy)
return tg_new
# ------------------------------------------------------------
# add a column that contains a categorical variables (bincount)
def AddBinColumn(df, xmin, xmax, xbins, var):
if xbins <1:
print('Invalid number of bins', xbins)
return None
binedges = np.linspace(xmin, xmax, num=xbins+1)
binlabels = np.linspace(int(1), int(xbins), num=xbins)
bincol = '%s_%s' % (var, 'bin')
#bincolcount = '%s_%s' % (var, 'count')
    df[bincol] = pd.cut(df[var], bins=binedges, labels=binlabels)
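# ------------------------------------------------------------
# Hedged usage sketch (not part of the original module): quick self-check of
# AddBinColumn on a synthetic frame when this file is run directly.
if __name__ == '__main__':
    _df = pd.DataFrame({'energy': np.random.uniform(0.0, 2.0, size=1000)})
    AddBinColumn(_df, xmin=0.0, xmax=2.0, xbins=10, var='energy')
    print(_df['energy_bin'].value_counts().sort_index())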
import unittest
import pandas as pd
import os
import pytest
import numpy as np
from pandas.testing import assert_frame_equal
import src.eval_graph as eval_graph
class Test_graph_eval(unittest.TestCase):
def test_loadCSV(self):
function_return = eval_graph.loadCSV("both", "test/csv_test/bibleTA_emotion.csv")
base = pd.read_csv("test/csv_test/bibleTA_emotion.csv")
assert_frame_equal(function_return, base)
def test_formatBible(self):
df_bible = pd.read_csv("test/csv_test/test_eval_graph.csv")
function_return = eval_graph.formate_bible(df_bible)
base = pd.read_csv("test/csv_test/test_return_formatBible.csv")
base.drop(["Unnamed: 0"], axis=1, inplace=True)
assert_frame_equal(base, function_return)
def test_distillDataframe(self):
        df_bible = pd.read_csv("test/csv_test/test_eval_graph.csv")
import numpy as np
import pandas as pd
from typing import List, Tuple, Dict
from sklearn.preprocessing import MinMaxScaler
from data_mining import ColorizedLogger
logger = ColorizedLogger('NullsFixer', 'yellow')
class NullsFixer:
__slots__ = ('sort_col', 'group_col')
sort_col: str
group_col: str
cols: List[str] = ['iso_code', 'date', 'daily_vaccinations', 'total_vaccinations',
'people_vaccinated', 'people_fully_vaccinated']
def __init__(self, sort_col: str, group_col: str):
self.sort_col = sort_col
self.group_col = group_col
@staticmethod
def fill_with_population(df: pd.DataFrame, df_meta: pd.DataFrame) -> pd.DataFrame:
def f1(row, col, target_col, multiplier=1):
if pd.isna(row[target_col]):
abs_val = row[col]
ph_val = 100 * abs_val / get_population(df_meta, row['country'])
return_val = round(ph_val, 2) * multiplier
else:
return_val = row[target_col]
return return_val
def get_population(_df, country):
return _df.loc[_df['country'] == country, 'population'].values[0]
df['people_vaccinated_per_hundred'] = df.apply(f1, args=(
'people_vaccinated', 'people_vaccinated_per_hundred'), axis=1)
df['people_fully_vaccinated_per_hundred'] = df.apply(f1, args=(
'people_fully_vaccinated', 'people_fully_vaccinated_per_hundred'), axis=1)
df['total_vaccinations_per_hundred'] = df.apply(f1, args=(
'total_vaccinations', 'total_vaccinations_per_hundred'), axis=1)
df['daily_vaccinations_per_million'] = df.apply(f1, args=(
'daily_vaccinations', 'daily_vaccinations_per_million', 10000), axis=1)
return df
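    # Note (descriptive): scale_cols and unscale_cols form a round trip: scale_cols
    # returns the fitted scalers plus the (sort_col, group_col) keys of the scaled rows,
    # and unscale_cols needs both back to invert the transform on exactly those rows.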
def scale_cols(self, df: pd.DataFrame, cols: List[Tuple], per_group: bool = False) \
-> Tuple[pd.DataFrame, Dict, List[Tuple]]:
def scale_func(group_col, col_name):
# if col.max() > max_val:
scaler_ = MinMaxScaler(feature_range=(0, max_val))
scalers[(col_name, group_col.name)] = scaler_
return scaler_.fit_transform(group_col.astype(float).values.reshape(-1, 1)).reshape(-1)
df_keys = df.copy()[[self.sort_col, self.group_col]]
df_keys = [tuple(x) for x in df_keys.to_numpy()]
scalers = {}
for col, max_val in cols:
# logger.info(f'Scaling "{col}" column in the range: [0, {max_val}]')
if per_group:
df[col] = df.groupby(self.group_col)[col].transform(scale_func, col_name=col)
else:
scaler = MinMaxScaler(feature_range=(0, max_val))
scalers[col] = scaler
df[[col]] = scaler.fit_transform(df[[col]])
return df, scalers, df_keys
def unscale_cols(self, df: pd.DataFrame, cols: List[Tuple], scalers: Dict, df_keys: List[Tuple],
per_group: bool = False) -> pd.DataFrame:
def unscale_func(group_col, col_name):
scaler_ = scalers[(col_name, group_col.name)]
return scaler_.inverse_transform(group_col.astype(float).values.reshape(-1, 1)).reshape(-1)
def fix_negatives(group_col):
min_val = group_col.min()
if min_val < 0:
group_col -= min_val
return group_col
df = df[df[[self.sort_col, self.group_col]].apply(tuple, axis=1).isin(df_keys)]
for col, max_val in cols:
# logger.info(f'Unscaling "{col}" column from the range: [0, {max_val}]')
if per_group:
df[col] = df.groupby(self.group_col)[col].transform(unscale_func, col_name=col)
df[col] = df.groupby(self.group_col)[col].transform(fix_negatives)
else:
scaler = scalers[col]
df[[col]] = scaler.inverse_transform(df[[col]])
return df
def fix_and_infer(self, df: pd.DataFrame) -> pd.DataFrame:
accum_cols = ['people_fully_vaccinated', 'people_vaccinated', 'total_vaccinations']
df = self.fix(df)
for col in accum_cols:
count_nan = len(df[col]) - df[col].count()
if count_nan > 0:
df = self.infer_accum_col(df, col, 'total_vaccinations')
df = self.fix(df)
return df
def fix(self, df: pd.DataFrame) -> pd.DataFrame:
all_cols = df.columns
nulls_prev = df.loc[:, self.cols].isna().sum()
while True:
df = self.fix_people_fully_vaccinated(df)
df = self.fix_people_vaccinated(df)
df = self.fix_total_vaccinations(df)
df = self.fix_daily_vaccinations(df)
nulls = df.loc[:, self.cols].isna().sum()
if nulls.equals(nulls_prev):
break
nulls_prev = nulls
return df.loc[:, all_cols]
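    # Note (descriptive): infer_accum_col fills blocks of consecutive nulls in a cumulative
    # column by spreading the whole-number difference between the surrounding non-null
    # values across the block, capping each filled value at the matching limit_col value.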
def infer_accum_col(self, df: pd.DataFrame, col: str, limit_col: str) -> pd.DataFrame:
def _infer_values(col, col_list, nulls_idx, val, consecutive_nulls, limit_col: pd.Series):
# Get top and bottom non-null values (for this block of consecutive nulls)
non_null_val_1 = col[col_list[nulls_idx[0] - 1][0]]
non_null_val_2 = val
# Calculate avg difference and create whole-number steps
diff = non_null_val_2 - non_null_val_1
whole_step, remainder = divmod(diff, consecutive_nulls + 1)
steps = whole_step * np.ones(consecutive_nulls)
steps[1:int(remainder) + 1] += 1
# Add the avg steps to each null value for this block
for null_ind, step in zip(nulls_idx, steps):
pd_idx_previous = col_list[null_ind - 1][0]
val_to_insert = col[pd_idx_previous] + step
pd_idx_null_current = col_list[null_ind][0]
limit_val = limit_col[pd_idx_null_current]
if val_to_insert > limit_val:
val_to_insert = limit_val
col[pd_idx_null_current] = val_to_insert
return col
def f_cols(col, limit_col: pd.Series):
consecutive_nulls = 0
nulls_idx = []
col_list = [(idx, val) for idx, val in col.items()]
for ind, (pd_ind, val) in enumerate(col_list):
if pd.isna(val):
if ind == 0:
col[pd_ind] = 0.0
else:
consecutive_nulls += 1
nulls_idx.append(ind)
if ind == len(col_list) - 1:
non_null_val_1 = col[col_list[nulls_idx[0] - 1][0]]
mean_step = round(col.mean())
max_val = non_null_val_1 + mean_step * consecutive_nulls
col = _infer_values(col, col_list, nulls_idx, max_val,
consecutive_nulls, limit_col)
else:
if consecutive_nulls > 0:
col = _infer_values(col, col_list, nulls_idx, val,
consecutive_nulls, limit_col)
# Reset
consecutive_nulls = 0
nulls_idx = []
return col
def f_groups(df: pd.DataFrame, col: str, limit_col: str):
df.loc[:, [col]] = df[[col]].apply(f_cols, args=(df[limit_col],), axis=0)
return df
df = df.sort_values(self.sort_col).reset_index(drop=True)
df = df.groupby(df[self.group_col]).apply(f_groups, col, limit_col)
return df
def fix_people_fully_vaccinated(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['total_vaccinations']) and pd.notna(row['people_vaccinated'])
cond_2 = pd.isna(row['people_fully_vaccinated'])
if cond_1 and cond_2:
row = row['total_vaccinations'] - row['people_vaccinated']
else:
row = row['people_fully_vaccinated']
return row
def f2(row):
cond_1 = row['total_vaccinations'] == 0.0
cond_2 = pd.isna(row['people_fully_vaccinated'])
if cond_1 and cond_2:
row = 0.0
else:
row = row['people_fully_vaccinated']
return row
# people_fully_vaccinated = total_vaccinations - people_vaccinated
df.loc[:, 'people_fully_vaccinated'] = df.apply(f1, axis=1)
# If total_vaccinations==0 -> people_fully_vaccinated = 0.0
df.loc[:, 'people_fully_vaccinated'] = df.apply(f2, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(df=df, col='people_fully_vaccinated')
return df
def fix_people_vaccinated(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['total_vaccinations']) and pd.notna(row['people_fully_vaccinated'])
cond_2 = pd.isna(row['people_vaccinated'])
if cond_1 and cond_2:
row = row['total_vaccinations'] - row['people_fully_vaccinated']
else:
row = row['people_vaccinated']
return row
def f2(row):
cond_1 = row['total_vaccinations'] == 0.0
cond_2 = pd.isna(row['people_vaccinated'])
if cond_1 and cond_2:
row = 0.0
else:
row = row['people_vaccinated']
return row
# people_vaccinated = total_vaccinations - people_fully_vaccinated
df.loc[:, 'people_vaccinated'] = df.apply(f1, axis=1)
# If total_vaccinations==0 -> people_vaccinated = 0.0
df.loc[:, 'people_vaccinated'] = df.apply(f2, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(df, 'people_vaccinated')
return df
@staticmethod
def global_fix(row):
# Setup the conditions
cond_1_1 = pd.notna(row['people_vaccinated']) and pd.notna(row['total_vaccinations'])
cond_1_2 = row['people_vaccinated'] > row['total_vaccinations']
cond_2_1 = pd.notna(row['people_fully_vaccinated']) and pd.notna(row['total_vaccinations'])
cond_2_2 = row['people_fully_vaccinated'] > row['total_vaccinations']
cond_3_1 = pd.notna(row['people_vaccinated']) and pd.notna(row['people_fully_vaccinated']) \
and pd.notna(row['total_vaccinations'])
cond_3_2 = row['people_vaccinated'] + row['people_fully_vaccinated'] \
> row['total_vaccinations']
# Check and fix
if cond_3_1:
if cond_3_2:
row['people_fully_vaccinated'], _ = divmod(row['total_vaccinations'], 2)
row['people_vaccinated'] = row['total_vaccinations'] - row['people_fully_vaccinated']
elif cond_1_1:
if cond_1_2:
row['people_vaccinated'] = row['total_vaccinations']
elif cond_2_1:
if cond_2_2:
row['people_fully_vaccinated'] = row['total_vaccinations']
return row
def fix_total_vaccinations(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['people_vaccinated']) and pd.notna(row['people_fully_vaccinated'])
cond_2 = pd.isna(row['total_vaccinations'])
if cond_1 and cond_2:
row = row['people_vaccinated'] + row['people_fully_vaccinated']
else:
row = row['total_vaccinations']
return row
def f2(row):
cond_1 = pd.notna(row['previous_total_vaccinations']) and pd.notna(
row['daily_vaccinations'])
cond_2 = pd.isna(row['total_vaccinations'])
if cond_1 and cond_2:
row = row['previous_total_vaccinations'] + row['daily_vaccinations']
else:
row = row['total_vaccinations']
return row
def f3(row):
cond_1 = pd.notna(row['next_total_vaccinations']) and \
pd.notna(row['next_daily_vaccinations'])
cond_2 = pd.isna(row['total_vaccinations'])
if cond_1 and cond_2:
row = row['next_total_vaccinations'] - row['next_daily_vaccinations']
else:
row = row['total_vaccinations']
return row
# Sort
df = df.sort_values(self.sort_col).reset_index(drop=True)
# total_vaccinations = people_vaccinated + people_fully_vaccinated
df.loc[:, 'total_vaccinations'] = df.apply(f1, axis=1)
df = df.apply(self.global_fix, axis=1)
# total_vaccinations = previous_total_vaccinations + daily_vaccinations
df['previous_total_vaccinations'] = \
df['total_vaccinations'].groupby(df['iso_code']).shift(1, fill_value=0.0)
df.loc[:, 'total_vaccinations'] = df.apply(f2, axis=1)
df = df.apply(self.global_fix, axis=1)
# total_vaccinations = next_total_vaccinations - next_daily_vaccinations
df['next_total_vaccinations'] = df['total_vaccinations'].groupby(df['iso_code']).shift(-1)
df['next_daily_vaccinations'] = df['daily_vaccinations'].groupby(df['iso_code']).shift(-1)
df.loc[:, 'total_vaccinations'] = df.apply(f3, axis=1)
df = df.apply(self.global_fix, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(df, 'total_vaccinations')
return df
def fix_daily_vaccinations(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['total_vaccinations']) and \
                pd.notna(row['previous_total_vaccinations'])
import numpy as np
import pandas as pd
from fuzzywuzzy import process
def compute_output_dates(df, horizon):
input_dates = df.index.values
    frequency = pd.infer_freq(pd.DatetimeIndex(input_dates))
    # Hedged completion (assumption): the rest of this function was cut off; a natural
    # continuation projects `horizon` future dates at the inferred frequency.
    output_dates = pd.date_range(start=input_dates[-1], periods=horizon + 1, freq=frequency)[1:]
    return output_dates
#!/usr/bin/env python
#----------------------------------------------------------------------#
'''
A module to analyze token trends on the BSC blockchain.
This is very much a work in progress.
'''
#----------------------------------------------------------------------#
# System Module Imports
import os
import sys
import datetime
import configparser
# Additional Module Imports
import tqdm
import pandas as pd
import requests
# Local Imports
#----------------------------------------------------------------------#
# Read in my API keys from a config file
config = configparser.ConfigParser()
config.read(os.path.join(os.getenv('HOME'), '.config', 'api_keys.ini'))
#----------------------------------------------------------------------#
# BITQUERY API
#----------------------------------------------------------------------#
url_bitquery = 'https://graphql.bitquery.io'
#----------------------------------------------------------------------#
def run_query(query): # A simple function to use requests.post to make the API call.
headers = {'X-API-KEY': config['bitquery']['key']}
request = requests.post(url_bitquery,
json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed and return code is {}. {}'.format(request.status_code, query))
#----------------------------------------------------------------------#
def q_pancake_recent_daily(start):
return '''{
ethereum(network: bsc) {
dexTrades(
options: {limit: 10000, desc: "trades"}
date: {since: "%s"}
exchangeName: {in: ["Pancake", "Pancake v2"]}
quoteCurrency: {is: "0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c"}
) {
timeInterval {
day(count: 1)
}
baseCurrency {
symbol
address
}
baseAmount
quoteCurrency {
symbol
address
}
quoteAmount
trades: count
quotePrice
open_price: minimum(of: block, get: quote_price)
high_price: quotePrice(calculate: maximum)
low_price: quotePrice(calculate: minimum)
close_price: maximum(of: block, get: quote_price)
}
}
}
''' % (start,)
#----------------------------------------------------------------------#
def q_ohlc_periods(
address,
start,
period= 'minute',
periods_per_candle= 1,
limit_candles= None,
quote_address= '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'):
'Construct a query to obtain OHLC data for a given address.'
# Apply the limit if one was given
limit = (limit_candles is not None) and f'options: {{limit: {limit_candles}, asc: "timeInterval.{period}"}}' or ''
# Now construct and return the query
return '''{
ethereum(network: bsc) {
dexTrades(%s
date: {since: "%s"}
exchangeName: {in: ["Pancake", "Pancake v2"]}
baseCurrency: {is: "%s"}
quoteCurrency: {is: "%s"}
) {
timeInterval {
%s(count: %s)
}
baseCurrency {
symbol
address
}
trades: count
open_price: minimum(of: block, get: quote_price)
high_price: quotePrice(calculate: maximum)
low_price: quotePrice(calculate: minimum)
close_price: maximum(of: block, get: quote_price)
}
}
}
''' % (limit, start, address, quote_address, period, periods_per_candle)
#----------------------------------------------------------------------#
def q_tokens_created(start_time, end_time):
return '''{
ethereum(network: bsc) {
smartContractCalls(
options: {asc: "block.height", limit: 2147483647}
smartContractMethod: {is: "Contract Creation"}
smartContractType: {is: Token}
time: {after: "%s", before: "%s"}
) {
transaction {
hash
}
block {
height
timestamp {
iso8601
}
}
smartContract {
contractType
address {
address
annotation
}
currency {
name
symbol
decimals
tokenType
}
}
caller {
address
}
}
}
}
''' % (start_time, end_time)
#----------------------------------------------------------------------#
def get_recent_tokens(from_days_ago= 5, to_days_ago= 4):
'Find all tokens registered within a given time period.'
# Construct the query
now = datetime.datetime.now()
start = now - datetime.timedelta(days=from_days_ago)
end = now - datetime.timedelta(days= to_days_ago)
query = q_tokens_created(start.isoformat(), end.isoformat())
# Now run the query
result = run_query(query)
# Basic error handling
if 'errors' in result:
raise RuntimeError(f'ERROR: New tokens query failed with {result["errors"]}')
# Collect info on each new token
new_tokens = [
{
'created' : datetime.datetime.fromisoformat(record['block']['timestamp']['iso8601'].rstrip('Z')),
'owner' : record['caller']['address'],
'address' : record['smartContract']['address']['address'],
'decimals' : record['smartContract']['currency']['decimals'],
'name' : record['smartContract']['currency']['name'],
'symbol' : record['smartContract']['currency']['symbol'],
'tokenType' : record['smartContract']['currency']['tokenType'],
}
for record in result['data']['ethereum']['smartContractCalls']
]
return new_tokens
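#----------------------------------------------------------------------#
# Hedged usage sketch (not part of the original module): defines, but does not call,
# a helper that prints newly created tokens; it assumes network access and a valid
# bitquery API key in the config read above.
def demo_recent_tokens(days_back=2):
    "Illustrative only: print tokens created `days_back` days ago."
    for tok in get_recent_tokens(from_days_ago=days_back, to_days_ago=days_back - 1):
        print(tok['created'], tok['symbol'], tok['name'], tok['address'])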
#----------------------------------------------------------------------#
def float_nan(value):
if value is None:
return float('nan')
return float(value)
#----------------------------------------------------------------------#
def get_ohlc(address, start_time, period= 'minute', periods_per_candle= 1, limit_candles= 24*60, quote_address= '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'):
'Obtain OHLC data on an address.'
# Construct and run a query to get OHLC data
query = q_ohlc_periods(address, start_time, period, periods_per_candle, limit_candles, quote_address)
result = run_query(query)
# Basic error handling
if 'errors' in result:
raise RuntimeError(f'ERROR: OHLC query ({address}, {start_time}, {period}, {periods_per_candle}, {limit_candles}) failed with {result["errors"]}')
trades = result['data']['ethereum']['dexTrades']
    times = [pd.Timestamp(trade['timeInterval'][period]) for trade in trades]
ohlc = [
(
float(trade['open_price']),
            float(trade['high_price']) if trade['high_price'] is not None else max(float(trade['open_price']), float(trade['close_price'])),
            float(trade['low_price']) if trade['low_price'] is not None else min(float(trade['open_price']), float(trade['close_price'])),
float(trade['close_price']),
int(trade['trades']),
)
for trade in trades
]
ohlc_df = pd.DataFrame(ohlc, columns= ['open', 'high', 'low', 'close', 'trades'], index= times)
return ohlc_df
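#----------------------------------------------------------------------#
# Hedged usage sketch (not part of the original module): pull one day of 1-minute
# candles for a token quoted in WBNB and show the tail; network + API key assumed.
def demo_get_ohlc(address, start_time='2022-02-10'):
    candles = get_ohlc(address, start_time, period='minute', periods_per_candle=1, limit_candles=24 * 60)
    print(candles.tail())
    return candles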
#----------------------------------------------------------------------#
class OHLCData(dict):
'''
A class that obtains OHLC data from whatever source, can save/load as JSON, and can update on demand.
It calculates a list of statistical indicators. Supported indicator types are:
ema : exponential moving average - alpha is controlled by number of periods in window
crossover : abs=# periods since the last time val-a went up over val-b, sign=current comparison
'''
start_date = None
data = None
token_address = '<KEY>'
quote_address = '0xe9e7cea3dedca5984780bafc599bd69add087d56'
def __init__(self, token_address= None, quote_address= None, start_date= '2022-02-10', today= False):
self.today = today and 1 or 0
if token_address is not None:
self.token_address = token_address
if quote_address is not None:
self.quote_address = quote_address
self.start_date = start_date
self.otherdata = {}
self.load()
self.retrieve()
return
def __len__(self):
        return len(self.data) if self.data is not None else 0
def __contains__(self, key):
if self.data is None:
return False
try:
return len(self.data.loc[key]) > 0
except:
pass
return False
def __getitem__(self, key):
if self.data is None:
raise IndexError('Empty data')
if isinstance(key, slice):
try:
return self.data[key]
except:
pass
try:
return self.data.loc[key]
except:
pass
raise IndexError(f'Unable to process slice [{key}]')
if key in self.data:
return self.data[key]
if key in self.data.index:
return self.data.loc[key]
raise IndexError(f'Unable to process query [{key}]')
def __repr__(self):
return f'OHLCData({repr(self.data)})'
def __str__(self):
return str(self.data)
def save(self, verbose= True):
'Save OHLC data and stats to a file.'
if self.data is None:
return
try:
self.data.to_pickle(f'ohlc_{self.token_address}_{self.quote_address}.pickle'.lower())
if verbose:
print(f'Saved {int(len(self.data) / 1440)} days of OHLC to storage file')
except Exception as err:
print(f'Unable to save storage file: {err}')
return
def load(self, verbose= True):
'Load OHLC data and stats from a file.'
try:
self.data = pd.read_pickle(f'ohlc_{self.token_address}_{self.quote_address}.pickle'.lower())
if verbose:
print(f'Loaded {int(len(self.data) / 1440)} days of OHLC from storage file')
except Exception as err:
print(f'Unable to load storage file: {err}')
return
def retrieve(self):
'Retrieve any missing data, and calculate stats over all data.'
# Figure out what dates we will loop over
date = datetime.date.fromisoformat(self.start_date)
day = datetime.timedelta(days= 1)
now = datetime.datetime.now()
today = now.date()
n_days = self.today + int((today - date) / day)
n_pulled = 0
n_saved = 0
# Include any existing data we may have
if self.data is not None:
frames = [self.data]
else:
frames = []
print('Retrieving data:')
dates = tqdm.tqdm(range(n_days))
for ii in dates:
# Pull each day worth of OHLC from the server
isodate = date.isoformat()
dates.set_description(f'OHLC data [{isodate}] pulled={n_pulled:4} saved={n_saved:4} ')
if isodate not in self or today == date:
frames.append(get_ohlc(self.token_address, isodate, 'minute', 1, 24*60, self.quote_address))
n_pulled += 1
if n_pulled > 27:
self.data = pd.concat(frames)
self.save(False)
frames = [self.data]
n_saved += n_pulled
n_pulled = 0
date += day
# Save the result
if frames:
            self.data = pd.concat(frames)
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
        expected = pd.Series([1, -3.14, 'apple'])
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
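# Descriptive note: _groupby_function is a factory for the simple reductions
# (sum/prod/min/max/first/last/_count); it tries the fast cython aggregation first
# and falls back to a python-level aggregation along the group axis if that fails.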
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
        This will group by the specified frequency if the target selection (via key or level) is
        a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
    >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
        given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
            # possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
# we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will not show nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
        we may have roundtripped through object in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
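# Illustrative sketch (not part of the original module): a tiny worked example
# of generate_bins_generic. The numbers are made up; both arrays are sorted and
# the values lie within the binner end-points, as required above.
def _example_generate_bins_generic():
    values = np.array([1, 2, 3, 6, 7, 10])
    binner = np.array([0, 5, 10])
    bins = generate_bins_generic(values, binner, closed='right')
    # bins -> array([3, 6]): values[0:3] fall in (0, 5], values[3:6] in (5, 10]
    return bins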
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)  # api: pandas.to_datetime
#!/usr/bin/env python3
"""Train an XGBoost model for colour segmentation of images.
Segmentation of up to six colours is supported. Fewer number of colours are
possible simply by providing training data for only a subset of the colours.
"""
import argparse
import pathlib
import random
import sys
import time
import cv2 as cv
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from matplotlib import colors
filename2class = {}
filename2class["n"] = "background"
filename2class["y"] = "yellow"
filename2class["r"] = "red"
filename2class["m"] = "magenta"
filename2class["b"] = "blue"
filename2class["c"] = "cyan"
filename2class["g"] = "green"
def class2bgr(pixel_class):
if pixel_class == "background":
color = "black"
else:
color = pixel_class
rgb = np.array(colors.to_rgba(color)[:3])
rgb = (rgb * 255).astype("uint8")
return rgb[::-1]
def compute_identical_fraction(a, b):
assert a.shape == b.shape
identical_fraction = (a == b).sum() / a.size
return identical_fraction
def create_channels(image_bgr):
conversions = {
"hsv": cv.COLOR_BGR2HSV,
"xyz": cv.COLOR_BGR2XYZ,
"LAB": cv.COLOR_BGR2Lab,
# "LUV": cv.COLOR_BGR2Luv,
}
channels = {"bgr"[i]: image_bgr[:, :, i] for i in range(3)}
for key in conversions:
image = cv.cvtColor(image_bgr, conversions[key])
new_channels = {key[i]: image[:, :, i] for i in range(len(key))}
channels = {**channels, **new_channels}
return channels
def create_features(image_bgr, flatten=False):
image_bgr = cv.medianBlur(image_bgr, 7)
channels = create_channels(image_bgr=image_bgr)
if flatten:
channels = {key: channels[key].flatten() for key in channels}
return channels, image_bgr.shape[:2]
def load_segment(path: pathlib.Path, name: str) -> pd.DataFrame:
image = cv.imread(str(path / ("camera" + name[1:])))
features, shape = create_features(image_bgr=image, flatten=True)
data = pd.DataFrame(features)
mask = cv.imread(str(path / name))
mask = mask.sum(axis=2) != 0
mask = mask.flatten()
data = data[mask]
data["class"] = filename2class[name[0]]
return data
def balance_classes(data, background_ratio, random_state):
foreground = data[data["class"] != "background"]
min_class_size = foreground["class"].value_counts().min()
foreground = foreground.groupby("class").apply(
lambda d: d.sample(min_class_size, random_state=random_state)
)
foreground = foreground.reset_index(drop=True)
background = data[data["class"] == "background"]
n_background_points = int(background_ratio * foreground.shape[0])
background = background.sample(
n_background_points, random_state=random_state
)
return pd.concat([foreground, background])
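# Illustrative sketch (not part of the original script): balance_classes keeps an
# equal number of pixels per foreground colour and roughly background_ratio times
# as many background pixels. The toy frame below is made up.
def _example_balance_classes():
    toy = pd.DataFrame({
        "r": np.random.rand(30),
        "class": ["red"] * 10 + ["blue"] * 5 + ["background"] * 15,
    })
    balanced = balance_classes(toy, background_ratio=1, random_state=0)
    # 5 red + 5 blue foreground pixels plus 10 background pixels remain
    return balanced["class"].value_counts()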
def get_subdirectories(input_path: pathlib.Path):
return [f for f in input_path.iterdir() if f.is_dir()]
def load_images_and_create_data(
input_path: pathlib.Path, output_filename: str
):
# go through the folders and load all the annotated images
# then compute features and create a pandas frame
print("loading images")
data = []
for frame_folder in get_subdirectories(input_path):
segment_names = [
f.name
for f in frame_folder.iterdir()
if f.is_file() and f.name[1].isdigit()
]
for segment_name in segment_names:
segment_data = load_segment(path=frame_folder, name=segment_name)
segment_data["frame"] = frame_folder.name
data.append(segment_data)
pd_data = pd.concat(data, axis="index")
pd_data.to_pickle(output_filename)
print("done loading images")
def prepare_data(
data: pd.DataFrame, feature_names, train_fraction, background_ratio, seed
):
# create training and test data from entire data frame
# we split according to frames, not single pixels
# to properly test generalization to other frames
frames = sorted(list(set(data["frame"])))
random.seed(seed)
random.shuffle(frames)
n_train_frames = int(len(frames) * train_fraction)
train_frames = frames[:n_train_frames]
test_frames = frames[n_train_frames:]
train_set = data.loc[data["frame"].isin(train_frames)]
train_set = balance_classes(
data=train_set, background_ratio=background_ratio, random_state=seed
)
train_set = train_set.sample(frac=1, random_state=seed)
test_set = data.loc[data["frame"].isin(test_frames)]
test_set = balance_classes(
data=test_set, background_ratio=background_ratio, random_state=seed
)
test_set = test_set.sample(frac=1, random_state=seed)
target = "class"
X_train = train_set[feature_names]
y_train = train_set[target]
X_test = test_set[feature_names]
y_test = test_set[target]
assert not set(train_set["frame"]).intersection(set(test_set["frame"]))
print(train_set["class"].value_counts())
print(test_set["class"].value_counts())
return X_train, y_train, X_test, y_test
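# Illustrative sketch (not part of the original script): prepare_data splits by
# frame rather than by pixel, so all pixels of a frame land on the same side of
# the train/test split. The toy frame and feature names below are made up.
def _example_prepare_data():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame({
        "r": rng.rand(40),
        "g": rng.rand(40),
        "class": (["red"] * 10 + ["background"] * 10) * 2,
        "frame": ["0001"] * 20 + ["0002"] * 20,
    })
    return prepare_data(
        data=toy,
        feature_names=["r", "g"],
        train_fraction=0.5,
        background_ratio=1,
        seed=0,
    )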
def fit_model(X_train, y_train, seed=42):
model = XGBClassifier(
learning_rate=1.0,
n_estimators=1, # only one tree
n_jobs=8,
max_depth=6, # maximum tree depth
random_state=seed,
)
model.fit(X_train, y_train)
return model
def evaluate(model, X, y):
# compute and print fraction of correct labels
# also measure time
print("success rate: ", compute_identical_fraction(model.predict(X), y))
start = time.time()
model.predict(X)
end = time.time()
print("n evaluations: ", X.shape[0])
print("elapsed time: ", end - start)
def load_data_and_fit_model(input_filename, output_filename, feature_names):
print("preparing training data")
data = pd.read_pickle(input_filename)
X_train, y_train, X_test, y_test = prepare_data(
data=data,
feature_names=feature_names,
train_fraction=0.8,
background_ratio=20,
seed=22,
)
print("done preparing training data")
print("fitting model")
model = fit_model(X_train, y_train)
model.save_model(output_filename)
model.get_booster().dump_model(output_filename + "_dump.txt")
print("done fitting model")
print("test data ------------------------")
evaluate(model, X_test, y_test)
print("train data -----------------------")
evaluate(model, X_train, y_train)
def load_model_and_generate_evaluation_images(
model_filename,
input_path: pathlib.Path,
output_path: pathlib.Path,
feature_names,
):
model = XGBClassifier()
model.load_model(model_filename)
for frame_folder in sorted(get_subdirectories(input_path)):
segment_names = [
f.name
for f in frame_folder.iterdir()
if f.is_file() and f.name[1].isdigit()
]
if len(segment_names) != 0:
continue
for camera_name in ["60", "180", "300"]:
image_name = "camera" + camera_name + ".png"
print(frame_folder / image_name)
image_bgr = cv.imread(str(frame_folder / image_name))
features, shape = create_features(
image_bgr=image_bgr, flatten=True
)
X = pd.DataFrame(features)  # api: pandas.DataFrame
"""
Set of utility programs for IRIS.
"""
import os
import re
import io
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from glob import glob
# pylint: disable=F0401,E0611,E1103
from urllib.request import urlopen
from urllib.parse import urljoin, urlparse
from urllib.error import HTTPError, URLError
def iris_timeline_parse(timeline_file):
"""
Parses an IRIS timeline file (SCI format). The timeline entries are collected
into a structured array and returned as a pandas DataFrame.
Parameters
----------
timeline_file - string
Filename with timeline file, or URL to the file.
Returns
-------
result - pandas.DataFrame
DataFrame with timeline.
"""
from sunpy.time import parse_time
data = []
slews = []
curr_slew = np.array([np.nan, np.nan])
line_pat = re.compile('.+OBSID=.+rpt.+endtime', re.IGNORECASE)
slew_pat = re.compile('.+I_EVENT_MESSAGE.+MSG="SLEW*', re.IGNORECASE)
if urlparse(timeline_file).netloc == '': # local file
file_obj = open(timeline_file, 'r')
else: # network location
try:
tmp = urlopen(timeline_file).read()
file_obj = io.StringIO(tmp.decode('utf-8'))  # urlopen().read() returns bytes
except (HTTPError, URLError):
raise EOFError(('iris_timeline_parse: could not open the '
'following file:\n' + timeline_file))
for line in file_obj:
if slew_pat.match(line):
tmp = line.split('=')[1].replace('"', '').strip('SLEW_').split('_')
curr_slew = np.array(tmp).astype('f')
if line_pat.match(line):
data.append(line.replace('//', '').replace(' x ', ', ').strip())
slews.append(curr_slew) # include most up to date slew
file_obj.close()
if len(data) == 0:
raise EOFError(('iris_timeline_parse: could not find any'
' observations in:\n' + str(timeline_file)))
arr_type = [('date_obs', 'datetime64[us]'), ('date_end', 'datetime64[us]'),
('obsid', 'i8'), ('repeats', 'i4'), ('duration', 'f'),
('size', 'f'), ('description', '|S200'), ('xpos', 'f'),
('ypos', 'f'), ('timeline_name', '|S200')]
result = np.zeros(len(data), dtype=arr_type)
result['timeline_name'] = timeline_file
for i, line in enumerate(data):
date_tmp = line.split()[0]
if date_tmp[-2:] == '60': # deal with non-compliant second formats
date_tmp = date_tmp[:-2] + '59.999999'
result[i]['date_obs'] = parse_time(date_tmp)
tmp = line.replace(' Mbits, end', ', end') # Remove new Mbits size str
tmp = tmp.split('desc=')
result[i]['description'] = tmp[1]
tmp = tmp[0]
tmp = [k.split('=')[-1] for k in ' '.join(tmp.split()[1:]).split(',')]
result[i]['obsid'] = int(tmp[0])
result[i]['repeats'] = int(tmp[1])
result[i]['duration'] = float(tmp[2][:-1])
result[i]['size'] = float(tmp[3])
tmp = tmp[4].split()
result[i]['date_end'] = parse_time(date_tmp[:9] + tmp[-1]) + \
timedelta(days=int(tmp[0].strip('+')))
result[i]['xpos'] = slews[i][0]
result[i]['ypos'] = slews[i][1]
return pd.DataFrame(result)  # api: pandas.DataFrame
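# Illustrative usage sketch (not part of the original module). The file name
# below is hypothetical; any IRIS SCI timeline file (local path or URL) works.
def _example_iris_timeline_usage():
    timeline = iris_timeline_parse('IRIS_science_timeline_example.txt')
    # each row is one OBSID entry with its start/end times and pointing
    return timeline[['date_obs', 'date_end', 'obsid', 'repeats', 'xpos', 'ypos']]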
import os
from unittest import mock
import idelib
import numpy as np
import pandas as pd
import pytest
import hypothesis as hyp
import hypothesis.strategies as hyp_st
import hypothesis.extra.numpy as hyp_np
import endaq.batch.analyzer
from endaq.calc.stats import rms, L2_norm
np.random.seed(0)
@pytest.fixture()
def ide_SSX70065():
with idelib.importFile(os.path.join("tests", "batch", "SSX70065.IDE")) as doc:
yield doc
@pytest.fixture()
def analyzer_raw():
analyzer_mock = mock.create_autospec(
endaq.batch.analyzer.CalcCache, spec_set=False, instance=True
)
analyzer_mock.MPS2_TO_G = endaq.batch.analyzer.MPS2_TO_G
analyzer_mock.MPS_TO_MMPS = endaq.batch.analyzer.MPS_TO_MMPS
analyzer_mock.M_TO_MM = endaq.batch.analyzer.M_TO_MM
analyzer_mock.PV_NATURAL_FREQS = endaq.batch.analyzer.CalcCache.PV_NATURAL_FREQS
return analyzer_mock
@pytest.fixture()
def analyzer_bulk(analyzer_raw):
analyzer_mock = analyzer_raw
analyzer_mock._channels = {
"acc": mock.Mock(axis_names=list("XYZ")),
"gps": mock.Mock(axis_names=["Latitude", "Longitude"]),
"spd": mock.Mock(axis_names=["Ground"]),
"gyr": mock.Mock(axis_names=list("XYZ")),
"mic": mock.Mock(axis_names=["Mic"]),
"tmp": mock.Mock(axis_names=["Control"]),
"pre": mock.Mock(axis_names=["Control"]),
}
analyzer_mock._accelerationFs = 3000
analyzer_mock._accelerationData = pd.DataFrame(
np.random.random((21, 3)),
index=pd.Series(np.arange(21) / 3000, name="time"),
columns=pd.Series(["X", "Y", "Z"], name="axis"),
)
analyzer_mock._accelerationResultantData = analyzer_mock._accelerationData.apply(
L2_norm, axis="columns"
).to_frame()
analyzer_mock._microphoneData = pd.DataFrame(
np.random.random(21),
index=pd.Series(np.arange(21) / 3000, name="time"),
columns=pd.Series(["Mic"], name="axis"),
)
analyzer_mock._velocityData = pd.DataFrame(
np.random.random((21, 3)),
index=pd.Series(np.arange(21) / 3000, name="time"),
columns=pd.Series(["X", "Y", "Z"], name="axis"),
)
analyzer_mock._displacementData = pd.DataFrame(
np.random.random((21, 3)),
index=pd.Series(np.arange(21) / 3000, name="time"),
columns=pd.Series(["X", "Y", "Z"], name="axis"),
)
analyzer_mock._pressureData = pd.DataFrame(
np.random.random(5),
index=pd.Series(np.arange(5) / 5, name="time"),
columns=pd.Series(["Control"], name="axis"),
)
analyzer_mock._temperatureData = pd.DataFrame(
np.random.random(5),
index=pd.Series(np.arange(5) / 5, name="time"),
columns=pd.Series(["Control"], name="axis"),
)
analyzer_mock._gyroscopeData = pd.DataFrame(
np.random.random(11),
index=pd.Series(np.arange(11) / 5, name="time"),
columns=pd.Series(["Gyro"], name="axis"),
)
return analyzer_mock
# ==============================================================================
# Analyzer class tests
# ==============================================================================
class TestAnalyzer:
def test_from_ide_vs_from_literal(self, ide_SSX70065):
dataset = ide_SSX70065
calc_params = endaq.batch.analyzer.CalcParams(
accel_start_time=None,
accel_end_time=None,
accel_start_margin=None,
accel_end_margin=None,
accel_highpass_cutoff=1,
accel_integral_tukey_percent=0,
accel_integral_zero="mean",
psd_freq_bin_width=1,
psd_window="hann",
pvss_init_freq=1,
pvss_bins_per_octave=12,
vc_init_freq=1,
vc_bins_per_octave=3,
)
dataset_cache = endaq.batch.analyzer.CalcCache.from_ide(dataset, calc_params)
raw_cache = endaq.batch.analyzer.CalcCache.from_raw_data(
[
(
endaq.ide.to_pandas(dataset.channels[32], time_mode="timedelta"),
("Acceleration", "g"),
),
(
endaq.ide.to_pandas(
dataset.channels[36].subchannels[0], time_mode="timedelta"
),
("Pressure", "Pa"),
),
(
endaq.ide.to_pandas(
dataset.channels[36].subchannels[1], time_mode="timedelta"
),
("Temperature", "°C"),
),
],
calc_params,
)
assert set(dataset_cache._channels) == set(raw_cache._channels)
for (ds_struct, raw_struct) in (
(dataset_cache._channels[measure_key], raw_cache._channels[measure_key])
for measure_key in dataset_cache._channels
):
assert ds_struct.units == raw_struct.units
pd.testing.assert_frame_equal(
ds_struct.to_pandas(time_mode="timedelta"),
raw_struct.to_pandas(time_mode="timedelta"),
)
@hyp.given(
df=hyp_np.arrays(
elements=hyp_st.floats(-1e7, 1e7),
shape=(20, 2),
dtype=np.float64,
).map(
lambda array: pd.DataFrame(
array, index=np.timedelta64(200, "ms") * np.arange(20)
)
),
)
def test_accelerationData(self, df):
calc_params = endaq.batch.analyzer.CalcParams(
accel_start_time=None,
accel_end_time=None,
accel_start_margin=None,
accel_end_margin=None,
accel_highpass_cutoff=1,
accel_integral_tukey_percent=0,
accel_integral_zero="mean",
psd_freq_bin_width=1,
psd_window="hann",
pvss_init_freq=1,
pvss_bins_per_octave=12,
vc_init_freq=1,
vc_bins_per_octave=3,
)
data_cache = endaq.batch.analyzer.CalcCache.from_raw_data(
[(df, ("Acceleration", "m/s\u00b2"))], calc_params
)
df_accel = endaq.calc.filters.butterworth(
df, low_cutoff=calc_params.accel_highpass_cutoff
)
pd.testing.assert_frame_equal(data_cache._accelerationData, df_accel)
(_df_accel, df_vel, df_displ) = endaq.calc.integrate.integrals(
df_accel,
n=2,
zero=calc_params.accel_integral_zero,
highpass_cutoff=calc_params.accel_highpass_cutoff,
tukey_percent=calc_params.accel_integral_tukey_percent,
)
)
pd.testing.assert_frame_equal(data_cache._velocityData, df_vel)  # api: pandas.testing.assert_frame_equal
# -*- coding: utf-8 -*-
"""Parsers and downloaders for Bio2BEL PheWAS Catalog."""
from collections import defaultdict
import logging
from typing import Dict
import pandas as pd
from tqdm import tqdm
from bio2bel.downloading import make_zipped_df_getter
from .constants import DATA_PATH, DATA_URL
__all__ = [
'get_df',
'make_dict',
]
ZIP_INTERNAL_PATH = 'phewas-catalog.csv'
get_df = make_zipped_df_getter(DATA_URL, DATA_PATH, ZIP_INTERNAL_PATH, sep=',', header=0)
"""Download the data from Denny JC, *et al.* 2013."""
def make_dict() -> Dict:
"""Convert the data to a dictionary."""
return _make_dict(get_df())
def _make_dict(
df: pd.DataFrame,
use_tqdm: bool = True,
) -> Dict:
_dict = defaultdict(list)
it = df[["snp", 'gene_name', 'phewas phenotype', 'odds-ratio', 'phewas code']].iterrows()
if use_tqdm:
it = tqdm(it, total=len(df.index), desc='PheWAS Catalog - generating Dict')
for i, (snp, gene_symbol, phenotype, odds_ratio, icd_code) in it:
if not snp or not gene_symbol or not phenotype or pd.isna(phenotype):
logging.debug('Skipping row %s: %s %s %s %s', i, snp, gene_symbol, phenotype, odds_ratio)
continue
if pd.notna(gene_symbol):  # api: pandas.notna
#IMPORTING REQUIREMENTS
from flask import Flask
from flask import render_template
import pandas as pd
import numpy as np
from flask import request
from flask import redirect
from flask import url_for
import os
import time
import FileWatch
from FileWatch import watch_file
import AnalysisHelper
#ASSIGNING THE FLASK APP WITH __name__ FOR CALLING FROM MAIN
app = Flask(__name__)
#DEFAULT ROUTING
@app.route('/')
#ROUTING TO HOME
@app.route('/Home')
def Home():
print('Home')
return render_template('Home.html')
#ROUTING TO WAW
@app.route('/WAW')
def WAW():
print('Who are we')
return render_template('WAW.html')
#ROUTING TO IDA
@app.route('/IDA', methods=['GET', 'POST'])
def IDA():
print('IDA')
return render_template('IDA.html')
#ROUTING TO IDA PHASE 1
@app.route('/IDA_P1')
def IDA_P1():
print('IDA_P1')
# ---------------------------------------------------------FILE SCREENING-----------------------------------------------
# FILENAME
filename = os.path.join(os.getcwd(), "source\Sample.csv")
# CHECKING THE FILE
Check_Availability = FileWatch.watch_file(filename)
if Check_Availability == True:
print("File Found")
data = pd.read_csv(filename, sep=',', dtype={}, na_values=['.', '??'])  # api: pandas.read_csv
"""
Hyperparameter Selection
===========================
This script will show how to perform hyperparameter selection
"""
# %%
import numpy as np
import pandas as pd
from sklearn.utils.fixes import loguniform
from cca_zoo.data import generate_covariance_data
from cca_zoo.model_selection import GridSearchCV, RandomizedSearchCV
from cca_zoo.models import KCCA
# %%
np.random.seed(42)
n = 200
p = 100
q = 100
latent_dims = 1
cv = 3
(X, Y), (tx, ty) = generate_covariance_data(
n, view_features=[p, q], latent_dims=latent_dims, correlation=[0.9]
)
# %%
# Grid Search
# ^^^^^^^^^^^^^
# Hyperparameter selection works in a very similar way to scikit-learn; the main difference is how we enter the parameter grid.
# We form a parameter grid with the search space for each view for each parameter.
# This search space must be entered as a list but can be any of
# - a single value (as in "kernel") where this value will be used for each view
# - a list for each view
# - a mixture of a single value for one view and a distribution or list for the other
# %%
param_grid = {"kernel": ["poly"], "c": [[1e-1], [1e-1, 2e-1]], "degree": [[2], [2, 3]]}
kernel_reg = GridSearchCV(
KCCA(latent_dims=latent_dims), param_grid=param_grid, cv=cv, verbose=True
).fit([X, Y])
print(pd.DataFrame(kernel_reg.cv_results_))  # api: pandas.DataFrame
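# %%
# Randomized Search
# ^^^^^^^^^^^^^^^^^
# Illustrative sketch: RandomizedSearchCV and loguniform are imported above, so a
# randomized search over the same kernel parameters might look like this. The
# distributions and n_iter value are assumptions chosen for demonstration.
param_grid_random = {
    "kernel": ["poly"],
    "c": [loguniform(1e-1, 2e-1), [1e-1]],
    "degree": [[2], [2, 3]],
}
kernel_reg_random = RandomizedSearchCV(
    KCCA(latent_dims=latent_dims),
    param_distributions=param_grid_random,
    cv=cv,
    verbose=True,
    n_iter=4,
).fit([X, Y])
print(pd.DataFrame(kernel_reg_random.cv_results_))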
import pandas as pd
import psycopg2
import pickle
import numpy as np
import json
from collections import defaultdict
# counterS = 0
# global counterS
# global valGlob
# from sqlalchemy import create_engine
# -*- coding: utf-8 -*-
import os
import sys
import copy
def DFS(G,v,seen=None,path=None):
if seen is None: seen = set()
if path is None: path = [v]
seen.add(v)
paths = []
for t in G[v]:
if t not in seen:
t_path = path + [t]
paths.append(tuple(t_path))
paths.extend(DFS(G, t, seen, t_path))
return paths
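# Illustrative sketch (not part of the original script): DFS enumerates every
# path from a starting class up through its reachable superclasses. The toy
# graph below is made up; keys map a class to its direct superclasses.
def _example_dfs():
    graph = defaultdict(list, {'Q3': ['Q2'], 'Q2': ['Q1'], 'Q1': []})
    # returns [('Q3', 'Q2'), ('Q3', 'Q2', 'Q1')]
    return DFS(graph, 'Q3')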
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
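# Illustrative sketch (not part of the original script): chunkIt splits a
# sequence into num consecutive chunks of roughly equal size.
def _example_chunkit():
    # returns [[1], [2, 3], [4, 5]]
    return chunkIt([1, 2, 3, 4, 5], 3)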
def depthCalculator(fileName):
dictStats = {}
dfClean = pd.read_csv(fileName)
date = fileName.replace('WDHierarchy-', '').replace('.csv', '')
dictStats[date] = {}
# dfClean.drop(['statementid', 'ts', 'revid'], axis = 1, inplace=True)
print(fileName + ' loaded')
dfClean['statvalue'] = dfClean['statvalue'].apply(lambda ni: str(ni))
dfClean['itemid'] = dfClean['itemid'].apply(lambda nu: str(nu))
subClasses = list(dfClean['itemid'].loc[dfClean['statproperty'] == "P279",].unique())
classesList = list(dfClean['statvalue'].unique())
# rootClasses = [x for x in classesList if x not in subClasses]
rootClasses = list(set(classesList) - set(subClasses))
instanceOf = list(dfClean['statvalue'].loc[dfClean['statproperty'] == 'P31',].unique())
# instanceOf = [k for k in instanceOf if k not in rootClasses]
instanceOf = list(set(instanceOf) - set(rootClasses))
leafClasses = list(dfClean['itemid'].loc[(dfClean['statproperty'] == 'P279') & (~dfClean['itemid'].isin(dfClean['statvalue'])),].unique())
shallowClasses = list(dfClean['itemid'].loc[(dfClean['statproperty'] == 'P279') & (~dfClean['itemid'].isin(dfClean['statvalue'])) & (dfClean['statvalue'].isin(rootClasses)),].unique())
# firstSub = list(dfClean['itemid'].loc[(dfClean['statproperty'] == 'P279') & (dfClean['statvalue'].isin(rootClasses)),].unique())
# twoDepth = list(dfClean['itemid'].loc[(dfClean['statproperty'] == 'P279') & (~dfClean['itemid'].isin(dfClean['statvalue'])) & (~dfClean['statvalue'].isin(firstSub)),].unique())
# deepClasses = list(set(twoDepth) - set(shallowClasses))
# leafClasses = set(leafClasses + instanceOf)
classesList += subClasses
# childless classes; reduces computation time for avgDepth
superClasses = list(dfClean['statvalue'].loc[dfClean['statproperty'] == "P279",].unique())
childLessClasses = list(set(rootClasses) - set(superClasses))
###remember to add childLessClasses and shallowClasses!
### Explicit depth
# bibi = dfClean.groupby(['itemid', 'statproperty'])['statvalue'].unique()
print('start computing depth')
bibi = dfClean.loc[dfClean.statproperty == 'P279', ].groupby('itemid')['statvalue'].unique()
#compute depth only for leaf classes whose hierarchy is deeper than 1
deepClasses = list(set(leafClasses) - set(shallowClasses))
fertileRoots = list(set(rootClasses) - set(childLessClasses))
uniqueSuperClasses = bibi.to_frame()
uniqueSuperClasses.reset_index(inplace=True)
# uniqueSuperClasses = uniquePerClass.loc[uniquePerClass['statproperty'] == 'P279',]
if len(uniqueSuperClasses.index) != 0:
# uniqueSuperClasses.drop('statproperty', axis=1, inplace=True)
uniqueSuperClasses['statvalue'] = uniqueSuperClasses['statvalue'].apply(lambda c: c.tolist())
uniqueDict = uniqueSuperClasses.set_index('itemid').T.to_dict('list')
for key in uniqueDict.keys():
uniqueDict[key] = uniqueDict[key][0]
classesDefaultDict = defaultdict(str, uniqueDict)
deepChunks = chunkIt(deepClasses, 5)
colLabels =['length', 'rootItem']
tupleDf = pd.DataFrame(columns=colLabels)
for chunk in deepChunks:
allPaths = [p for ps in [DFS(classesDefaultDict, n) for n in set(chunk)] for p in ps]
print('all depths computed, now cleaning')
tupleList = [(len(p), p[len(p)-1]) for p in allPaths]
tempDf = pd.DataFrame.from_records(tupleList, columns=colLabels)  # api: pandas.DataFrame.from_records
import collections
import json
import os
import numpy as np
import tensorflow as tf
from model_data_util.create_tt_data.model_data_convert import convertModelToRawData, convertRawDataToModel
from TTBenchmark.check_environment import check_env_info, in_notebook
from TTBenchmark.constant import GDRIVE_PATH
if in_notebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
class BenchmarkDataMini():
def __init__(self,
raw_model: tf.keras.Model,
actual_tt_mean: float,
actual_tt_median: float,
actual_tt_std: float,
batch_size: int,
validation_split: float,
x_shape: np.array,
verbose=False
):
self.model_info: dict = {
"raw_model": raw_model
}
self.actual_tt: dict = {
"mean": actual_tt_mean,
"median": actual_tt_median,
"std": actual_tt_std
}
self.fit_kwargs: dict = {
"batch_size": batch_size,
"validation_split": validation_split,
"verbose": verbose
}
self.data: dict = {"x_shape": x_shape}
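# Illustrative sketch (not part of the original module): BenchmarkDataMini just
# bundles a Keras model with its measured training-time statistics and fit
# arguments. The model and the numbers below are made up.
def _example_benchmark_data_mini():
    model = tf.keras.Sequential([tf.keras.layers.Dense(8, input_shape=(4,))])
    return BenchmarkDataMini(
        raw_model=model,
        actual_tt_mean=0.12,
        actual_tt_median=0.11,
        actual_tt_std=0.01,
        batch_size=32,
        validation_split=0.2,
        x_shape=np.array([1000, 4]),
    )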
def _ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def _write_json(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=4)
def _get_benchmark_path(gdrive_path, model_type, fname="trained_tt.json"):
env_fname = "_".join(list(check_env_info().values()))
env_path = os.path.join(gdrive_path, env_fname)
actual_tt_json_path = os.path.join(env_path, model_type, fname)
return actual_tt_json_path
def save_benchmark(
benchmarks_mini: list,
columns: list,
model_type: str,
gdrive_path=GDRIVE_PATH,
replace=True
):
"""
save benchmarks into json structure:
model_name1:{
model_df: df,
actual_tt: {mean: , median: , std:}
fit_kwargs: {batch_size: , validation_split: , verbose: }
}
:param benchmarks_mini: list of benchmark_mini
:param columns:
:param model_type:
:param gdrive_path:
:return:
"""
actual_tt_json_path = _get_benchmark_path(gdrive_path, model_type)
_ensure_dir(actual_tt_json_path)
model_index = None
if not os.path.exists(actual_tt_json_path) or replace == True:
actual_tt_json = collections.defaultdict(dict)
_write_json(actual_tt_json, actual_tt_json_path)
model_index = 0
with open(actual_tt_json_path) as f:
actual_tt_json = json.load(f)
for i, bmdatamini in enumerate(tqdm(benchmarks_mini)):
model = bmdatamini.model_info["raw_model"]
actual_tt = bmdatamini.actual_tt
fit_kwargs = bmdatamini.fit_kwargs
x_shape = bmdatamini.data['x_shape']
# on the first pass in append mode, pick up the next free index from the existing file
if model_index is None and i == 0:
all_models = list(actual_tt_json.keys())
if len(all_models) > 0:
model_index = int(max(all_models).split("_")[-1]) + 1
else:
# empty json file
model_index = 0
else:
model_index += 1
model_name = f"{model_type}_{model_index}"
actual_tt_json[model_name] = {}
batch_input_shape = np.array([fit_kwargs["batch_size"], *x_shape[1:]])
num_data = x_shape[0]
item = actual_tt_json[model_name]
item["model_df"] = convertModelToRawData(model, columns, num_data, batch_input_shape,
num_dim=len(
batch_input_shape)).to_dict() # with no out_dim padding
item["actual_tt"] = actual_tt
item["fit_kwargs"] = fit_kwargs
_write_json(actual_tt_json, actual_tt_json_path)
if __name__ == "__main__":
import pickle
import pandas as pd
res = pickle.load(open(
"/Users/wangqiong/Documents/AIpaca/Code/TT Prediction/benchmark/benchmark_lib/local_data/ffnn_data_V100_benchmark.pkl",
"rb"))
benchmarks_mini = []
res['X_df'] = [pd.DataFrame(x_df) for x_df in res['X_df']]  # api: pandas.DataFrame
def pd_seresnext_v3():
import torch
model_name = 'pd_seresnext_v3'
device = 'cuda:1'
torch.backends.cudnn.benchmark = True
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
from skimage.transform import resize
from skimage.color import rgb2gray, gray2rgb
from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score
from tqdm import tqdm_notebook
import gc
import math
import sys
from fastai import *
from fastai.vision import *
np.random.seed(42)
data_dir = '../input/'
submit_l1_dir = "../submits/"
weights_dir = "../weights/"
results_dir = '../results/'
name_label_dict = {
0: 'Nucleoplasm',
1: 'Nuclear membrane',
2: 'Nucleoli',
3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles',
5: 'Nuclear bodies',
6: 'Endoplasmic reticulum',
7: 'Golgi apparatus',
8: 'Peroxisomes',
9: 'Endosomes',
10: 'Lysosomes',
11: 'Intermediate filaments',
12: 'Actin filaments',
13: 'Focal adhesion sites',
14: 'Microtubules',
15: 'Microtubule ends',
16: 'Cytokinetic bridge',
17: 'Mitotic spindle',
18: 'Microtubule organizing center',
19: 'Centrosome',
20: 'Lipid droplets',
21: 'Plasma membrane',
22: 'Cell junctions',
23: 'Mitochondria',
24: 'Aggresome',
25: 'Cytosol',
26: 'Cytoplasmic bodies',
27: 'Rods & rings'}
def kfold_threshold(y_true, y_pred):
n_classes = len(name_label_dict)
classes_thresholds = []
classes_scores = []
kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=239)
for i in range(n_classes):
kf_class_thresholds = []
if (sum(y_true[:, i]) > 20):
for _, tst_inx in kf.split(y_true, y_true[:, i]):
t_min = np.min(y_pred[tst_inx, i])
t_max = np.max(y_pred[tst_inx, i])
thresholds = np.linspace(t_min, t_max, 50)
scores = np.array([
f1_score(y_true[tst_inx, i], np.int32(y_pred[tst_inx, i] >= threshold)) for threshold in
thresholds
])
threshold_best_index = np.argmax(scores)
kf_class_thresholds.append(thresholds[threshold_best_index])
threshold = np.mean(kf_class_thresholds)
classes_thresholds.append(threshold)
f1 = f1_score(y_true[:, i], np.int32(y_pred[:, i] >= threshold))
classes_scores.append(f1)
else:
t_min = np.min(y_pred[:, i])
t_max = np.max(y_pred[:, i])
thresholds = np.linspace(t_min, t_max, 50)
scores = np.array([
f1_score(y_true[:, i], np.int32(y_pred[:, i] >= threshold)) for threshold in thresholds
])
threshold_best_index = np.argmax(scores)
classes_thresholds.append(thresholds[threshold_best_index])
f1 = f1_score(y_true[:, i], np.int32(y_pred[:, i] >= thresholds[threshold_best_index]))
classes_scores.append(f1)
return classes_thresholds, classes_scores
import pretrainedmodels
pretrainedmodels.__dict__['model_names']
import pretrainedmodels
import pretrainedmodels.utils as pqutils
_model_name = 'se_resnext50_32x4d'
model = pretrainedmodels.__dict__[_model_name](num_classes=1000, pretrained='imagenet')
tf_img = pqutils.TransformImage(model)
model_stats = (tf_img.__dict__['mean'], tf_img.__dict__['std'])
model_stats
data_dir = '../input/'
valid_df = pd.read_csv('../input/' + 'val_id.csv', header=None, names=['idx', 'Id'])
train_df = pd.read_csv(data_dir + 'train.csv')
len(train_df)
from PIL import Image as QImage
ids = []
labels = []
def file_jpg_to_png(path):
global ids
gclasses = set(list(range(28))) - set([0, 25])
f1 = '../input/new_data/' + path + '.jpg'
f2 = '../input/train_png/' + path + '.png'
xs = path.split('_')
q = xs.index('classes') + 1
xs = xs[q:]
if len(gclasses & set([int(x) for x in xs])) == 0:
return
xs = ' '.join(xs)
if not os.path.isfile(f2):
try:
im = QImage.open(f1)
im = im.resize((512, 512), QImage.NEAREST)
im.save(f2)
ids.append(path)
labels.append(xs)
except:
pass
else:
ids.append(path)
labels.append(xs)
need_to_prepare_extra = False
if need_to_prepare_extra:
for filename in tqdm_notebook(os.listdir('../input/new_data/'), total=64447):
if filename.endswith(".jpg"):
file_jpg_to_png(filename[:-4])
if need_to_prepare_extra:
xtra_data = pd.DataFrame()
xtra_data['Id'] = ids
xtra_data['Target'] = labels
xtra_data.to_csv(data_dir + 'xtra_train.csv', index=False)
xtra_data.head(n=3)
test_matches = pd.read_csv('../input/test_matches.csv')
test_matches.Extra = test_matches['Extra'].apply(lambda x: "_".join(x.split("_")[2:]))
test_matches.head()
xtra_data = pd.read_csv(data_dir + 'xtra_train.csv')
xtra_data['Extra'] = xtra_data.Id.apply(lambda x: x[:x.find("_classes")])
xtra_data.head()
xtra_matches_ids = test_matches.Extra.values.tolist()
xtra_data_train = xtra_data.loc[~xtra_data.Extra.isin(xtra_matches_ids), ['Id', 'Target']].reset_index(drop=True)
xtra_data_valid = xtra_data.loc[xtra_data.Extra.isin(xtra_matches_ids), ['Id', 'Target']].reset_index(drop=True)
data = xtra_data_train
labels = np.zeros((data.shape[0], 28), dtype=np.int32)
if "Target" in data:
for i, lbls in data['Target'].str.split().iteritems():
for j in map(int, lbls):
labels[i, j] = 1
for j in range(28):
print(j, '\t', name_label_dict[j], '\t', labels[:, j].sum(), '\t', labels[:, j].sum() / labels.shape[0])
xtra_matches_ids = ['1054_E4_1_classes_25_16_0', '1762_G4_5_classes_27', '1335_C6_2_classes_3',
'935_D5_2_classes_22_0', '27_H9_2_classes_10', '669_D8_1_classes_16_2',
'1178_D4_2_classes_19_16_14', '791_A9_1_classes_10_9', '759_F9_9_classes_25_21_19_16',
'1283_F10_2_classes_16_0', '688_E7_10_classes_23', '1772_F9_7_classes_25_17',
'454_E5_1_classes_14_0', '1020_C5_3_classes_23', '1386_G4_2_classes_8',
'681_G8_5_classes_13', '1609_C4_2_classes_16_0', '690_D3_5_classes_22_21_1_0',
'1245_B2_2_classes_21_0', '1335_C10_4_classes_16_0', '693_A11_3_classes_23',
'1139_A12_4_classes_23', '916_F8_1_classes_25_2_0', '694_C1_2_classes_18_1',
'929_B8_1_classes_25_19', '340_F5_3_classes_13', '138_B12_1_classes_8',
'932_G11_2_classes_25_16', '28_H9_1_classes_10', '924_F12_1_classes_27',
'682_F12_2_classes_25_4', '1147_D3_13_classes_16_0', '346_A5_1_classes_12',
'616_F1_4_classes_8', '73_A10_1_classes_27_25', '663_A9_2_classes_16_14',
'859_C8_4_classes_16_14', '933_C10_4_classes_22_21', '1207_B10_7_classes_12',
'694_F10_1_classes_25_21', '908_E3_1_classes_4', '1758_C9_4_classes_17_2',
'1335_D2_2_classes_2_0', '929_H2_2_classes_23', '1717_G8_34_classes_25_17',
'1150_H4_7_classes_13', '1054_E4_2_classes_25_16_0', '504_B1_3_classes_25_16_0',
'747_B5_4_classes_10_9', '1020_B1_7_classes_23_5', '918_H10_2_classes_25_15',
'532_H3_1_classes_25_16_0', '757_C6_3_classes_16_2', '1346_H6_3_classes_16_5_0',
'496_D1_1_classes_16_0', '1042_C3_3_classes_27', '929_B12_1_classes_3',
'684_C4_2_classes_23_0', '696_C9_5_classes_25_21_0', '1144_A10_4_classes_2',
'846_A8_2_classes_16_14', '903_F12_2_classes_23_5', '1264_G1_1_classes_27',
'925_H8_2_classes_1_0', '121_C6_2_classes_10_9', '1657_E10_3_classes_25_17',
'932_G11_1_classes_25_16', '704_G4_1_classes_25_12', '1039_C3_2_classes_19_16',
'906_H7_2_classes_25_6', '19_H7_2_classes_8', '725_G10_2_classes_16_14', '681_B2_4_classes_4',
'697_A6_4_classes_19_0', '1581_B12_2_classes_16_14', '926_F7_2_classes_5_0',
'1770_D2_4_classes_21_17_4', '1037_F4_3_classes_19', '1413_F11_6_classes_21_16',
'694_A2_1_classes_2', '1049_D11_2_classes_25_16_0', '1276_C3_2_classes_21_0',
'346_B12_3_classes_14_0', '1773_G12_3_classes_16_12', '1183_F4_2_classes_15',
'1158_H11_8_classes_16_5', '380_C6_1_classes_16_0', '792_B6_7_classes_13_0',
'682_C9_6_classes_25_12_2', '906_A9_4_classes_20_0', '400_D3_2_classes_25_7',
'1237_G1_4_classes_21_6', '793_B1_1_classes_25_22_0', '1308_A5_4_classes_5',
'800_E1_1_classes_16_14', '1421_G5_7_classes_17', '906_A9_6_classes_20_0',
'1245_B2_3_classes_21_0', '626_D7_6_classes_25_21_12', '344_G2_4_classes_11',
'901_E12_1_classes_25_6_2', '1050_F6_6_classes_16_0', '240_G8_1_classes_8',
'933_C2_1_classes_23_2_0', '556_B9_1_classes_25_18_0', '1335_C10_2_classes_16_0',
'1125_F6_3_classes_4', '1495_F7_3_classes_7_0', '694_C1_1_classes_18_1', '918_B3_4_classes_14',
'1762_E6_5_classes_7', '915_C6_5_classes_4', '820_G4_3_classes_10_9', '927_F12_12_classes_18_0',
'901_D10_2_classes_12_0', '1642_G7_34_classes_25_16', '928_G1_2_classes_14_7',
'682_G9_1_classes_7_0', '903_F2_1_classes_2_0', '1645_E1_32_classes_16_14',
'685_G10_5_classes_12_0', '927_A9_10_classes_25_5', '957_G6_4_classes_16',
'757_C6_2_classes_16_2', '1213_C4_2_classes_4', '909_A6_1_classes_2', '694_D6_2_classes_1_0',
'480_D6_3_classes_25_16', '1050_F1_3_classes_25_16_0', '692_A1_5_classes_25_14_0',
'1772_H1_5_classes_18_17_16_0', '991_G6_7_classes_10_9', '782_F8_2_classes_25_16',
'693_H4_1_classes_7', '1259_A11_4_classes_19_16', '1414_D12_2_classes_21_0',
'1139_D5_5_classes_5', '930_H3_2_classes_1', '901_G9_5_classes_25_19_0', '1754_G2_34_classes_5',
'353_A9_1_classes_21_13', '1179_H7_1_classes_25_16_0', '1423_A4_2_classes_16_14',
'686_F4_2_classes_22_21', '1693_E1_2_classes_23_16', '400_H8_2_classes_23',
'1680_G4_4_classes_16', '935_G3_1_classes_5', '838_E8_1_classes_3', '1030_D8_2_classes_7_0',
'684_D12_4_classes_18', '812_C10_2_classes_13_0', '1416_D10_6_classes_21_16_0',
'1293_E3_2_classes_1_0', '480_D6_2_classes_25_16', '700_H6_2_classes_25_2',
'1773_E10_4_classes_16_0', '611_E10_1_classes_25_13', '346_B12_4_classes_14_0',
'523_A9_4_classes_5', '1581_B12_3_classes_16_14', '684_D8_6_classes_25_12_0',
'927_F12_11_classes_18_0', '353_E4_2_classes_5', '556_C1_5_classes_25_22_16',
'1179_H7_2_classes_25_16_0', '1711_B12_3_classes_26_21_4', '449_G8_2_classes_4_2',
'544_A8_5_classes_22_21_7', '1772_H1_3_classes_18_17_16_0', '1772_G2_6_classes_25_19_16_0',
'909_C11_2_classes_2_0', '930_C12_1_classes_18_14_6', '690_C10_2_classes_13',
'1009_B6_2_classes_10_9', '757_E10_5_classes_12', '88_D7_2_classes_8', '383_E8_7_classes_25_17',
'1432_F2_2_classes_6', '505_C10_1_classes_25_15', '1104_E7_2_classes_16_14',
'699_E8_1_classes_1', '1213_C4_3_classes_4', '690_H5_1_classes_4', '1169_D3_6_classes_16_0',
'686_F4_1_classes_22_21', '532_D1_1_classes_16_0', '896_G8_3_classes_5_0',
'934_G4_3_classes_21', '344_G2_1_classes_11', '369_C9_1_classes_18_14_0',
'682_F12_1_classes_25_4', '683_E1_2_classes_25_1_0', '697_G3_6_classes_13_7',
'1772_A6_7_classes_5', '933_C4_6_classes_5', '1231_F9_5_classes_7', '802_D5_9_classes_16_0',
'682_G10_1_classes_7', '850_C1_9_classes_21_0', '929_B12_2_classes_3', '1339_D3_3_classes_2_1',
'858_D4_2_classes_4', '334_B12_2_classes_4', '622_F1_7_classes_8', '908_G5_2_classes_2_0',
'778_G6_2_classes_25_16_14', '1027_C4_1_classes_7', '886_C10_5_classes_23_0',
'807_C2_3_classes_4', '1314_D2_2_classes_25_16_0', '1770_B5_1_classes_21_16_11',
'1105_F10_2_classes_16_0', '1283_B2_10_classes_16_0', '583_E11_1_classes_25_16',
'820_G4_7_classes_10_9', '928_H3_2_classes_14_0', '970_H1_4_classes_25_18',
'1751_A7_32_classes_27', '701_H10_2_classes_25_14', '1773_B6_11_classes_23_17_16',
'1736_G7_31_classes_25_16', '928_H3_1_classes_14_0', '1645_E5_34_classes_17',
'539_B3_1_classes_25_21_0', '683_E1_1_classes_25_1_0', '484_G6_3_classes_22',
'928_A1_1_classes_4', '1773_B6_7_classes_23_17_16', '1255_A3_4_classes_16_0',
'698_C6_2_classes_25_21_4', '1773_D5_6_classes_17', '681_G8_4_classes_13',
'935_H11_2_classes_22_0', '1125_B9_4_classes_25_7', '698_F11_1_classes_13_0',
'344_F7_1_classes_25_21', '906_C11_1_classes_4', '1656_F5_2_classes_19_17',
'1761_A10_3_classes_23_17_14', '1772_H5_7_classes_17_7', '910_B8_1_classes_12_0',
'1283_F10_4_classes_16_0', '508_C10_1_classes_25_15', '681_B2_3_classes_4',
'868_E8_2_classes_17_16_0', '1339_B9_2_classes_16_0', '856_A2_4_classes_2_0',
'700_C3_6_classes_21', '869_B3_1_classes_16_0', '701_B9_2_classes_21_13_0',
'1178_F9_6_classes_16_0', '542_G1_1_classes_11_2_0']
xtra_data_train = xtra_data.loc[~xtra_data.Id.isin(xtra_matches_ids), ['Id', 'Target']].reset_index(drop=True)
xtra_data_valid = xtra_data.loc[xtra_data.Id.isin(xtra_matches_ids), ['Id', 'Target']].reset_index(drop=True)
exclude_valid = \
['5ae3db3a-bbc4-11e8-b2bc-ac1f6b6435d0',
'e6d0b648-bbbc-11e8-b2ba-ac1f6b6435d0',
'3202385a-bbca-11e8-b2bc-ac1f6b6435d0',
'0cf36c82-bbca-11e8-b2bc-ac1f6b6435d0',
'7cb0006e-bbaf-11e8-b2ba-ac1f6b6435d0',
'87b77dd2-bba2-11e8-b2b9-ac1f6b6435d0',
'62c88efa-bbc8-11e8-b2bc-ac1f6b6435d0',
'44d819c2-bbbb-11e8-b2ba-ac1f6b6435d0',
'b1ca2b40-bbbd-11e8-b2ba-ac1f6b6435d0',
'8cd67266-bbbe-11e8-b2ba-ac1f6b6435d0',
'cead83ec-bb9a-11e8-b2b9-ac1f6b6435d0',
'a166d11a-bbca-11e8-b2bc-ac1f6b6435d0',
'91a0a67e-bb9e-11e8-b2b9-ac1f6b6435d0',
'2be24582-bbb1-11e8-b2ba-ac1f6b6435d0']
exclude_train = \
['7138c4aa-bb9b-11e8-b2b9-ac1f6b6435d0',
'8a10533e-bba6-11e8-b2ba-ac1f6b6435d0',
'be92e108-bbb5-11e8-b2ba-ac1f6b6435d0',
'abfa727e-bba4-11e8-b2ba-ac1f6b6435d0',
'2384acac-bbae-11e8-b2ba-ac1f6b6435d0',
'c7a7a462-bbb1-11e8-b2ba-ac1f6b6435d0',
'559f7ce0-bbb2-11e8-b2ba-ac1f6b6435d0']
valid_df = pd.read_csv('../input/' + 'val_id.csv', header=None, names=['idx', 'Id'])
valid_df = valid_df.loc[~valid_df.Id.isin(exclude_valid), :]
train_df = pd.read_csv(data_dir + 'train.csv')
train_df = train_df.loc[~train_df.Id.isin(exclude_train), :]
test_df = pd.read_csv('../input/' + "sample_submission.csv")
train = train_df.loc[~train_df.Id.isin(valid_df.Id.values.tolist()), :].reset_index(drop=True)
train = pd.concat([train, xtra_data_train], axis=0, sort=False)
valid = train_df.loc[train_df.Id.isin(valid_df.Id.values.tolist()), :].reset_index(drop=True)
valid = pd.concat([valid, xtra_data_valid], axis=0, sort=False)
test = test_df
del train_df, valid_df, test_df, xtra_data_valid, xtra_data_train
gc.collect()
train_files = train.Id.apply(lambda s: '../input/' + 'train_png/' + s + '.png')
train_labels = train.Target.astype(str).apply(lambda s: [name_label_dict[int(q)] for q in s.split(' ')])
train_ds = ImageMultiDataset(fns=train_files, labels=train_labels, classes=list(name_label_dict.values()))
del train_files, train_labels
valid_files = valid.Id.apply(lambda s: '../input/' + 'train_png/' + s + '.png')
valid_labels = valid.Target.astype(str).apply(lambda s: [name_label_dict[int(q)] for q in s.split(' ')])
valid_ds = ImageMultiDataset(fns=valid_files, labels=valid_labels, classes=list(name_label_dict.values()))
del valid_files, valid_labels
test_files = test.Id.apply(lambda s: '../input/' + 'test_png/' + s + '.png')
test_labels = test.Predicted.astype(str).apply(lambda s: [name_label_dict[int(q)] for q in s.split(' ')])
test_ds = ImageMultiDataset(fns=test_files, labels=test_labels, classes=list(name_label_dict.values()))
del test_files, test_labels
xtra = [RandTransform(squish, {})]
tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=180.0,
max_zoom=1.2, max_lighting=0.2, max_warp=None,
p_affine=0.9, p_lighting=0.65, xtra_tfms=xtra)
data = ImageDataBunch.create(train_ds, valid_ds, test_ds, path=data_dir, device=device,
size=512, bs=12, ds_tfms=tfms, padding_mode='zeros')
data.normalize(model_stats)
valid_df = pd.read_csv('../input/' + 'val_id.csv', header=None, names=['idx', 'Id'])
valid_df = valid_df.loc[~valid_df.Id.isin(exclude_valid), :]
train_df = pd.read_csv(data_dir + 'train.csv')
train_df = train_df.loc[~train_df.Id.isin(exclude_train), :]
test_df = pd.read_csv('../input/' + "sample_submission.csv")  # api: pandas.read_csv
ver = "#version 1.5.1 (by zilad)"
print(f"get_multi_profit_graph.py Version: {ver}")
import pymysql
from sqlalchemy import create_engine
from library import cf
import pandas as pd
from matplotlib import font_manager, rc, ticker, pyplot as plt
from library.logging_pack import *
import os
import random
import time
def get_factors(s_date, s_name):
# connect to the 'daily_craw' DB in MySQL
pymysql.install_as_MySQLdb()
engine = create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port
+ "/" + 'daily_craw', encoding='utf-8')
# fetch the required rows from the MySQL table and process them
if type(s_name) == str:
if s_name in ('KOSPI', 'KOSDAQ'):
######################################## KOSPI, KOSDAQ ##########################################
sql = f'select date, close from t_sector where code_name = "종합({s_name})" and date >= {s_date} order by date'
rows = engine.execute(sql).fetchall()
else:
sql = f'select date, close from `{s_name}` where date >= {s_date} order by date'
rows = engine.execute(sql).fetchall()
sec = pd.DataFrame(rows, columns=['date', 'close'])  # api: pandas.DataFrame
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.linking import PointND, link, Hash_table
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
# Call lambda function for a fresh copy each time.
unit_steps = lambda: [[PointND(t, (x, 0))] for t, x in enumerate(range(5))]
np.random.seed(0)
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.diff(random_x).max()
random_walk_legacy = lambda: [[PointND(t, (x, 5))]
for t, x in enumerate(random_x)]
def hash_generator(dims, box_size):
return lambda: Hash_table(dims, box_size)
def _skip_if_no_numba():
if not NUMBA_AVAILABLE:
raise nose.SkipTest('numba not installed. Skipping.')
def random_walk(N):
return np.cumsum(np.random.randn(N))
def contracting_grid():
"""Two frames with a grid of 441 points.
In the second frame, the points contract, so that the outermost set
coincides with the second-outermost set in the previous frame.
This is a way to challenge (and/or stump) a subnet solver.
"""
pts0x, pts0y = np.mgrid[-10:11,-10:11]
pts0 = pd.DataFrame(dict(x=pts0x.flatten(), y=pts0y.flatten(),
frame=0))
pts1 = pts0.copy()
pts1.frame = 1
pts1.x = pts1.x * 0.9
pts1.y = pts1.y * 0.9
allpts = pd.concat([pts0, pts1], ignore_index=True)
allpts.x += 100 # Because BTree doesn't allow negative coordinates
allpts.y += 100
return allpts
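# Illustrative check (not part of the test suite; run manually if desired):
#   pts = contracting_grid()
#   pts[pts.frame == 0].x.max() - 100   # -> 10, the outermost ring
#   pts[pts.frame == 1].x.max() - 100   # -> 9.0, coinciding with the old second-outermost ring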
class CommonTrackingTests(object):
do_diagnostics = False # Don't ask for diagnostic info from linker
def test_one_trivial_stepper(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_search_range' in self.diag.columns
# Except for first frame, all particles should have been labeled
# with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 0].isnull())
def test_two_isolated_steppers(self):
N = 5
Y = 25
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_two_isolated_steppers_one_gapped(self):
N = 5
Y = 25
# Begin second feature one frame later than the first,
# so the particle labeling (0, 1) is established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': np.arange(N)})
a = a.drop(3).reset_index(drop=True)
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy()
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# link_df_iter() tests not performed, because hash_size is
# not knowable from the first frame alone.
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_isolated_continuous_random_walks(self):
# Two 2D random walks
np.random.seed(0)
N = 30
Y = 250
M = 20 # margin, because negative values raise OutOfHash
a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N), 'frame': np.arange(N)})
b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(2*M, Y + 2*M))
assert_frame_equal(actual_iter, expected)
# Many 2D random walks
np.random.seed(0)
initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
import itertools
c = itertools.count()
def walk(x, y):
i = next(c)
return DataFrame({'x': x + random_walk(N - i),
'y': y + random_walk(N - i),
'frame': np.arange(i, N)})
f = pd.concat([walk(*pos) for pos in initial_positions])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(200 + M, 200 + M))
assert_frame_equal(actual_iter, expected)
def test_start_at_frame_other_than_zero(self):
# One 1D stepper
N = 5
FIRST_FRAME = 3
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': FIRST_FRAME + np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(6, 2))
assert_frame_equal(actual, expected)
def test_blank_frame_no_memory(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': [0, 1, 2, 4, 5]})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(10, 10))
assert_frame_equal(actual, expected)
# This doesn't error, but we might wish it would
# give the particle a new ID after the gap. It just
# ignores the missing frame.
def test_real_data_that_causes_duplicate_bug(self):
filename = 'reproduce_duplicate_track_assignment.df'
f = pd.read_pickle(os.path.join(path, filename))
# Not all parameters reproduce it, but these do
self.link_df(f, 8, 2, verify_integrity=True)
def test_search_range(self):
t = self.link(unit_steps(), 1.1, hash_generator((10, 10), 1))
assert len(t) == 1 # One track
t_short = self.link(unit_steps(), 0.9, hash_generator((10, 10), 1))
assert len(t_short) == len(unit_steps()) # Each step is a separate track.
t = self.link(random_walk_legacy(), max_disp + 0.1,
hash_generator((10, 10), 1))
assert len(t) == 1 # One track
t_short = self.link(random_walk_legacy(), max_disp - 0.1,
hash_generator((10, 10), 1))
assert len(t_short) > 1 # Multiple tracks
def test_box_size(self):
"""No matter what the box size, there should be one track, and it should
contain all the points."""
for box_size in [0.1, 1, 10]:
t1 = self.link(unit_steps(), 1.1, hash_generator((10, 10), box_size))
t2 = self.link(random_walk_legacy(), max_disp + 1,
hash_generator((10, 10), box_size))
assert len(t1) == 1
assert len(t2) == 1
assert len(t1[0].points) == len(unit_steps())
assert len(t2[0].points) == len(random_walk_legacy())
def test_easy_tracking(self):
level_count = 5
p_count = 16
levels = []
for j in range(level_count):
level = []
for k in np.arange(p_count) * 2:
level.append(PointND(j, (j, k)))
levels.append(level)
hash_generator = lambda: Hash_table((level_count + 1,
p_count * 2 + 1), .5)
tracks = self.link(levels, 1.5, hash_generator)
assert len(tracks) == p_count
for t in tracks:
x, y = zip(*[p.pos for p in t])
dx = np.diff(x)
dy = np.diff(y)
assert np.sum(dx) == level_count - 1
assert np.sum(dy) == 0
def test_copy(self):
"""Check inplace/copy behavior of link_df, link_df_iter"""
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
f_inplace = f.copy()
expected = f.copy()
expected['particle'] = np.zeros(N)
# Should add particle column in-place
# UNLESS diagnostics are enabled
actual = self.link_df(f_inplace, 5)
assert_frame_equal(actual, expected)
if self.do_diagnostics:
assert 'particle' not in f_inplace.columns
else:
assert_frame_equal(actual, f_inplace)
# Should copy
actual = self.link_df(f, 5, copy_features=True)
assert_frame_equal(actual, expected)
assert 'particle' not in f.columns
# Should copy
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
assert 'particle' not in f.columns
@nose.tools.raises(tp.SubnetOversizeException)
def test_oversize_fail(self):
self.link_df(contracting_grid(), 1)
@nose.tools.raises(tp.SubnetOversizeException)
def test_adaptive_fail(self):
"""Check recursion limit"""
self.link_df(contracting_grid(), 1, adaptive_stop=0.92)
def link(self, *args, **kwargs):
kwargs.update(self.linker_opts)
return tp.link(*args, **kwargs)
def link_df(self, *args, **kwargs):
kwargs.update(self.linker_opts)
kwargs['diagnostics'] = self.do_diagnostics
return tp.link_df(*args, **kwargs)
def link_df_iter(self, *args, **kwargs):
kwargs.update(self.linker_opts)
kwargs['diagnostics'] = self.do_diagnostics
args = list(args)
features = args.pop(0)
res = pd.concat(tp.link_df_iter(
(df for fr, df in features.groupby('frame')), *args, **kwargs))
return res.sort(['particle', 'frame']).reset_index(drop=True)
class TestOnce(unittest.TestCase):
# simple API tests that need only run on one engine
def setUp(self):
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
self.features = f
def test_t_column(self):
f = self.features.copy()
cols = list(f.columns)
name = 'arbitrary name'
cols[cols.index('frame')] = name
f.columns = cols
# smoke tests
tp.link_df(f, 5, t_column=name, verify_integrity=True)
f_iter = (frame for fnum, frame in f.groupby('arbitrary name'))
list(tp.link_df_iter(f_iter, 5, t_column=name, verify_integrity=True))
@nose.tools.raises(ValueError)
def test_check_iter(self):
"""Check that link_df_iter() makes a useful error message if we
try to pass a single DataFrame."""
list(tp.link_df_iter(self.features.copy(), 5))
class SubnetNeededTests(CommonTrackingTests):
"""Tests that assume a best-effort subnet linker (i.e. not "drop")."""
def test_two_nearby_steppers(self):
N = 5
Y = 2
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_subnet' in self.diag.columns
assert 'diag_subnet_size' in self.diag.columns
# Except for frame in which they appear, all particles should have
# been labeled with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 1].isnull())
# The number of loop iterations is reported by the numba linker only
if self.linker_opts['link_strategy'] == 'numba':
assert 'diag_subnet_iterations' in self.diag.columns
def test_two_nearby_steppers_one_gapped(self):
N = 5
Y = 2
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
a = a.drop(3).reset_index(drop=True)
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_nearby_continuous_random_walks(self):
# Two 2D random walks
np.random.seed(0)
N = 30
Y = 250
M = 20 # margin, because negative values raise OutOfHash
a = DataFrame({'x': M + random_walk(N),
'y': M + random_walk(N),
'frame': np.arange(N)})
b = DataFrame({'x': M + random_walk(N - 1),
'y': M + Y + random_walk(N - 1),
'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
| assert_frame_equal(actual, expected) | pandas.util.testing.assert_frame_equal |
import re
import os
import pandas as pd
import numpy as np
from .extract_tools import default_tokenizer as _default_tokenizer
def _getDictionnaryKeys(dictionnary):
"""
Function that gets the keys of a dict and flattens the keys of any nested sub-dicts into one list.
"""
keys_array = []
for key in dictionnary.keys():
keys_array.append(key)
if (type(dictionnary[key]) == type({})):
keys_array = keys_array+_getDictionnaryKeys(dictionnary[key])
return(keys_array)
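# Illustrative example (assumed input, not from the original source):
#   _getDictionnaryKeys({"Person": {"Name": True}, "Date": True})
#   # -> ["Person", "Name", "Date"]   (nested keys are flattened into a single list)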
class pandasToBrat:
"""
Class for managing a brat annotation folder with pandas.
Each brat folder is handled by one instance of pandasToBrat.
It supports importing and exporting entity/relation configurations,
documents, and annotations.
Inputs :
folder, str : path of brat folder
"""
def __init__(self, folder):
self.folder = folder
self.conf_file = 'annotation.conf'
self.emptyDFCols = {
"annotations":["id","type_id", "word", "label", "start", "end"],
"relations":["id","type_id","relation","Arg1","Arg2"]
}
# Adding '/' to folder path if missing
if(self.folder[-1] != '/'):
self.folder += '/'
# Creating folder if do not exist
if (os.path.isdir(self.folder)) == False:
os.mkdir(self.folder)
# Loading conf file if exists | creating empty conf file if not
self.read_conf()
def _emptyData(self):
fileList = self._getFileList()
nb_files = fileList.shape[0]
confirmation = input("Deleting all data ({} files), press y to confirm :".format(nb_files))
if confirmation == 'y':
fileList["filename"].apply(lambda x: os.remove(self.folder+x))
print("{} files deleted.".format(nb_files))
def _generateEntitiesStr (self, conf, data = '', level = 0):
if (type(conf) != type({})):
return data
# Parsing keys
for key in conf.keys():
value = conf[key]
if value == True:
data += '\n'+level*'\t'+key
elif value == False:
data += '\n'+level*'\t'+'!'+key
elif type(value) == type({}):
data += '\n'+level*'\t'+key
data = self._generateEntitiesStr(value, data, level+1)
return data
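# Illustration of the string this helper builds (assumed example input):
#   self._generateEntitiesStr({"Person": {"Name": True}, "Date": True})
#   # -> "\nPerson\n\tName\nDate"   (children are tab-indented; a False value is written as "!key")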
def _writeEntitiesLevel (self, conf, data, last_n = -1):
for n in range(last_n,len(conf)):
# If empty : pass, if not the last line : pass
if (conf[n] != '' and n > last_n):
level = len(conf[n].split("\t"))-1
if (n+1 <= len(conf)): # Level of next item
next_level = len(conf[n+1].split("\t"))-1
else:
next_level = level
splitted_str = conf[n].split("\t")
str_clean = splitted_str[len(splitted_str)-1]
if (level >= next_level): # Write the lines that are at the same level
if (str_clean[0] == '!'):
data[str_clean[1:]] = False
else:
data[str_clean] = True
if (level > next_level):
# Break out of the loop
break
elif (level < next_level): # Write the lower-level lines recursively
splitted_str = conf[n].split("\t")
last_n, data[str_clean] = self._writeEntitiesLevel(conf, {}, n)
return(n, data)
def _readRelations(self, relations, entities = []):
data = {}
for relation in relations.split("\n"):
if relation != '':
relation_data = relation.split("\t")[0]
args = list(map(lambda x: x.split(":")[1], relation.split("\t")[1].split(", ")))
args_valid = list(filter(lambda x: x in entities, args))
if (len(args_valid) > 0):
data[relation_data] = {"args":args_valid}
return data
def _writeRelations(self, relations, entities = []):
data = ''
for relation in relations:
args_array = list(filter(lambda x: x in entities, relations[relation]["args"]))
if (len(args_array) > 0):
data += '\n'+relation+'\t'
for n in range(0, len(args_array)):
data += int(bool(n))*', '+'Arg'+str(n+1)+':'+args_array[n]
return data
def read_conf (self):
"""
Get the current Brat configuration.
Output :
Dict containing "entities" and "relations" configurations.
"""
if (os.path.isfile(self.folder+self.conf_file)):
# Reading file
file = open(self.folder+self.conf_file)
conf_str = file.read()
file.close()
# Splitting conf_str
conf_data = re.split(re.compile(r"\[[a-zA-Z]+\]", re.DOTALL), conf_str)[1:]
data = {}
# Reading enteties
data["entities"] = self._writeEntitiesLevel(conf_data[0].split("\n"), {})[1]
# Reading relations
entitiesKeys = _getDictionnaryKeys(data["entities"])
data["relations"] = self._readRelations(conf_data[1], entitiesKeys)
return(data)
else:
self.write_conf()
self.read_conf()
def write_conf(self, entities = {}, relations = {}, events = {}, attributes = {}):
"""
Write or overwrite configuration file.
It does not currently support events and attributes configuration data.
inputs :
entities, dict : dict containing the entities. If an entity has children, its value is another dict; otherwise it is set to True.
relations, dict : dict containing the relations between entities; each key is a relation name whose value is a dict with an "args" key listing the related entities.
"""
# TODO : Add events and attributes support.
conf_str = ''
# Entities
conf_str += '\n\n[entities]'
conf_str += self._generateEntitiesStr(entities)
# relations
conf_str += '\n\n[relations]'
entitiesKeys = _getDictionnaryKeys(entities)
conf_str += self._writeRelations(relations, entitiesKeys)
# attributes
conf_str += '\n\n[attributes]'
# events
conf_str += '\n\n[events]'
# Write conf file
file = open(self.folder+self.conf_file,'w')
file.write(conf_str)
file.close()
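# Minimal usage sketch (the folder path and label names below are hypothetical):
#   brat = pandasToBrat("my_brat_project/")
#   brat.write_conf(entities={"Person": {"Name": True}, "Date": True},
#                   relations={"has_date": {"args": ["Person", "Date"]}})
#   conf = brat.read_conf()   # round-trips the configuration written above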
def _getFileList(self):
# Listing files
filesDF = pd.DataFrame({'filename':pd.Series(os.listdir(self.folder))})
filesDFSplitted = filesDF["filename"].str.split(".", expand = True)
filesDF["id"] = filesDFSplitted[0]
filesDF["filetype"] = filesDFSplitted[1]
filesDF = filesDF[filesDF["filetype"].isin(["txt","ann"])]
return(filesDF)
def _parseData(self):
# Listing files
filesDF = self._getFileList()
# Getting data from txt and ann
filesDF_txt = filesDF.rename(columns = {"filename":"text_data"}).loc[filesDF["filetype"] == "txt", ["id","text_data"]]
filesDF_ann = filesDF.rename(columns = {"filename":"annotation"}).loc[filesDF["filetype"] == "ann", ["id","annotation"]]
dataDF = filesDF_txt.join(filesDF_ann.set_index("id"), on = "id")
dataDF["text_data"] = dataDF["text_data"].apply(lambda x: open(self.folder+x).read())
dataDF["annotation"] = dataDF["annotation"].apply(lambda x: open(self.folder+x).read())
return(dataDF)
def read_text(self):
"""
read_text
Get a pandas DataFrame containing the brat documents.
Input : None
Output : Pandas dataframe
"""
dataDF = self._parseData()
return(dataDF[["id","text_data"]])
def read_annotation(self, ids = []):
"""
read_annotation
Get annotations from the brat folder.
You can get specific annotations by filtering by id.
input :
ids, list (optional) : list of ids for which you want the annotation data; if empty, all annotations are returned.
output :
dict containing annotations and relations data.
"""
data = {}
data["annotations"] = pd.DataFrame(columns=self.emptyDFCols["annotations"])
data["relations"] = | pd.DataFrame(columns=self.emptyDFCols["relations"]) | pandas.DataFrame |
try:
import win32com.client
except:
print("not on windows os: failed to import win32com.client.")
import pandas as pd
import datetime as dt
import pprint
import json
import os
import pprint
pp = pprint.PrettyPrinter(1)
# basepath = "F:\\la-tools-test\\IDR_Drop"
basepath = os.getcwd()
def flatten(l):
out = []
for item in l:
if isinstance(item, (list, tuple)):
out.extend(flatten(item))
else:
if item != '' and item != ' ':
out.append(item)
return out
def clean_acct(acct):
return ''.join(acct.split(' - '))
def str_to_date(datestring):
return (dt.datetime.strptime(str(datestring).split('+')[0],"%Y-%m-%d %H:%M:%S"))
def date_to_str(datetime_obj):
return (dt.datetime.strftime(datetime_obj, format = '%m/%d/%Y %H:%M:%S'))
def Merge(dict1, dict2):
res = {**dict1, **dict2}
return res
def ba_sa_merge(a, b):
merged = []
for k in range(0, len(a)):
m_acct = a[k] + '_' + b[k]
merged.append(m_acct)
return merged
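# e.g. (illustrative account numbers): ba_sa_merge(['51123456', '51987654'], ['001', '002'])
# -> ['51123456_001', '51987654_002']   (pairs each BA number with its SA number)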
def aaron_to_dict(body):
parse = body.split('\n')
parse = [thing.splitlines() for thing in parse]
parse2 = flatten(parse)
strp_len = [len(item.strip(' ')) for item in parse2]
acct_start = parse2.index('Account Number(s): ') + 1
acct_end = strp_len.index(0)
for item in parse2:
split_item = item.split(': ')
if split_item[0] == 'User Id':
user = ('user', split_item[1])
elif split_item[0] == 'Password':
pw = ('pw', split_item[1])
elif split_item[0] == 'Customer':
name = ('name', split_item[1])
accts = parse2[acct_start:acct_end]
clean_accts = [clean_acct(acct) for acct in accts]
accts_tup = ('accts', clean_accts)
row = [user, pw, name, accts_tup]
return row
def admin_to_dict(body):
parse = body.split('\n')
parse = [thing.splitlines() for thing in parse]
parse2 = flatten(parse)
if (len(body) > 2500) or ("SA Number" in body):
try:
ba_list = [''.join(elem.split('-')) for elem in parse2[9].split(' ')]
sa_list = [elem for elem in parse2[10].split(' ')]
accts = ('accts', ba_sa_merge(ba_list, sa_list))
for item in parse2:
split_item = item.split(': ')
if split_item[0] == 'User Id':
user = ('user', split_item[1])
elif split_item[0] == 'Password':
pw = ('pw', split_item[1])
elif split_item[0] == 'Customer':
name = ('name', split_item[1])
row = [user, pw, name, accts]
except:
for index, item in enumerate(parse2):
if item.lower() == 'user id':
special_index = index
name = ('name', parse2[special_index + 1])
ba_list = parse2[special_index + 2].split(' ')
sa_list = parse2[special_index + 3].split(' ')
user = ('user', parse2[special_index + 4])
pw = ('pw', parse2[special_index + 6])
ba_list = [ba.replace('-', '') for ba in ba_list]
accts = ('accts', ba_sa_merge(ba_list, sa_list))
row = [user, pw, name, accts]
else:
for item in parse2:
split_item = item.split(': ')
if split_item[0] == 'User ID':
user = ('user', split_item[1])
elif split_item[0] == 'Password':
pw = ('pw', split_item[1])
elif split_item[0] == 'Customer':
name = ('name', split_item[1])
elif split_item[0] == 'Account Number':
acct_list = [str(acct) for acct in split_item[1].split(' ')]
accts = ('accts', acct_list)
row = [user, pw, name, accts]
return(row)
def ngrid_to_dict(body):
parse = body.split('\n')
parse = [thing.splitlines() for thing in parse]
parse2 = flatten(parse)
accts_list = []
names_list = []
for item in parse2:
split_item = item.split(': ')
if split_item[0] == 'Userid':
username = split_item[1].replace(' ', '')
user = ('user', username)
if split_item[0] == 'Password':
password = split_item[1].replace(' ', '')
pw = ('pw', password)
try:
split_item2 = [thing.split(' ') for thing in split_item][0]
split_item3 = list(filter(None, split_item2))
int(split_item3[0])
accts_list.append(split_item3[0])
full_name = ' '.join(split_item3[1:])
names_list.append(full_name)
except:
pass
accts = ('accts', accts_list)
name = ('name', names_list)
row = [user, pw, name, accts]
return(row)
def iter_mail(sender_func, mailbox, index):
print('scraping emails...')
#start iterating through emails
mail = mailbox.GetLast()
i = index
try:
msg_row = sender_func(mail.Body)
msg_row.append(('date', str_to_date(mail.ReceivedTime)))
pp.pprint(dict(msg_row))
master = [(i, dict(msg_row))]
i += 1
except:
master = []
while mail:
mail = mailbox.GetPrevious()
try:
msg_row = sender_func(mail.Body)
msg_row.append(('date', str_to_date(mail.ReceivedTime)))
msg_row = dict(msg_row)
pp.pprint(msg_row)
new = (i, msg_row)
master.append(new)
i += 1
except:
pass
master = dict(master)
return(master, i)
# ### Get Filtered Messages (Sender, Date, Body)
# aaron_to_dict gives a dictionary for each account with the structure
#
# {'user': 'nhengi-ston3n',
# 'pw': 'fm54f7',
# 'name': 'STONEWALL KITCHEN LLC',
# 'accts': ['800531501']}
#
# The following code adds the date the email was received (if within the last week), with the structure:
#
# {'user': 'nhengi-ston3n',
# 'pw': 'fm54f7',
# 'name': 'STONEWALL KITCHEN LLC',
# 'accts': ['800531501'],
# 'date': datetime.datetime(2019, 5, 16, 10, 17, 21)}
#
# nest these dictionaries into a scary looking dictionary:
#
# {0: {'user': 'nhengi-ston3n',
# 'pw': 'fm54f7',
# 'name': 'STONEWALL KITCHEN LLC',
# 'accts': ['800531501'],
# 'date': datetime.datetime(2019, 5, 16, 10, 17, 21)},
# 1: {'user': 'nhengi-morg1n',
# 'pw': '6z2s5e',
# 'name': '<NAME>', \
# 'accts': ['800514701'],
# 'date': datetime.datetime(2019, 4, 17, 12, 34, 30)},
# 2: {'user': 'nhengi-ferr1n',
# 'pw': 'xx27b4',
# 'name': '<NAME>',
# 'accts': ['800511301'],
# 'date': datetime.datetime(2019, 4, 17, 12, 7, 55)},
#
# #.....
#https://docs.microsoft.com/en-us/office/vba/api/outlook.mailitem
def get_emails():
try:
global inbox
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
inbox = outlook.Folders('ENGIENA-GSERNASERVICES (ENGIE North America)')
folder = inbox.Folders("Inbox")
messages = folder.Items
aaron_filter = "[SenderEmailAddress] = '<EMAIL>'"
admin_filter = "[SenderEmailAddress] = '<EMAIL>'"
ngrid_filter = "[SenderEmailAddress] = '<EMAIL>'"
today = dt.datetime.now()
aaron = messages.Restrict(aaron_filter)
admin = messages.Restrict(admin_filter)
ngrid = messages.Restrict(ngrid_filter)
print(len(aaron), 'aaron emails,', len(admin), 'epo emails', len(ngrid), 'ngrid emails')
j = 0
error = 0
try:
print('parsing <EMAIL> inbox')
print('')
scrape, j2 = iter_mail(aaron_to_dict, aaron, j)
except:
print('error parsing aaron')
error += 1
try:
print('parsing <EMAIL>')
print('')
admin_scrape, j3 = iter_mail(admin_to_dict, admin, j2)
master = Merge(scrape, admin_scrape)
except:
print('error parsing EPO admin')
error += 1
master = {}
try:
print('parsing <EMAIL>')
print('')
ngrid_scrape, j4 = iter_mail(ngrid_to_dict, ngrid, j3)
master = Merge(master, ngrid_scrape)
except:
print('')
print('error parsing ngrid')
error += 1
if error == 3:
pass
print('error with', error, 'of 3 inboxes')
print('found ', len(master.keys()), ' new emails: ')
pretty_json = json.dumps(master, default = lambda date: date_to_str(date), sort_keys = True, indent = 4)
lame_json = json.dumps(master, default = lambda date: date_to_str(date), sort_keys = True)
json_name = 'email_bodies_' + date_to_str(today).split(' ')[0].replace('/', '_') + '.json'
print('writing .json object')
os.chdir(os.path.join(basepath, 'Logins'))
with open(json_name, 'w') as f:
json.dump(lame_json, f)
#print(pretty_json)
return master, json_name
except:
print("failed.")
def bodies_json(bodies):
test = pd.DataFrame.from_dict(bodies, orient = 'index')
if type(test.date[0]) == str:
test.date = pd.to_datetime(test.date)
accts_success = [len(accts) > 0 for accts in test.accts]
accts_fail = [not val for val in accts_success]
good = test[accts_success].reset_index(drop = True)
util = []
for i, a in enumerate(good.accts):
first_acct = a[0]
leading = a[0][:2]
if leading == '80':
util.append('PSNH')
elif leading == '51' and (len(first_acct.split('_')) > 1):
util.append('CLP')
elif leading == '54' and (len(first_acct.split('_')) > 1):
util.append('WMECO')
else:
if 'SUEZ' in good.user[i]:
util.append('NGRID')
else:
util.append('NSTAR')
good['util'] = util
email_error = []
if len(accts_fail) > 0:
bad = test[accts_fail].reset_index()
mail_error = 'EMAIL_SCRAPE_ERROR.csv'
bad.to_csv(mail_error, header = True, index = False)
return(good, bodies)
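# Summary of the utility tagging logic above (as implemented in the loop):
#   account starts with '80'                              -> 'PSNH'
#   starts with '51' and has a '_'-joined SA number       -> 'CLP'
#   starts with '54' and has a '_'-joined SA number       -> 'WMECO'
#   otherwise: 'NGRID' if the user id contains 'SUEZ', else 'NSTAR'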
def main():
#scrape emails, save json of records
#os.chdir(os.path.join(basepath, 'Logins'))
output_dict, filename = get_emails()
print('file saved as', filename)
#json to df
email_df = | pd.DataFrame.from_dict(output_dict) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
# In[26]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
df = pd.read_csv("data4",sep="\t")
df.head()
print(df.describe())
print(df[df['target']=='unstable'].describe())
print(df[df['target']=='stable'].describe())
print(df[df['target']=='unchanged'].describe())
# In[27]:
df.plot(kind = 'box', subplots = True, layout = (4, 4), sharex = False, sharey = False)
plt.show()
# In[28]:
df.hist()
his = plt.gcf()
his.set_size_inches(12, 6)
plt.show()
# In[29]:
sns.set_style('whitegrid')
sns.FacetGrid(df, hue = 'target', size = 6).map(plt.scatter, ':', '*').add_legend()
plt.show()
# In[30]:
plt.close()
sns.pairplot(df, hue = 'target', height = 2, diag_kind = 'kde')
plt.show()
# In[31]:
plt.figure(figsize=(15,10))
plt.subplot(2,2,1)
sns.violinplot(x='target',y='+',data=df)
plt.subplot(2,2,2)
sns.violinplot(x='target',y='.',data=df)
plt.subplot(2,2,3)
sns.violinplot(x='target',y=':',data=df)
plt.subplot(2,2,4)
sns.violinplot(x='target',y='*',data=df)
plt.show()
# In[32]:
# Import modules
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
# In[33]:
df.head()
# In[34]:
plt.figure(figsize=(7,5))
sns.heatmap(df.corr(),annot=True,cmap='RdYlGn_r')
plt.show()
# In[35]:
#spliting the data
test_size = 0.30
seed = 7
score = 'accuracy'
# Implementation of different ML Algorithms
def models(X_train, Y_train,score):
clfs = []
result = []
names = []
clfs.append(('LR', LogisticRegression()))
clfs.append(('LDA', LinearDiscriminantAnalysis()))
clfs.append(('KNN', KNeighborsClassifier()))
clfs.append(('CART', DecisionTreeClassifier()))
clfs.append(('NB', GaussianNB()))
clfs.append(('SVM', SVC()))
for algo_name, clf in clfs:
k_fold = model_selection.KFold(n_splits=10, random_state=seed)
cv_score = model_selection.cross_val_score(clf, X_train, Y_train, cv=k_fold, scoring=score)
#result = "%s: %f (%f)" % (algo_name, cv_score.mean(), cv_score.std())
result.append((algo_name,cv_score.mean(), cv_score.std()))
names.append(algo_name)
return (result)
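# Shape of the returned value (scores below are placeholders, not real results):
#   models(X_train_all, Y_train_all, 'accuracy')
#   # -> [('LR', 0.95, 0.02), ('LDA', ...), ('KNN', ...), ('CART', ...), ('NB', ...), ('SVM', ...)]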
# In[36]:
X_all = df.iloc[:,:4]
Y_all = df.iloc[:,4]
# In[37]:
X_train_all, X_test_all, Y_train_all, Y_test_all = model_selection.train_test_split(X_all, Y_all, test_size=test_size, random_state=14)
# In[38]:
models(X_train_all, Y_train_all, score)
# In[39]:
# Evaluation of the Classifier
# Predictions on test dataset
svm = SVC()
svm.fit(X_train_all, Y_train_all)
pred = svm.predict(X_test_all)
print(accuracy_score(Y_test_all, pred))
print(confusion_matrix(Y_test_all, pred))
print(classification_report(Y_test_all, pred))
# In[19]:
X_sep = df[['*','.']]
Y_sep = df.target
# In[20]:
X_train_sep, X_test_sep, Y_train_sep, Y_test_sep = model_selection.train_test_split(X_sep, Y_sep, test_size=test_size, random_state=seed)
models(X_train_sep, Y_train_sep, score)
# In[22]:
svm = SVC()
svm.fit(X_train_sep, Y_train_sep)
pred = svm.predict(X_test_sep)
print(accuracy_score(Y_test_sep, pred))
print(confusion_matrix(Y_test_sep, pred))
print(classification_report(Y_test_sep, pred))
# In[23]:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# In[24]:
a = pd.read_csv('data4',sep="\t", header = None)
i = | pd.DataFrame(a) | pandas.DataFrame |
'''
This script uses the following features:
['dow', 'year', 'month', 'day_of_week', 'holiday_flg', 'min_visitors', 'mean_visitors', 'median_visitors', 'max_visitors', 'count_observations', 'air_genre_name', 'air_area_name', 'latitude', 'longitude', 'rs1_x', 'rv1_x', 'rs2_x', 'rv2_x', 'rs1_y', 'rv1_y', 'rs2_y', 'rv2_y', 'total_reserv_sum', 'total_reserv_mean', 'total_reserv_dt_diff_mean']
RMSE GradientBoostingRegressor: 0.501477019571
RMSE KNeighborsRegressor: 0.421517079307
'''
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred)**0.5
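# Note: as written this is plain RMSE; it matches the competition's RMSLE only if the
# visitor counts have been log1p-transformed upstream (an assumption about code outside
# this excerpt).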
data = {
'tra': pd.read_csv('./data/air_visit_data.csv'),
'as': pd.read_csv('./data/air_store_info.csv'),
'hs': pd.read_csv('./data/hpg_store_info.csv'),
'ar': pd.read_csv('./data/air_reserve.csv'),
'hr': pd.read_csv('./data/hpg_reserve.csv'),
'id': pd.read_csv('./data/store_id_relation.csv'),
'tes': pd.read_csv('./data/sample_submission.csv'),
'hol': pd.read_csv('./data/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
# add 'air_store_id' to the last of data['hr']
data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])
for df in ['ar', 'hr']:
# get year, month, day, get rid of time
data[df]['visit_datetime'] = pd.to_datetime(data[df]['visit_datetime'])
data[df]['visit_datetime'] = data[df]['visit_datetime'].dt.date
data[df]['reserve_datetime'] = pd.to_datetime(data[df]['reserve_datetime'])
data[df]['reserve_datetime'] = data[df]['reserve_datetime'].dt.date
data[df]['reserve_datetime_diff'] = data[df].apply(lambda r: (r['visit_datetime'] - r['reserve_datetime']).days,
axis=1)
tmp1 = data[df].groupby(['air_store_id', 'visit_datetime'], as_index=False)[
['reserve_datetime_diff', 'reserve_visitors']].sum().rename(
columns={'visit_datetime': 'visit_date', 'reserve_datetime_diff': 'rs1', 'reserve_visitors': 'rv1'})
tmp2 = data[df].groupby(['air_store_id', 'visit_datetime'], as_index=False)[
['reserve_datetime_diff', 'reserve_visitors']].mean().rename(
columns={'visit_datetime': 'visit_date', 'reserve_datetime_diff': 'rs2', 'reserve_visitors': 'rv2'})
data[df] = pd.merge(tmp1, tmp2, how='inner', on=['air_store_id', 'visit_date'])
data['tra']['visit_date'] = pd.to_datetime(data['tra']['visit_date'])
data['tra']['dow'] = data['tra']['visit_date'].dt.dayofweek
data['tra']['year'] = data['tra']['visit_date'].dt.year
data['tra']['month'] = data['tra']['visit_date'].dt.month
data['tra']['visit_date'] = data['tra']['visit_date'].dt.date
data['tes']['visit_date'] = data['tes']['id'].map(lambda x: str(x).split('_')[2])
data['tes']['air_store_id'] = data['tes']['id'].map(lambda x: '_'.join(x.split('_')[:2]))
data['tes']['visit_date'] = pd.to_datetime(data['tes']['visit_date'])
data['tes']['dow'] = data['tes']['visit_date'].dt.dayofweek
data['tes']['year'] = data['tes']['visit_date'].dt.year
data['tes']['month'] = data['tes']['visit_date'].dt.month
data['tes']['visit_date'] = data['tes']['visit_date'].dt.date
unique_stores = data['tes']['air_store_id'].unique()
# count week
stores = pd.concat([pd.DataFrame({'air_store_id': unique_stores, 'dow': [i]*len(unique_stores)}) for i in range(7)], axis=0, ignore_index=True).reset_index(drop=True)
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].min().rename(columns={'visitors':'week_min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].mean().rename(columns={'visitors':'week_mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].median().rename(columns={'visitors':'week_median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].max().rename(columns={'visitors':'week_max_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].count().rename(columns={'visitors':'week_count_observations'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
# count all
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].min().rename(columns={'visitors':'all_min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].mean().rename(columns={'visitors':'all_mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].median().rename(columns={'visitors':'all_median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].max().rename(columns={'visitors':'all_max_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
tmp = data['tra'].groupby(['air_store_id'], as_index=False)['visitors'].count().rename(columns={'visitors':'all_count_observations'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id'])
# count year
stores1 = pd.concat([pd.DataFrame({'air_store_id': unique_stores})], axis=0, ignore_index=True).reset_index(drop=True)
data2016 = data['tra'][data['tra']['year'].isin([2016])]
data2017 = data['tra'][data['tra']['year'].isin([2017])]
# count 2016
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].min().rename(columns={'visitors':'2016_min_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].mean().rename(columns={'visitors':'2016_mean_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].median().rename(columns={'visitors':'2016_median_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].max().rename(columns={'visitors':'2016_max_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2016.groupby(['air_store_id','year'], as_index=False)['visitors'].count().rename(columns={'visitors':'2016_count_observations'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
# count 2017
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].min().rename(columns={'visitors':'2017_min_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].mean().rename(columns={'visitors':'2017_mean_visitors'})
stores1 = pd.merge(stores1, tmp, how='left', on=['air_store_id'])
tmp = data2017.groupby(['air_store_id','year'], as_index=False)['visitors'].median().rename(columns={'visitors':'2017_median_visitors'})
stores1 = | pd.merge(stores1, tmp, how='left', on=['air_store_id']) | pandas.merge |
import pandas as pd
import os
def get_sample(direc='data_raw/', sample_rate=0.2, drop_rt=True, shuffle=True):
"""
Generates a sample of data that can be labeled.
:param direc: Directory containing files from which to sample data.
:param sample_rate: The fraction of tweets to draw from each file.
:param drop_rt: Should retweets be dropped?
:return: A combined dataframe of the sample data.
"""
files = os.listdir(direc)
files = [f for f in files if '.csv' in f]
df_sample_list = []
for file in files:
df = | pd.read_csv(direc + file) | pandas.read_csv |
import matplotlib.pyplot as plt
import matplotlib.lines
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, average_precision_score
from oolearning.converters.TwoClassConverterBase import TwoClassConverterBase
from oolearning.converters.TwoClassPrecisionRecallOptimizerConverter import \
TwoClassPrecisionRecallOptimizerConverter
from oolearning.converters.TwoClassRocOptimizerConverter import TwoClassRocOptimizerConverter
from oolearning.evaluators.TwoClassEvaluator import TwoClassEvaluator
class TwoClassProbabilityEvaluator(TwoClassEvaluator):
"""
Evaluates 2-class classification problems, where "probabilities" are supplied as well as a Converter (i.e.
an object that encapsulates the logic to convert the probabilities to classes).
"""
def __init__(self,
converter: TwoClassConverterBase):
super().__init__(positive_class=converter.positive_class)
self._converter = converter
self._actual_classes = None
self._predicted_probabilities = None
self._auc_roc = None
self._auc_precision_recall = None
self._fpr = None
self._tpr = None
self._ideal_threshold_roc = None
self._ppv = None
self._ideal_threshold_ppv_tpr = None
@property
def auc_roc(self):
return self._auc_roc
@property
def auc_precision_recall(self):
return self._auc_precision_recall
def evaluate(self,
actual_values: np.ndarray, predicted_values: pd.DataFrame):
self._actual_classes = actual_values
self._predicted_probabilities = predicted_values
self._auc_roc = roc_auc_score(y_true=[1 if x == self._positive_class else 0 for x in actual_values],
y_score=predicted_values[self._positive_class])
# according to this (), average precision is same as auc of pr curve
self._auc_precision_recall = average_precision_score(y_true=[1 if x == self._positive_class else 0
for x in actual_values],
y_score=predicted_values[self._positive_class])
predicted_classes = self._converter.convert(values=predicted_values)
super().evaluate(actual_values=actual_values, predicted_values=predicted_classes)
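# Rough usage sketch (variable names are illustrative; `converter` is any
# TwoClassConverterBase implementation from this package):
#   evaluator = TwoClassProbabilityEvaluator(converter=converter)
#   evaluator.evaluate(actual_values=y_actual, predicted_values=probability_df)
#   evaluator.auc_roc, evaluator.auc_precision_recall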
def plot_roc_curve(self):
"""
:return: an ROC curve, indicating the point (threshold) that has the minimum distance to the
upper left corner (i.e. a perfect predictor). If a threshold is specified in the
class constructor, then that threshold is also annotated on the graph.
"""
if self._fpr is None or self._tpr is None or self._ideal_threshold_roc is None:
converter = TwoClassRocOptimizerConverter(actual_classes=self._actual_classes,
positive_class=self._converter.positive_class)
converter.convert(values=self._predicted_probabilities)
self._fpr = converter.false_positive_rates
self._tpr = converter.true_positive_rates
self._ideal_threshold_roc = converter.ideal_threshold
self._create_curve(x_coordinates=self._fpr,
y_coordinates=self._tpr,
threshold=0.5,
ideal_threshold=self._ideal_threshold_roc,
title='ROC (AUC={0})'.format(round(self.auc_roc, 3)),
x_label='False Positive Rate (1 - True Negative Rate)',
y_label='True Positive Rate',
corner='Left')
plt.tight_layout()
def plot_precision_recall_curve(self):
"""
# TODO document
"""
self.plot_ppv_tpr_curve()
def plot_ppv_tpr_curve(self):
"""
# TODO document
"""
if self._ppv is None or self._tpr is None or self._ideal_threshold_ppv_tpr is None:
converter = TwoClassPrecisionRecallOptimizerConverter(actual_classes=self._actual_classes,
positive_class=self._converter.positive_class) # noqa
converter.convert(values=self._predicted_probabilities)
self._ppv = converter.positive_predictive_values
self._tpr = converter.true_positive_rates
self._ideal_threshold_ppv_tpr = converter.ideal_threshold
self._create_curve(x_coordinates=self._tpr,
y_coordinates=self._ppv,
threshold=0.5,
ideal_threshold=self._ideal_threshold_ppv_tpr,
title='Positive Predictive Value vs. True Positive Rate',
x_label='True Positive Rate',
y_label='Positive Predictive Value',
corner='Right')
plt.tight_layout()
def plot_calibration(self):
"""
:return: calibration plot. Predicted probabilities are matched with the actual class and binned by
the prediction in intervals of 0.1. i.e. all probabilities/classes that have a prediction between
0 to 0.1 are grouped together, > 0.1 <= 0.2 are grouped together, and so on. For each group, the
percent of positive classes found is calculated. For example, in the group that has predicted
probabilities between 0 and 0.1, we would expect the average probability to be 0.05, and therefore
we would expect about 0.05 (i.e. 5%) of the group to be a positive class. The percentage of
positive classes for each bin is plotted. If the points fall along a 45 degree line, the model
has produced well-calibrated probabilities.
"""
calibration_data = pd.concat([self._predicted_probabilities[self._positive_class],
self._actual_classes], axis=1)
calibration_data.columns = ['probabilities', 'actual_classes']
bin_labels = ['[0, 0.1]', '(0.1, 0.2]', '(0.2, 0.3]', '(0.3, 0.4]', '(0.4, 0.5]', '(0.5, 0.6]',
'(0.6, 0.7]', '(0.7, 0.8]', '(0.8, 0.9]', '(0.9, 1.0]']
# .cut maintains distribution shape
bins = pd.cut(calibration_data.probabilities,
bins=np.arange(0.0, 1.1, 0.1),
include_lowest=True,
labels=bin_labels)
calibration_data['bins'] = bins
# calibration_data.bins.value_counts(ascending=True)
# calibration_data.head()
# calibration_data.sort_values(['bins', 'actual_classes'])
def calibration_grouping(x):
# noinspection PyTypeChecker
number_positive_events = sum(x.actual_classes == self._positive_class)
total_observations = len(x.actual_classes)
d = {'Positive Events Found': number_positive_events,
'Total Observations': total_observations,
'Actual Calibration': 0 if total_observations == 0
else number_positive_events / total_observations}
return pd.Series(d, index=['Positive Events Found', 'Total Observations', 'Actual Calibration'])
calibration_group_data = calibration_data.groupby('bins').apply(calibration_grouping)
calibration_group_data['Perfect Calibration'] = np.arange(0.05, 1.05, 0.10)
calibration_group_data[['Actual Calibration', 'Perfect Calibration']].plot(yticks=np.arange(0.0, 1.1, 0.1)) # noqa
ax = plt.gca()
ax.set_xticks(np.arange(len(bin_labels)))
ax.set_xticklabels(labels=bin_labels, rotation=20, ha='right', size=9)
ax.set_xlim(-0.5, len(bin_labels) - 0.5)
ax.figure.set_size_inches(8, 8)
ax.grid(which='major', alpha=0.1)
for index in range(10):
text = '({}/{} = {:.1%})'.format(calibration_group_data.iloc[index]['Positive Events Found'],
calibration_group_data.iloc[index]['Total Observations'],
calibration_group_data.iloc[index]['Actual Calibration'])
ax.annotate(text,
xy=(index+0.15, calibration_group_data.iloc[index]['Actual Calibration'] - 0.005),
size=7)
ax.scatter(x=np.arange(len(bin_labels)), y=calibration_group_data['Actual Calibration'].values, s=10)
ax.set(**{'title': 'Calibration Chart',
'xlabel': 'Binned Probabilities',
'ylabel': 'Percent of Positive (Actual) Events in Bin'})
plt.tight_layout()
def plot_predicted_probability_hist(self):
calibration_data = pd.concat([self._predicted_probabilities[self._positive_class],
self._actual_classes], axis=1)
calibration_data.columns = ['Predicted Probabilities', 'Actual Classes']
calibration_data['Predicted Probabilities'].hist(by=calibration_data['Actual Classes'],
bins=20)
axes = plt.gcf().get_axes()
for ax in axes:
ax.set_xticks(np.arange(0.0, 1.1, 0.1))
ax.set(**{'xlabel': 'Predicted Probability (Positive Event)',
'ylabel': 'Count'})
ax = plt.gca()
ax.figure.set_size_inches(10, 6)
plt.suptitle('Histogram of Predicted Probabilities, by Actual Outcome', fontsize=12)
plt.tight_layout()
@staticmethod
def _create_gain_lift_data(predicted_probabilities, actual_classes, positive_class):
raw_data = pd.concat([predicted_probabilities[positive_class],
actual_classes], axis=1)
raw_data.columns = ['probabilities', 'actual_classes']
raw_data.sort_values(['probabilities'], ascending=False)
# .qcut gets percentiles
bins = pd.qcut(x=raw_data['probabilities'], q=10, labels=list(range(100, 0, -10)))
raw_data['percentiles'] = bins
# probabilities_classes.sort_values('probabilities')
def gain_grouping(x):
# noinspection PyTypeChecker
number_positive_events = sum(x.actual_classes == positive_class)
d = {'Number of Observations': len(x.actual_classes),
'Number of Positive Events': number_positive_events
}
return | pd.Series(d, index=['Number of Observations', 'Number of Positive Events']) | pandas.Series |
import ipywidgets as widgets
# import bql
# import bqviz as bqv
from bqplot import Figure, Pie, pyplot as plt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from components.efficient_frontier import EfficientFrontier
# bq = bql.Service()
class ETFViewer:
def __init__(self, etf_fields, drivers):
"""Creates an empty component with the label requiring to select an ETF.
"""
label = widgets.Label("Selecione um ETF.")
self.component = widgets.VBox([label])
self.component.layout.width = "40%"
self.etf_fields = etf_fields
self.drivers = drivers
def set_etf(self, ids):
"""Set an ETF to the component and creates all tabs of visualizations.
:param ids: List of BQL ids of the selected ETFs.
:type ids: list
"""
self.ids = ids
self.component.children = [widgets.Label("Carregando...")]
self.hist_returns = self.get_etf_historical_returns()
out_error = widgets.Output()
with out_error:
titles = ['Performance', 'Volatilidade', 'Volume', 'Tracking', 'Holdings', 'Macro Factors', 'Infos']
tab = widgets.Tab()
tab.children = [
self.create_performance_view(),
self.create_volatility_view(),
self.create_volume_view(),
self.create_delta_benchmark_view(),
self.create_holdings_view(),
self.create_drivers_view(),
self.create_infos_view(),
]
for i, title in enumerate(titles):
tab.set_title(i, title)
tab.add_class('custom-tabs')
self.component.children = [self.get_tabs_styles(), tab]
return
self.component.children = [out_error]
def ids_as_string(self):
return ','.join(["'%s'"%id for id in self.ids])
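# e.g. (illustrative ids): self.ids = ['ABC Equity', 'XYZ Equity'] -> "'ABC Equity','XYZ Equity'"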
def get_etf_historical_returns(self):
"""Gets a pandas DataFrame with the historical monthly returns of \
the ETFs from a period of 3 years.
:return: The monthly historical returns.
:rtype: pd.DataFrame
"""
# bql_response = bq.execute("""
# get(PCT_DIFF(PX_LAST(M, dates=range(-3y, 0d)))) for([%s])
# """%self.ids_as_string())
# df = bql_response[0].df()
# df.to_csv('./data/etfs/%s_returns.csv'%(self.ids[0]))
df = pd.DataFrame()
for id in self.ids:
_df = | pd.read_csv('./data/etfs/%s_returns.csv'%(id), index_col=0, parse_dates=['DATE']) | pandas.read_csv |
#Library
import pandas as pd
import camelot
import sqlalchemy
from sqlalchemy import Table, Column, ForeignKey, Integer, String, VARCHAR
import psycopg2
def pdfextrat(url="http://www.fsvs.ks.edu.tw/ezfiles/1/1001/img/58/206002.pdf",page=34):
tables=camelot.read_pdf(url, pages=str(page),flavor="stream")
print("Analysis Alchole PDF.....")
print(tables[0])
print(tables[0].parsing_report)
table_df = tables[0].df
return table_df
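# Minimal usage sketch (assumes the default URL and page are still reachable):
#   raw_df = pdfextrat(page=34)           # one raw camelot table as a DataFrame
#   recipes = table_transform(raw_df)     # -> columns: Team, Drinkname, Ingredients, Method, Garnish, Glassware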
def table_transform(table_df):
rule = ["題序", "一", "二", "三", "四", "五"]
index, team = [], ""
for num, i in enumerate(table_df.iloc[:, 0]):
if "組別" in i:
team = i.replace("組別 ", "")
if i in rule:
index.append(num)
index[0] = index[0] + 2
alc_dict = {
"Team": [],
"Drinkname": [],
"Ingredients": [],
"Method": [],
"Garnish": [],
"Glassware": []
}
for n, first in enumerate(index):
dr, ing, me, gar, glass = "", "", "", "", ""
second = 0
if first == index[len(index) - 1]:
second = len(table_df) - 1
else:
second = index[n + 1]
for i in range(first, second):
if len(table_df.columns) == 10:
dr += table_df.iloc[i, 1]
ing += table_df.iloc[i, 2]
me += table_df.iloc[i, 3]
gar += table_df.iloc[i, 6]
glass += table_df.iloc[i, 7] + table_df.iloc[i, 8] + table_df.iloc[i, 9]
elif len(table_df.columns) == 9:
dr += table_df.iloc[i, 1]
ing += table_df.iloc[i, 2]
me += table_df.iloc[i, 3]
gar += table_df.iloc[i, 5]
glass += table_df.iloc[i, 6] + table_df.iloc[i, 7] + table_df.iloc[i, 8]
elif len(table_df.columns) == 8:
dr += table_df.iloc[i, 1]
ing += table_df.iloc[i, 2]
me += table_df.iloc[i, 3]
gar += table_df.iloc[i, 4]
glass += table_df.iloc[i, 5] + table_df.iloc[i, 6] + table_df.iloc[i, 7]
elif len(table_df.columns) == 7:
dr += table_df.iloc[i, 1]
ing += table_df.iloc[i, 2]
me += table_df.iloc[i, 4]
gar += table_df.iloc[i, 5]
glass += table_df.iloc[i, 6]
else:
dr += table_df.iloc[i, 1]
ing += table_df.iloc[i, 2]
me += table_df.iloc[i, 3]
gar += table_df.iloc[i, 4]
glass += table_df.iloc[i, 5]
alc_dict["Team"].append(team)
alc_dict["Drinkname"].append(dr)
alc_dict["Ingredients"].append(ing)
alc_dict["Method"].append(me)
alc_dict["Garnish"].append(gar)
alc_dict["Glassware"].append(glass)
alc_df = pd.DataFrame(alc_dict)
return alc_df
def multiplepage(page_start, page_end, url="http://www.fsvs.ks.edu.tw/ezfiles/1/1001/img/58/206002.pdf"):
alldf = | pd.DataFrame() | pandas.DataFrame |
import re
import io
import demjson
import requests
import numpy as np
import pandas as pd
from fake_useragent import UserAgent
# TODO: add comments
url = {
"eastmoney": "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx",
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
# https://fred.stlouisfed.org/release/tables?rid=205&eid=712378
def gdp_quarterly():
"""
ABS: absolute value (per 100 million CNY)
YoY: year on year growth
Data source: http://data.eastmoney.com/cjsj/gdp.html
"""
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7519513",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "20",
"_": "1622020352668"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Absolute_Value",
"YoY_Rate",
"Primary_Industry_ABS",
"Primary_Industry_YoY_Rate",
"Secondary_Industry_ABS",
"Secondary_Industry_YoY_Rate",
"Tertiary_Industry_ABS",
"Tertiary_Industry_YoY_Rate",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df["Absolute_Value"] = df["Absolute_Value"].astype(float)
df["Secondary_Industry_ABS"] = df["Secondary_Industry_ABS"].astype(float)
df["Tertiary_Industry_ABS"] = df["Tertiary_Industry_ABS"].astype(float)
df["Absolute_Value"] = df["Absolute_Value"].astype(float)
df["YoY_Rate"] = df["YoY_Rate"].astype(float) / 100
df["Secondary_Industry_YoY_Rate"] = df["Secondary_Industry_YoY_Rate"].astype(
float) / 100
df["Tertiary_Industry_YoY_Rate"] = df["Tertiary_Industry_YoY_Rate"].astype(
float) / 100
return df
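# Rough usage sketch (assumes the eastmoney endpoint still serves this payload):
#   gdp = gdp_quarterly()
#   gdp[['Date', 'Absolute_Value', 'YoY_Rate']].head()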
def ppi_monthly():
"""
ABS: absolute value (per 100 million CNY)
YoY: year on year growth
Accum: Accumulation
Data source: http://data.eastmoney.com/cjsj/ppi.html
"""
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable9051497",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "22",
"_": "1622047940401"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"Current_Month_YoY_Rate",
"Current_Month_Accum"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df["Current_Month"] = df["Current_Month"].astype(float)
df["Current_Month_YoY_Rate"] = df["Current_Month_YoY_Rate"].astype(
float) / 100
df["Current_Month_Accum"] = df["Current_Month_Accum"].astype(float)
return df
def cpi_monthly():
"""
Accum: Accumulation
YoY: year on year growth
MoM: month on month growth
Data source: http://data.eastmoney.com/cjsj/cpi.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable2790750",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "19",
"_": "1622020352668"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Notion_Monthly",
"Notion_YoY_Rate",
"Notion_MoM_Rate",
"Notion_Accum",
"Urban_Monthly",
"Urban_YoY_Rate",
"Urban_MoM_Rate",
"Urban_Accum",
"Rural_Monthly",
"Rural_YoY_Rate",
"Rural_MoM_Rate",
"Rural_Accum",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Notion_Monthly",
"Notion_Accum",
"Urban_Monthly",
"Urban_Accum",
"Rural_Monthly",
"Rural_Accum"]] = df[["Notion_Monthly",
"Notion_Accum",
"Urban_Monthly",
"Urban_Accum",
"Rural_Monthly",
"Rural_Accum"]].astype(float)
df[["Notion_YoY_Rate",
"Notion_MoM_Rate",
"Urban_YoY_Rate",
"Urban_MoM_Rate",
"Rural_YoY_Rate",
"Rural_MoM_Rate"]] = df[["Notion_YoY_Rate",
"Notion_MoM_Rate",
"Urban_YoY_Rate",
"Urban_MoM_Rate",
"Rural_YoY_Rate",
"Rural_MoM_Rate"]].astype(float) / 100
return df
def pmi_monthly():
"""
Man: manufacturing
Non-Man: Non-manufacturing
Data Source: http://data.eastmoney.com/cjsj/pmi.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable4515395",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "2",
"ps": "200",
"mkt": "21",
"_": "162202151821"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Man_Industry_Index",
"Man_Index_YoY_Rate",
"Non-Man_Industry_Index",
"Non-Man_Index_YoY_Rate",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Man_Industry_Index", "Non-Man_Industry_Index"]] = \
df[["Man_Industry_Index", "Non-Man_Industry_Index"]].astype(float)
df[["Man_Index_YoY_Rate", "Non-Man_Index_YoY_Rate"]] = \
df[["Man_Index_YoY_Rate", "Non-Man_Index_YoY_Rate"]].astype(float) / 100
return df
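def _demo_pmi_monthly():
    # Illustrative usage sketch (added, not part of the original module):
    # PMI readings above 50 conventionally signal expansion, below 50 contraction.
    pmi = pmi_monthly().set_index("Date").sort_index()
    expanding = pmi["Man_Industry_Index"] > 50
    print(expanding.tail(12))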
def fai_monthly(): # fix asset investment
"""
    FAI: fixed asset investment
    YoY: year on year growth, MoM: month on month growth, Accum: year-to-date accumulation
Data Source: http://data.eastmoney.com/cjsj/gdzctz.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable607120",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "12",
"_": "1622021790947"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Current_Year_Accum"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Current_Year_Accum"]] = \
df[["Current_Month", "Current_Year_Accum"]].astype(float)
df[["YoY_Rate", "MoM_Rate"]] = \
df[["YoY_Rate", "MoM_Rate"]].astype(float) / 100
return df
def hi_old_monthly(): # house index old version (2008-2010)
"""
    HPI: Housing Prosperity Index, LDAI: Land Development Area Index, SPI: Sales Price Index
    (old national house index series, 2008-2010)
Data Source: http://data.eastmoney.com/cjsj/house.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable1895714",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "10",
"_": "1622022794457"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Housing_Prosperity_Index",
"HPI_YoY_Rate",
"Land_Development_Area_Index",
"LDAI_YoY_Rate",
"Sales_Price_Index",
"SPI_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Housing_Prosperity_Index",
"Land_Development_Area_Index",
"Sales_Price_Index"]] = df[["Housing_Prosperity_Index",
"Land_Development_Area_Index",
"Sales_Price_Index"]].astype(float)
df[["HPI_YoY_Rate", "LDAI_YoY_Rate", "SPI_YoY_Rate"]] = \
df[["HPI_YoY_Rate", "LDAI_YoY_Rate", "SPI_YoY_Rate"]].astype(float) / 100
return df
# mkt=1&stat=2&city1=%E5%B9%BF%E5%B7%9E&city2=%E4%B8%8A%E6%B5%B7
# newly built commercial housing & second-hand commercial housing
def hi_new_monthly(city1: str, city2: str):
"""
    nbch: newly built commercial housing, shch: second-hand commercial housing
    MoM: month on month growth, YoY: year on year growth (per selected city)
Data Source: http://data.eastmoney.com/cjsj/newhouse.html
"""
tmp_url = "http://data.eastmoney.com/dataapi/cjsj/getnewhousechartdata?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params_nbch_MoM = {
"mkt": "1",
"stat": "2",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
request_params_shch_MoM = {
"mkt": "1",
"stat": "3",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
r_nbch_MoM = requests.get(
tmp_url,
params=request_params_nbch_MoM,
headers=request_header)
r_shch_MoM = requests.get(
tmp_url,
params=request_params_shch_MoM,
headers=request_header)
data_text_nbch_MoM = r_nbch_MoM.text
data_text_shch_MoM = r_shch_MoM.text
data_json_nbch_MoM = demjson.decode(data_text_nbch_MoM)
data_json_shch_MoM = demjson.decode(data_text_shch_MoM)
date_nbch = data_json_nbch_MoM['chart']['series']['value']
data1_nbch_MoM = data_json_nbch_MoM['chart']['graphs']['graph'][0]['value']
data2_nbch_MoM = data_json_nbch_MoM['chart']['graphs']['graph'][1]['value']
data1_shch_MoM = data_json_shch_MoM['chart']['graphs']['graph'][0]['value']
data2_shch_MoM = data_json_shch_MoM['chart']['graphs']['graph'][1]['value']
df_MoM = pd.DataFrame({"Date": date_nbch,
"City1_nbch_MoM": data1_nbch_MoM,
"City1_shch_MoM": data1_shch_MoM,
"City2_nbch_MoM": data2_nbch_MoM,
"City2_shch_MoM": data2_shch_MoM})
df_MoM["Date"] = pd.to_datetime(df_MoM["Date"], format="%m/%d/%Y")
request_params_nbch_YoY = {
"mkt": "2",
"stat": "2",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
request_params_shch_YoY = {
"mkt": "2",
"stat": "3",
"city1": "{}".format(city1),
"city2": "{}".format(city2)
}
r_nbch_YoY = requests.get(
tmp_url,
params=request_params_nbch_YoY,
headers=request_header)
r_shch_YoY = requests.get(
tmp_url,
params=request_params_shch_YoY,
headers=request_header)
data_text_nbch_YoY = r_nbch_YoY.text
data_text_shch_YoY = r_shch_YoY.text
data_json_nbch_YoY = demjson.decode(data_text_nbch_YoY)
data_json_shch_YoY = demjson.decode(data_text_shch_YoY)
date_nbch = data_json_nbch_YoY['chart']['series']['value']
data1_nbch_YoY = data_json_nbch_YoY['chart']['graphs']['graph'][0]['value']
data2_nbch_YoY = data_json_nbch_YoY['chart']['graphs']['graph'][1]['value']
data1_shch_YoY = data_json_shch_YoY['chart']['graphs']['graph'][0]['value']
data2_shch_YoY = data_json_shch_YoY['chart']['graphs']['graph'][1]['value']
df_YoY = pd.DataFrame({"Date": date_nbch,
"City1_nbch_YoY": data1_nbch_YoY,
"City1_shch_YoY": data1_shch_YoY,
"City2_nbch_YoY": data2_nbch_YoY,
"City2_shch_YoY": data2_shch_YoY})
df_YoY["Date"] = pd.to_datetime(df_YoY["Date"], format="%m/%d/%Y")
df = df_YoY.merge(df_MoM, on="Date")
return df
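def _demo_hi_new_monthly():
    # Illustrative usage sketch (added, not part of the original module): the
    # sample query string above encodes city1=广州 (Guangzhou) and city2=上海
    # (Shanghai); any city pair listed on the eastmoney page should work.
    prices = hi_new_monthly("广州", "上海").set_index("Date").sort_index()
    print(prices.tail())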
def ci_eei_monthly(): # Climate Index & Entrepreneur Expectation Index
"""
    CI: Climate Index, EEI: Entrepreneur Expectation Index
    YoY: year on year growth, MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/qyjqzs.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7709842",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "8",
"_": "1622041485306"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Climate_Index",
"CI_YoY_Rate",
"CI_MoM_Rate",
"Entrepreneur_Expectation_Index",
"EEI_YoY_Rate",
"EEI_MoM_Rate"
]
df.replace('', np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Climate_Index", "Entrepreneur_Expectation_Index"]] = \
df[["Climate_Index", "Entrepreneur_Expectation_Index"]].astype(float)
df[["CI_YoY_Rate", "CI_MoM_Rate", "EEI_YoY_Rate", "EEI_MoM_Rate"]] = df[[
"CI_YoY_Rate", "CI_MoM_Rate", "EEI_YoY_Rate", "EEI_MoM_Rate"]].astype(float) / 100
return df
def ig_monthly(): # Industry Growth
"""
    IG: Industry Growth (industrial value added)
    YoY: year on year growth, Accum: accumulated (year-to-date) growth
Data Source: http://data.eastmoney.com/cjsj/gyzjz.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable4577327",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "0",
"_": "1622042259898"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"IG_YoY_Rate",
"IG_Accum_Rate",
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["IG_YoY_Rate", "IG_Accum_Rate"]] = \
df[["IG_YoY_Rate", "IG_Accum_Rate"]].astype(float) / 100
return df
def cgpi_monthly(): # Corporate Goods Price Index
"""
    CGPI: Corporate Goods Price Index
    YoY: year on year growth, MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/qyspjg.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7184534",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "9",
"_": "1622042652353"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"General_Index",
"General_Index_YoY_Rate",
"Total_Index_MoM_Rate",
"Agricultural_Product",
"Agricultural_Product_YoY_Rate",
"Agricultural_Product_MoM_Rate",
"Mineral_Product",
"Mineral_Product_YoY_Rate",
"Mineral_Product_MoM_Rate",
"Coal_Oil_Electricity",
"Coal_Oil_Electricity_YoY_Rate",
"Coal_Oil_Electricity_MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["General_Index",
"Agricultural_Product",
"Mineral_Product",
"Coal_Oil_Electricity"]] = df[["General_Index",
"Agricultural_Product",
"Mineral_Product",
"Coal_Oil_Electricity"]].astype(float)
df[["General_Index_YoY_Rate",
"Total_Index_MoM_Rate",
"Agricultural_Product_YoY_Rate",
"Agricultural_Product_MoM_Rate",
"Coal_Oil_Electricity_YoY_Rate",
"Coal_Oil_Electricity_MoM_Rate"]] = df[["General_Index_YoY_Rate",
"Total_Index_MoM_Rate",
"Agricultural_Product_YoY_Rate",
"Agricultural_Product_MoM_Rate",
"Coal_Oil_Electricity_YoY_Rate",
"Coal_Oil_Electricity_MoM_Rate"]].astype(float) / 100
return df
def cci_csi_cei_monthly(): # Consumer Confidence Index & Consumer Satisfaction Index & Consumer Expectation Index
"""
    CCI: Consumer Confidence Index, CSI: Consumer Satisfaction Index, CEI: Consumer Expectation Index
    YoY: year on year growth, MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/xfzxx.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable1243218",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "4",
"_": "1622043704818"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"CCI",
"CCI_YoY_Rate",
"CCI_MoM_Rate",
"CSI",
"CSI_YoY_Rate",
"CSI_MoM_Rate",
"CEI",
"CEI_YoY_Rate",
"CEI_MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["CCI", "CSI", "CEI"]] = \
df[["CCI", "CSI", "CEI"]].astype(float)
df[["CCI_YoY_Rate", "CCI_MoM_Rate",
"CSI_YoY_Rate", "CSI_MoM_Rate",
"CEI_YoY_Rate", "CEI_MoM_Rate"]] = \
df[["CCI_YoY_Rate", "CCI_MoM_Rate",
"CSI_YoY_Rate", "CSI_MoM_Rate",
"CEI_YoY_Rate", "CEI_MoM_Rate"]].astype(float) / 100
return df
def trscg_monthly(): # Total Retail Sales of Consumer Goods
"""
    TRSCG: Total Retail Sales of Consumer Goods
    YoY: year on year growth, MoM: month on month growth, Accum: year-to-date accumulation
Data Source: http://data.eastmoney.com/cjsj/xfp.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable3665821",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "5",
"_": "1622044011316"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"TRSCG_YoY_Rate",
"TRSCG_MoM_Rate",
"TRSCG_Accum",
"TRSCG_Accum_YoY_Rate"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "TRSCG_Accum"]] = \
df[["Current_Month", "TRSCG_Accum"]].astype(float)
df[["TRSCG_YoY_Rate", "TRSCG_MoM_Rate", "TRSCG_Accum_YoY_Rate"]] = df[[
"TRSCG_YoY_Rate", "TRSCG_MoM_Rate", "TRSCG_Accum_YoY_Rate"]].astype(float) / 100
return df
def ms_monthly(): # monetary Supply
"""
    M0/M1/M2: monetary supply aggregates
    YoY: year on year growth, MoM: month on month growth
Data Source: http://data.eastmoney.com/cjsj/hbgyl.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable3818891",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "11",
"_": "1622044292103"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"M2",
"M2_YoY_Rate",
"M2_MoM_Rate",
"M1",
"M1_YoY_Rate",
"M1_MoM_Rate",
"M0",
"M0_YoY_Rate",
"M0_MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["M0", "M1", "M2"]] = \
df[["M0", "M1", "M2"]].astype(float)
df[["M0_YoY_Rate", "M1_YoY_Rate", "M2_YoY_Rate",
"M0_MoM_Rate", "M1_MoM_Rate", "M2_MoM_Rate"]] = \
df[["M0_YoY_Rate", "M1_YoY_Rate", "M2_YoY_Rate",
"M0_MoM_Rate", "M1_MoM_Rate", "M2_MoM_Rate"]].astype(float) / 100
return df
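def _demo_ms_monthly():
    # Illustrative usage sketch (added, not part of the original module): the
    # M1-M2 YoY spread (sometimes called the "scissors gap") is a simple
    # liquidity indicator; positive values mean M1 is growing faster than M2.
    ms = ms_monthly().set_index("Date").sort_index()
    print((ms["M1_YoY_Rate"] - ms["M2_YoY_Rate"]).tail(12))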
def ie_monthly(): # Import & Export
"""
Data Source: http://data.eastmoney.com/cjsj/hgjck.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable3818891",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "1",
"_": "1622044292103"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month_Export",
"Current_Month_Export_YoY_Rate",
"Current_Month_Export_MoM_Rate",
"Current_Month_Import",
"Current_Month_Import_YoY_Rate",
"Current_Month_Import_MoM_Rate",
"Accumulation_Export",
"Accumulation_Export_YoY_Rate",
"Accumulation_Import",
"Accumulation_Import_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df.replace("", np.nan, inplace=True)
df[["Current_Month_Export", "Current_Month_Import",
"Accumulation_Export", "Accumulation_Import"]] = \
df[["Current_Month_Export", "Current_Month_Import",
"Accumulation_Export", "Accumulation_Import"]].astype(float)
df[["Current_Month_Export_YoY_Rate",
"Current_Month_Export_MoM_Rate",
"Current_Month_Import_YoY_Rate",
"Current_Month_Import_MoM_Rate",
"Accumulation_Export_YoY_Rate",
"Accumulation_Import_YoY_Rate"]] = \
df[["Current_Month_Export_YoY_Rate",
"Current_Month_Export_MoM_Rate",
"Current_Month_Import_YoY_Rate",
"Current_Month_Import_MoM_Rate",
"Accumulation_Export_YoY_Rate",
"Accumulation_Import_YoY_Rate"]].astype(float)/100
return df
def stock_monthly(): # Stock Trading Statistics
"""
Data Source: http://data.eastmoney.com/cjsj/gpjytj.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "jQuery112308659690274138041_1622084599455",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "2",
"_": "1622084599456"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("(") + 1:-1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"SH_Total_Stock_issue",
"SZ_Total_Stock_Issue",
"SH_Total_Market_Capitalization",
"SZ_Total_Market_Capitalization",
"SH_Turnover",
"SZ_Turnover",
"SH_Volume",
"SZ_Volume",
"SH_Highest",
"SZ_Highest",
"SH_lowest",
"SZ_lowest"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[list(df.columns[1:])] = df[list(df.columns[1:])].astype(float)
return df
def fgr_monthly(): # Forex and Gold Reserve
"""
    Forex and gold reserve. Data Source: http://data.eastmoney.com/cjsj/hjwh.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "tatable6260802",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "16",
"_": "1622044863548"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Forex",
"Forex_YoY_Rate",
"Forex_MoM_Rate",
"Gold",
"Gold_YoY_Rate",
"Gold_MoM_Rate"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Forex", "Gold"]] = \
df[["Forex", "Gold"]].astype(float)
df[["Forex_YoY_Rate", "Gold_YoY_Rate",
"Forex_MoM_Rate", "Gold_MoM_Rate"]] = \
df[["Forex_YoY_Rate", "Gold_YoY_Rate",
"Forex_MoM_Rate", "Gold_MoM_Rate"]].astype(float) / 100
return df
def ctsf_monthly(): # Client Transaction Settlement Funds
"""
http://data.eastmoney.com/cjsj/banktransfer.html
"""
tmp_url = "http://data.eastmoney.com/dataapi/cjsj/getbanktransferdata?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"p": "1",
"ps": "200"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("["):-11])
df = pd.DataFrame(data_json)
df.replace("", np.nan, inplace=True)
df["StartDate"] = pd.to_datetime(df["StartDate"], format="%Y-%m-%d")
df["EndDate"] = pd.to_datetime(df["EndDate"], format="%Y-%m-%d")
df[list(df.columns)[2:]] = df[list(df.columns)[2:]].astype(float)
return df
# TODO: SPECIAL CASE
def sao_monthly(): # Stock Account Overview
"""
http://data.eastmoney.com/cjsj/gpkhsj.html
"""
tmp_url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"callback": "datatable4006236",
"type": "GPKHData",
"js": "({data:[(x)],pages:(pc)})",
"st": "SDATE",
"sr": "-1",
"token": "<KEY>",
"p": "1",
"ps": "2000",
"_": "1622079339035"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") + 6: -14])
df = pd.DataFrame(data_json[0])
df.columns = [
"Date",
"New_Investor",
"New_Investor_MoM_Rate",
"New_Investor_YoY_Rate",
"Active_Investor",
"Active_Investor_A_Share",
"Active_Investor_B_share",
"SHIndex_Close",
"SHIndex_Rate",
"SHSZ_Market_Capitalization",
"SHSZ_Average_Capitalization"
]
df.replace("-", np.nan, inplace=True)
df.Date = pd.to_datetime(df.Date, format="%Y年%m月")
df[list(df.columns[~df.columns.isin(["Date", "New_Investor_MoM_Rate", "New_Investor_YoY_Rate"])])] = df[list(
df.columns[~df.columns.isin(["Date", "New_Investor_MoM_Rate", "New_Investor_YoY_Rate"])])].astype(float)
df[["New_Investor_MoM_Rate", "New_Investor_YoY_Rate"]] = \
df[["New_Investor_MoM_Rate", "New_Investor_YoY_Rate"]].astype(float) / 100
return df
def fdi_monthly(): # Foreign Direct Investment
"""
http://data.eastmoney.com/cjsj/fdi.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable1477466",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "15",
"_": "1622044863548"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation",
"Accum_YoY_Rate"
]
df.replace("", np.nan, inplace=True)
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]] = \
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]].astype(float) / 100
return df
def gr_monthly(): # Government Revenue
"""
http://data.eastmoney.com/cjsj/czsr.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7840652",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "14",
"_": "1622080618625"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation",
"Accum_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]] = \
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]].astype(float) / 100
return df
def ti_monthly(): # Tax Income
"""
http://data.eastmoney.com/cjsj/qgsssr.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable8280567",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "3",
"_": "1622080669713"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df = df.replace("", np.nan)
df[["Current_Month"]] = \
df[["Current_Month"]].astype(float)
df[["YoY_Rate", "MoM_Rate"]] = \
df[["YoY_Rate", "MoM_Rate"]].astype(float) / 100
return df
def nl_monthly(): # New Loan
"""
http://data.eastmoney.com/cjsj/xzxd.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable2533707",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "7",
"_": "1622080800162"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation",
"Accum_YoY_Rate"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]] =\
df[["YoY_Rate", "MoM_Rate", "Accum_YoY_Rate"]].astype(float) / 100
return df
def dfclc_monthly(): # Deposit of Foreign Currency and Local Currency
"""
http://data.eastmoney.com/cjsj/wbck.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable2899877",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "18",
"_": "1622081057370"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate"]] = \
df[["YoY_Rate", "MoM_Rate"]].astype(float) / 100
return df
def fl_monthly(): # Forex Loan
"""
http://data.eastmoney.com/cjsj/whxd.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable636844",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "17",
"_": "1622081336038"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Date",
"Current_Month",
"YoY_Rate",
"MoM_Rate",
"Accumulation"
]
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
df[["Current_Month", "Accumulation"]] = \
df[["Current_Month", "Accumulation"]].astype(float)
df[["YoY_Rate", "MoM_Rate"]] = \
df[["YoY_Rate", "MoM_Rate"]].astype(float) / 100
return df
def drr_monthly(): # Deposit Reserve Ratio
"""
http://data.eastmoney.com/cjsj/ckzbj.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable4285562",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "23",
"_": "1622081448882"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Announcement Date",
"Effective Date",
"Large_Financial_institution_Before",
"Large_Financial_institution_After",
"Large_Financial_institution_Adj_Rate",
"S&M_Financial_institution_Before",
"S&M_Financial_institution_After",
"S&M_Financial_institution_Adj_Rate",
"Comment",
"SHIndex_Rate",
"SZIndex_Rate"
]
df["Announcement Date"] = pd.to_datetime(
df["Announcement Date"], format="%Y-%m-%d")
df["Date"] = df["Announcement Date"]
df["Effective Date"] = pd.to_datetime(
df["Effective Date"], format="%Y-%m-%d")
df[["SHIndex_Rate", "SZIndex_Rate"]] = df[["SHIndex_Rate", "SZIndex_Rate"]].astype(float)
df[["Large_Financial_institution_Before",
"Large_Financial_institution_After",
"Large_Financial_institution_Adj_Rate",
"S&M_Financial_institution_Before",
"S&M_Financial_institution_After",
"S&M_Financial_institution_Adj_Rate"]] = df[["Large_Financial_institution_Before",
"Large_Financial_institution_After",
"Large_Financial_institution_Adj_Rate",
"S&M_Financial_institution_Before",
"S&M_Financial_institution_After",
"S&M_Financial_institution_Adj_Rate"]].astype(float) / 100
return df
def interest_monthly(): # Interest
"""
http://data.eastmoney.com/cjsj/yhll.html
"""
tmp_url = url["eastmoney"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["eastmoney"]
request_params = {
"cb": "datatable7591685",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "200",
"mkt": "13",
"_": "1622081956464"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -1])
df = pd.DataFrame([item.split(",") for item in data_json["data"]])
df.columns = [
"Announcement Date",
"Deposit_Benchmark_Interest_Rate_Before",
"Deposit_Benchmark_Interest_Rate_After",
"Deposit_Benchmark_Interest_Rate_Adj_Rate",
"Loan_Benchmark_Interest_Rate_Before",
"Loan_Benchmark_Interest_Rate_After",
"Loan_Benchmark_Interest_Rate_Adj_Rate",
"SHIndex_Rate",
"SZIndex_Rate",
"Effective Date"
]
df["Announcement Date"] = pd.to_datetime(
df["Announcement Date"], format="%Y-%m-%d")
df["Date"] = df["Announcement Date"]
df["Effective Date"] = pd.to_datetime(
df["Effective Date"], format="%Y-%m-%d")
df["Date"] = df["Announcement Date"]
df = df.replace("", np.nan)
df[["Deposit_Benchmark_Interest_Rate_Before",
"Deposit_Benchmark_Interest_Rate_After",
"Deposit_Benchmark_Interest_Rate_Adj_Rate",
"Loan_Benchmark_Interest_Rate_Before",
"Loan_Benchmark_Interest_Rate_After",
"Loan_Benchmark_Interest_Rate_Adj_Rate"]] = df[["Deposit_Benchmark_Interest_Rate_Before",
"Deposit_Benchmark_Interest_Rate_After",
"Deposit_Benchmark_Interest_Rate_Adj_Rate",
"Loan_Benchmark_Interest_Rate_Before",
"Loan_Benchmark_Interest_Rate_After",
"Loan_Benchmark_Interest_Rate_Adj_Rate"]].astype(float) / 100
return df
def gdc_daily(): # gasoline, Diesel and Crude Oil
"""
http://data.eastmoney.com/cjsj/oil_default.html
"""
tmp_url = "http://datacenter-web.eastmoney.com/api/data/get?"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"callback": "jQuery112302601302322321093_1622082348721",
"type": "RPTA_WEB_JY_HQ",
"sty": "ALL",
"st": "date",
"sr": "-1",
"token": "<KEY>",
"p": "1",
"ps": "50000",
"source": "WEB",
"_": "1622082348722"
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"): -2])
df = pd.DataFrame(data_json["result"]["data"])
df.columns = ["Crude_Oil", "Date", "Gasoline", "Diesel"]
df = df[["Date", "Gasoline", "Diesel", "Crude_Oil"]]
df = pd.to_datetime(df["Date"], format="%Y-%m-%d")
return df
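def _demo_gdc_daily():
    # Illustrative usage sketch (added, not part of the original module):
    # daily guidance prices for gasoline/diesel next to the crude oil benchmark.
    fuel = gdc_daily().set_index("Date").sort_index()
    print(fuel[["Gasoline", "Diesel", "Crude_Oil"]].tail())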
def Leading_Indicators_OECD(startdate = "1950-01", enddate = "2021-05"):
# CLI
tmp_url = url["OECD"] + "CHN.CLI.AMPLITUD.LTRENDIDX.M/OECD"
ua = UserAgent(verify_ssl=False)
request_params = {
"contentType": "csv",
"detail": "code",
"separator": "comma",
"csv-lang": "en",
"startPeriod": "{}".format(startdate),
"endPeriod": "{}".format(enddate)
}
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, params = request_params, headers=request_header)
data_text = r.content
df_cli = pd.read_csv(io.StringIO(data_text.decode('utf-8')))[["TIME", "Value"]]
df_cli.columns = ["Date", "CN_OECD_CLI"]
df_cli["Date"] = pd.to_datetime(df_cli["Date"], format = "%Y-%m")
df_cli["CN_OECD_CLI"] = df_cli["CN_OECD_CLI"].astype(float)
#BCI
tmp_url = url["OECD"] + "CHN.BCI.AMPLITUD.LTRENDIDX.M/OECD"
ua = UserAgent(verify_ssl=False)
request_params = {
"contentType": "csv",
"detail": "code",
"separator": "comma",
"csv-lang": "en",
"startPeriod": "{}".format(startdate),
"endPeriod": "{}".format(enddate)
}
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, params = request_params, headers=request_header)
data_text = r.content
df_bci = pd.read_csv(io.StringIO(data_text.decode('utf-8')))[["TIME", "Value"]]
df_bci.columns = ["Date", "CN_OECD_BCI"]
df_bci["Date"] = pd.to_datetime(df_cli["Date"], format = "%Y-%m")
df_bci["CN_OECD_BCI"] = df_bci["CN_OECD_BCI"].astype(float)
# CCI
tmp_url = url["OECD"] + "CHN.CCI.AMPLITUD.LTRENDIDX.M/OECD"
ua = UserAgent(verify_ssl=False)
request_params = {
"contentType": "csv",
"detail": "code",
"separator": "comma",
"csv-lang": "en",
"startPeriod": "{}".format(startdate),
"endPeriod": "{}".format(enddate)
}
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, params = request_params, headers=request_header)
data_text = r.content
df_cci = pd.read_csv(io.StringIO(data_text.decode('utf-8')))[["TIME", "Value"]]
df_cci.columns = ["Date", "CN_OECD_CCI"]
df_cci["Date"] = pd.to_datetime(df_cci["Date"], format = "%Y-%m")
df_cci["CN_OECD_CCI"] = df_cci["CN_OECD_CCI"].astype(float)
df = pd.merge_asof(df_cli, df_bci, on = "Date")
df = pd.merge_asof(df, df_cci, on = "Date")
return df
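def _demo_leading_indicators_oecd():
    # Illustrative usage sketch (added, not part of the original module):
    # pull the OECD CLI/BCI/CCI series for China over a shorter window.
    oecd = Leading_Indicators_OECD(startdate="2015-01", enddate="2021-05")
    print(oecd.set_index("Date").tail())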
def CN_EPU_Monthly():
df = pd.read_excel("https://economicpolicyuncertaintyinchina.weebly.com/uploads/1/2/2/7/122762465/cnepu_1_july_2021_updated.xlsx")[:-2]
df.Date = | pd.to_datetime(df.Date, format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64)
tm.assert_series_equal(rs, df.y)
def _check_data(self, xp, rs):
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_array_equal(xpdata, rsdata)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, sharex=True, legend=True)
for ax in axes:
self.assert_(ax.get_legend() is not None)
axes = df.plot(subplots=True, sharex=True)
for ax in axes[:-2]:
[self.assert_(not label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_xticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_yticklabels()]
axes = df.plot(subplots=True, sharex=False)
for ax in axes:
[self.assert_(label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
@slow
def test_plot_scatter(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot, x='x', y='y', kind='scatter')
_check_plot_works(df.plot, x=1, y=2, kind='scatter')
with tm.assertRaises(ValueError):
df.plot(x='x', kind='scatter')
with tm.assertRaises(ValueError):
df.plot(y='y', kind='scatter')
@slow
def test_plot_bar(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot, kind='bar')
_check_plot_works(df.plot, kind='bar', legend=False)
_check_plot_works(df.plot, kind='bar', subplots=True)
_check_plot_works(df.plot, kind='bar', stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot, kind='bar')
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
_check_plot_works(df.plot, kind='bar')
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', stacked='True', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width() / 2)
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width())
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True, log=True)
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot(log=True, kind='bar',
subplots=True)
assert_array_equal(ax[0].yaxis.get_ticklocs(), expected)
assert_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot)
_check_plot_works(df.boxplot, column=['one', 'two'])
_check_plot_works(df.boxplot, column=['one', 'two'], by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting.boxplot, df['one'])
_check_plot_works(df.boxplot, notch=1)
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
_check_plot_works(df.boxplot, by='X')
@slow
def test_kde(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 4))
_check_plot_works(df.plot, kind='kde')
_check_plot_works(df.plot, kind='kde', subplots=True)
ax = df.plot(kind='kde')
self.assert_(ax.get_legend() is not None)
axes = df.plot(kind='kde', logy=True, subplots=True)
for ax in axes:
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_hist(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
_check_plot_works(df.hist)
_check_plot_works(df.hist, grid=False)
# make sure layout is handled
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
axes = df.hist(grid=False)
self.assert_(not axes[1, 1].get_visible())
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
_check_plot_works(df.hist)
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
_check_plot_works(df.hist, figsize=(8, 10))
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 20
xrot, yrot = 30, 30
ax = ser.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
xf, yf = 20, 20
xrot, yrot = 30, 30
axes = df.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
for i, ax in enumerate(axes.ravel()):
if i < len(df.columns):
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self.assertEqual(ax.get_yscale(), 'log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with tm.assertRaises(AttributeError):
ser.hist(foo='bar')
@slow
def test_hist_layout(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
)
for layout_test in layout_to_expected_size:
ax = df.hist(layout=layout_test['layout'])
self.assertEqual(len(ax), layout_test['expected_size'][0])
self.assertEqual(len(ax[0]), layout_test['expected_size'][1])
# layout too small for all 4 plots
with tm.assertRaises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with tm.assertRaises(ValueError):
df.hist(layout=(1,))
@slow
def test_scatter(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 2))
import pandas.tools.plotting as plt
def scat(**kwds):
return plt.scatter_matrix(df, **kwds)
_check_plot_works(scat)
_check_plot_works(scat, marker='+')
_check_plot_works(scat, vmin=0)
_check_plot_works(scat, diagonal='kde')
_check_plot_works(scat, diagonal='density')
_check_plot_works(scat, diagonal='hist')
def scat2(x, y, by=None, ax=None, figsize=None):
return plt.scatter_plot(df, x, y, by, ax, figsize=None)
_check_plot_works(scat2, 0, 1)
grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index)
_check_plot_works(scat2, 0, 1, by=grouper)
@slow
def test_andrews_curves(self):
from pandas import read_csv
from pandas.tools.plotting import andrews_curves
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(andrews_curves, df, 'Name')
@slow
def test_parallel_coordinates(self):
from pandas import read_csv
from pandas.tools.plotting import parallel_coordinates
from matplotlib import cm
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(parallel_coordinates, df, 'Name')
_check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
_check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
_check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet)
df = read_csv(path, header=None, skiprows=1, names=[1, 2, 4, 8,
'Name'])
_check_plot_works(parallel_coordinates, df, 'Name', use_columns=True)
_check_plot_works(parallel_coordinates, df, 'Name',
xticks=[1, 5, 25, 125])
@slow
def test_radviz(self):
from pandas import read_csv
from pandas.tools.plotting import radviz
from matplotlib import cm
path = os.path.join(curpath(), 'data', 'iris.csv')
df = | read_csv(path) | pandas.read_csv |
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-n:
    Number of samples used to visualise the model fit (default: 100).
-k:
    Number of binomial draws per sample used to visualise the model fit (default: 1).
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
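# NOTE: the Sciensano case data (df_sciensano) is used below, but its loading step is not shown in
# this excerpt. A minimal sketch follows, assuming the covid19model package exposes the loader
# below; the function name and signature are an assumption, adjust to the actual API of
# covid19model.data.sciensano if it differs.
df_sciensano = sciensano.get_sciensano_COVID19_data(update=False)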
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
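# Illustration (hypothetical series): crosscorr(a, b, lag=3) correlates a(t) with b(t-3), so a
# correlation peak at lag=3 suggests that b leads a by roughly 3 days.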
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
        policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest, 
                              school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
# Loop over dimension draws
for n in range(data.shape[0]):
binomial_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = binomial_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
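# The Poisson layer above emulates observation noise on the simulated time series: each model
# trajectory is treated as a Poisson mean and re-sampled n_draws_per_sample times before the
# mean/median and the (LL, UL) quantile band are computed, e.g.
#   mean, median, LL, UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)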
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
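# Reading of the formula above: for age class i,
#   R0_i = (a_i * d_a + omega) * beta * sum_j Nc_{ij},
# i.e. an effective infectious duration (built from the sampled a, da and omega), times the
# per-contact infectivity beta, times the total number of contacts of that age class;
# R0_overall is the population-weighted mean of the R0_i.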
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL','rest_mean','rest_LL','rest_UL',
'home_mean','home_LL','home_UL','total_mean','total_LL','total_UL','work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL','total_mean','total_LL','total_UL']]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
for j,samples_dict in enumerate(samples_dicts):
print('\n WAVE: ' + str(j)+'\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/ | pd.Timedelta('1D') | pandas.Timedelta |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
target = 'scale'
# IP
plot_mode = 'all_in_one'
obj = 'occ'
# Port
flow_dir = 'all'
port_dir = 'sys'
user_plot_pr = ['TCP']
user_plot_pr = ['UDP']
port_hist = pd.DataFrame({'A' : []})
user_port_hist = pd.DataFrame({'A' : []})
def acf(x, length=10):
return np.array([1]+[np.corrcoef(x[:-i], x[i:])[0,1] \
for i in range(1, length)])
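# acf(x) returns the autocorrelation at lags 0..length-1 (1 at lag 0 by construction); a strong
# periodicity in the traffic series shows up as secondary peaks at the corresponding lags.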
def scale_check(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
scale_list = []
for col in ['byt', 'pkt']:
scale_list.append(col)
scale_list.append(str(np.min(df[col])))
scale_list.append(str(np.log(np.max(df[col]))))
scale_list.append(';')
print(files[data_idx], ':', (' '.join(scale_list)))
def pr_distribution(data_idx, plot=False):
files = ['stan','stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_fwd','stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
# pr marginal distribution
pr_series = df['pr'].value_counts()
print(names[data_idx], pr_series)
ct = [0, 0, 0]
for i in pr_series.keys():
if i == 'TCP':
ct[0] += pr_series[i]
elif i == 'UDP':
ct[1] += pr_series[i]
else:
ct[2] += pr_series[i]
ct2 = [x/sum(ct) for x in ct]
print(ct2)
with open('results/pr/pr_marginal.csv', 'a') as out:
out.write(','.join([names[data_idx], str(ct2[0]), str(ct2[1]), str(ct2[2]), '\n']))
# prob of spec ports
# http 80/tcp
# https 443/tcp, 443/udp
# ssh 22/tcp
# DNS Service 53
# FTP 21/tcp
# ob_ports = [80, 443, 22, 53, 21]
# for ob_q in ob_ports:
# df_ = df[df['dp'] == ob_q]
# print(ob_q, len(df_.index)/len(df.index), len(df_.index), len(df.index))
# input()
def check_distribution(df, name, user=None):
# count = df_all.value_counts()
# df.hist = df.hist()
df = df.astype(int)
# print(df.value_counts(normalize=True))
global port_hist
global user_port_hist
if port_dir == 'sys':
        df.hist(bins=1024)  # df is a pandas Series of destination ports here
# plt.plot(df.value_counts().index, df.value_counts().values)
plt.savefig('./results/ports/%s/%s.png' % (port_dir, name))
plt.clf()
port_hist[name+'_port'] = df.value_counts(normalize=True)[:10].index
port_hist[name+'_occ'] = df.value_counts(normalize=True)[:10].values
else:
l_p = []
l_o = []
bar_size = 6000
for i in range(1024, 65536, bar_size):
l_p.append(i)
l_o.append(len(df[(i<=df) & (df<i+bar_size)].index))
# print(df[(i<=df) & (df<i+bar_size)])
# print(i, i+bar_size)
# input()
# print(l_o, name)
l_o = [x/sum(l_o) for x in l_o]
if len(user_port_hist.columns) == 1:
user_port_hist[name+'_port'] = l_p
user_port_hist[name+'_occ'] = l_o
plt.plot(l_p, l_o)
plt.xlabel("user port")
plt.ylabel("probability")
plt.title("user port distribution")
# plt.xticks(x_pos, x)
# plt.savefig('./results/ports/%s/%s.png' % (port_dir, name))
# plt.clf()
print('plotted %s' % name)
def port_distribution(data_idx, plot=False):
files = ['stan','stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_fwd','stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
for pr in ['TCP', 'UDP']:
df_pr = df[df['pr'] == pr]
if flow_dir == 'outgoing':
flows = df_pr[df_pr['sa'].str.startswith('42.219')]
elif flow_dir == 'incoming':
flows = df_pr[df_pr['da'].str.startswith('42.219')]
else:
flows = df_pr.dropna()
# outgoing_port = pd.concat([outgoing_flows['sp'], outgoing_flows['dp']], axis= 0)
# check_distribution(outgoing_port, files[data_idx]+'_outgoing')
# incoming_port = pd.concat([flows['sp'], flows['dp']], axis= 0)
if port_dir == 'sys':
incoming_port = flows[flows['dp']<1024]['dp']
check_distribution(incoming_port, names[data_idx]+'_'+ pr +'_'+flow_dir)
else:
user_port = flows[flows['dp']>=1024]['dp']
check_distribution(user_port, names[data_idx]+'_'+ pr +'_'+flow_dir, user=True)
def attribute_autocorr(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
# files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc':
df = | pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0)) | pandas.read_csv |
import os
from os.path import join
from warnings import warn
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from collections import OrderedDict, defaultdict
import torch
from torch.optim.optimizer import Optimizer
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from torch import nn
from utils.composition import generate_features, _element_composition
from sklearn.preprocessing import StandardScaler, Normalizer
import json
plt.rcParams.update({"font.size": 16})
RNG_SEED = 42
torch.manual_seed(RNG_SEED)
np.random.seed(RNG_SEED)
# %%
fig_dir = r"figures/Classics/"
data_type_torch = torch.float32
data_type_np = np.float32
# %%
class CONSTANTS:
def __init__(self):
self.crab_red = "#f2636e"
self.dense_blue = "#2c2cd5"
self.colors = list(sns.color_palette("Set1", n_colors=7, desat=0.5))
self.markers = [
"o",
"x",
"s",
"^",
"D",
"P",
"1",
"2",
"3",
"4",
"p",
"*",
"h",
"H",
"+",
"d",
"|",
"_",
]
self.eps = ["oliynyk", "jarvis", "mat2vec", "onehot", "magpie", "random_200"]
self.benchmark_props = [
"aflow__ael_bulk_modulus_vrh",
"aflow__ael_debye_temperature",
"aflow__ael_shear_modulus_vrh",
"aflow__agl_thermal_conductivity_300K",
"aflow__agl_thermal_expansion_300K",
"aflow__Egap",
"aflow__energy_atom",
"CritExam__Ed",
"CritExam__Ef",
"mp_bulk_modulus",
"mp_elastic_anisotropy",
"mp_e_hull",
"mp_mu_b",
"mp_shear_modulus",
"OQMD_Bandgap",
"OQMD_Energy_per_atom",
"OQMD_Formation_Enthalpy",
"OQMD_Volume_per_atom",
]
self.benchmark_names = [
"AFLOW Bulk modulus",
"AFLOW Debye temperature",
"AFLOW Shear modulus",
"AFLOW Thermal conductivity",
"AFLOW Thermal expansion",
"AFLOW Band gap",
"AFLOW Energy per atom",
"Bartel Decomposition (Ed)",
"Bartel Formation (Ef)",
"MP Bulk modulus",
"MP Elastic anisotropy",
"MP Energy above convex hull",
"MP Magnetic moment",
"MP Shear modulus",
"OQMD Band gap",
"OQMD Energy per atom",
"OQMD Formation enthalpy",
"OQMD Volume per atom",
]
self.matbench_props = [
"castelli",
"dielectric",
"elasticity_log10(G_VRH)",
"elasticity_log10(K_VRH)",
"expt_gap",
"expt_is_metal",
"glass",
"jdft2d",
"mp_e_form",
"mp_gap",
"mp_is_metal",
"phonons",
"steels_yield",
]
self.matbench_names = [
"Castelli perovskites",
"Refractive index",
"Shear modulus (log10)",
"Bulk modulus (log10)",
"Experimental band gap",
"Experimental metallicity",
"Experimental glass formation",
"DFT Exfoliation energy",
"MP Formation energy",
"MP Band gap",
"MP Metallicity",
"Phonon peak",
"Steels yield",
]
self.benchmark_names_dict = dict(
zip(self.benchmark_props, self.benchmark_names)
)
self.matbench_names_dict = dict(zip(self.matbench_props, self.matbench_names))
self.mb_units_dict = {
"castelli": "eV/unit cell",
"dielectric": "unitless",
"elasticity_log10(G_VRH)": "log(GPa)",
"elasticity_log10(K_VRH)": "log(GPa)",
"expt_gap": "eV",
"expt_is_metal": "binary",
"glass": "binary",
"jdft2d": "meV/atom",
"mp_e_form": "eV/atom",
"mp_gap": "eV",
"mp_is_metal": "binary",
"phonons": "$cm^{−1}$",
"steels_yield": "MPa",
}
self.bm_units_dict = {
"aflow__ael_bulk_modulus_vrh": None,
"aflow__ael_debye_temperature": None,
"aflow__ael_shear_modulus_vrh": None,
"aflow__agl_thermal_conductivity_300K": None,
"aflow__agl_thermal_expansion_300K": None,
"aflow__Egap": None,
"aflow__energy_atom": None,
"CritExam__Ed": None,
"CritExam__Ef": None,
"mp_bulk_modulus": None,
"mp_elastic_anisotropy": None,
"mp_e_hull": None,
"mp_mu_b": None,
"mp_shear_modulus": None,
"OQMD_Bandgap": None,
"OQMD_Energy_per_atom": None,
"OQMD_Formation_Enthalpy": None,
"OQMD_Volume_per_atom": None,
}
self.mp_units_dict = {
"energy_atom": "eV/atom",
"ael_shear_modulus_vrh": "GPa",
"ael_bulk_modulus_vrh": "GPa",
"ael_debye_temperature": "K",
"Egap": "eV",
"agl_thermal_conductivity_300K": "W/m*K",
"agl_log10_thermal_expansion_300K": "1/K",
}
self.mp_sym_dict = {
"energy_atom": "$E_{atom}$",
"ael_shear_modulus_vrh": "$G$",
"ael_bulk_modulus_vrh": "$B$",
"ael_debye_temperature": "$\\theta_D$",
"Egap": "$E_g$",
"agl_thermal_conductivity_300K": "$\\kappa$",
"agl_log10_thermal_expansion_300K": "$\\alpha$",
}
self.classification_list = ["mp_is_metal", "expt_is_metal", "glass"]
self.classic_models_dict = {
"Ridge": "Ridge",
"SGDRegressor": "SGD",
"ExtraTreesRegressor": "ExtraTrees",
"RandomForestRegressor": "RF",
"AdaBoostRegressor": "AdaBoost",
"GradientBoostingRegressor": "GradBoost",
"KNeighborsRegressor": "kNN",
"SVR": "SVR",
"lSVR": "lSVR",
}
self.atomic_symbols = [
"None",
"H",
"He",
"Li",
"Be",
"B",
"C",
"N",
"O",
"F",
"Ne",
"Na",
"Mg",
"Al",
"Si",
"P",
"S",
"Cl",
"Ar",
"K",
"Ca",
"Sc",
"Ti",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"Cu",
"Zn",
"Ga",
"Ge",
"As",
"Se",
"Br",
"Kr",
"Rb",
"Sr",
"Y",
"Zr",
"Nb",
"Mo",
"Tc",
"Ru",
"Rh",
"Pd",
"Ag",
"Cd",
"In",
"Sn",
"Sb",
"Te",
"I",
"Xe",
"Cs",
"Ba",
"La",
"Ce",
"Pr",
"Nd",
"Pm",
"Sm",
"Eu",
"Gd",
"Tb",
"Dy",
"Ho",
"Er",
"Tm",
"Yb",
"Lu",
"Hf",
"Ta",
"W",
"Re",
"Os",
"Ir",
"Pt",
"Au",
"Hg",
"Tl",
"Pb",
"Bi",
"Po",
"At",
"Rn",
"Fr",
"Ra",
"Ac",
"Th",
"Pa",
"U",
"Np",
"Pu",
"Am",
"Cm",
"Bk",
"Cf",
"Es",
"Fm",
"Md",
"No",
"Lr",
"Rf",
"Db",
"Sg",
"Bh",
"Hs",
"Mt",
"Ds",
"Rg",
"Cn",
"Nh",
"Fl",
"Mc",
"Lv",
"Ts",
"Og",
]
self.idx_symbol_dict = {(i): sym for i, sym in enumerate(self.atomic_symbols)}
# %%
def get_cbfv(path, elem_prop="oliynyk", scale=False):
"""
Loads the compound csv file and featurizes it, then scales the features
using StandardScaler.
Parameters
----------
path : str
DESCRIPTION.
elem_prop : str, optional
DESCRIPTION. The default is 'oliynyk'.
Returns
-------
X_scaled : TYPE
DESCRIPTION.
y : TYPE
DESCRIPTION.
formula : TYPE
DESCRIPTION.
"""
df = pd.read_csv(path, keep_default_na=False, na_values=[""])
if "formula" not in df.columns.values.tolist():
df["formula"] = df["cif_id"].str.split("_ICSD").str[0]
# elem_prop = 'mat2vec'
# elem_prop = 'oliynyk'
mini = False
# mini = True
X, y, formula, skipped = generate_features(df, elem_prop, mini=mini)
if scale:
# scale each column of data to have a mean of 0 and a variance of 1
scaler = StandardScaler()
# normalize each row in the data
normalizer = Normalizer()
X_scaled = scaler.fit_transform(X)
X_scaled = pd.DataFrame(
normalizer.fit_transform(X_scaled),
columns=X.columns.values,
index=X.index.values,
)
return X_scaled, y, formula, skipped
else:
return X, y, formula, skipped
# %%
def BCEWithLogitsLoss(output, log_std, target):
loss = nn.functional.binary_cross_entropy_with_logits(output, target)
return loss
def RobustL1(output, log_std, target):
"""
Robust L1 loss using a lorentzian prior. Allows for estimation
of an aleatoric uncertainty.
"""
absolute = torch.abs(output - target)
loss = np.sqrt(2.0) * absolute * torch.exp(-log_std) + log_std
return torch.mean(loss)
def RobustL2(output, log_std, target):
"""
Robust L2 loss using a gaussian prior. Allows for estimation
of an aleatoric uncertainty.
"""
squared = torch.pow(output - target, 2.0)
loss = 0.5 * squared * torch.exp(-2.0 * log_std) + log_std
return torch.mean(loss)
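# Both robust losses have the form of a negative log-likelihood with a learned scale (up to
# additive constants): RobustL1 matches Laplace-distributed residuals with scale exp(log_std),
# RobustL2 matches Gaussian residuals with std exp(log_std). This is what lets the network report
# an aleatoric uncertainty alongside its prediction.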
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# %%
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.integer):
return int(obj)
        elif isinstance(obj, np.floating):  # np.float is a deprecated alias; np.floating covers NumPy float scalars
return float(obj)
return json.JSONEncoder.default(self, obj)
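# Example: json.dumps({"scores": np.arange(3), "best": np.float64(0.91)}, cls=NumpyEncoder)
# serialises NumPy arrays and scalars that the stock JSON encoder would reject.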
def count_gs_param_combinations(d):
cnt_dict = OrderedDict({})
# array = []
if isinstance(d, (list)) and not isinstance(d, (bool)):
return len(d), cnt_dict
elif isinstance(d, (int, float, complex)) and not isinstance(d, (bool)):
return 1, cnt_dict
elif isinstance(d, (bool)) or isinstance(d, (str)):
return 1, cnt_dict
elif d is None:
return 1, cnt_dict
elif isinstance(d, (dict, OrderedDict)):
keys = d.keys()
for k in keys:
array = []
subd = d[k]
array.append(count_gs_param_combinations(subd)[0])
cnt = np.prod(array)
cnt_dict[k] = cnt
return np.prod(list(cnt_dict.values())), cnt_dict
return cnt, cnt_dict
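# Example: for a grid such as {'alpha': [0.1, 1, 10], 'fit_intercept': [True, False]} this returns
# (6, OrderedDict([('alpha', 3), ('fit_intercept', 2)])): the total number of grid-search
# combinations plus the per-key counts.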
# %%
class Scaler:
def __init__(self, data):
self.data = torch.as_tensor(data)
self.mean = torch.mean(self.data)
self.std = torch.std(self.data)
def scale(self, data):
data = torch.as_tensor(data)
data_scaled = (data - self.mean) / self.std
return data_scaled
def unscale(self, data_scaled):
data_scaled = torch.as_tensor(data_scaled)
data = data_scaled * self.std + self.mean
return data
def state_dict(self):
return {"mean": self.mean, "std": self.std}
def load_state_dict(self, state_dict):
self.mean = state_dict["mean"]
self.std = state_dict["std"]
class DummyScaler:
def __init__(self, data):
self.data = torch.as_tensor(data)
self.mean = torch.mean(self.data)
self.std = torch.std(self.data)
def scale(self, data):
return torch.as_tensor(data)
def unscale(self, data_scaled):
return torch.as_tensor(data_scaled)
def state_dict(self):
return {"mean": self.mean, "std": self.std}
def load_state_dict(self, state_dict):
self.mean = state_dict["mean"]
self.std = state_dict["std"]
# %%
class EDMDataset(Dataset):
"""
Get X and y from EDM dataset.
"""
def __init__(self, dataset, n_comp):
self.data = dataset
self.n_comp = n_comp
self.X = np.array(self.data[0])
self.y = np.array(self.data[1])
self.formula = np.array(self.data[2])
self.cat_feat = np.array(self.data[3])
self.bool_src = np.array(self.data[4])
self.float_feat = np.array(self.data[5])
self.shape = [
(self.X.shape),
(self.y.shape),
(self.formula.shape),
(self.cat_feat.shape),
(self.bool_src.shape),
(self.float_feat.shape),
]
def __str__(self):
string = f"EDMDataset with X.shape {self.X.shape}"
return string
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
X = self.X[idx, :, :]
y = self.y[idx]
formula = self.formula[idx]
cat_feat = self.cat_feat[idx]
bool_src = self.bool_src[idx]
float_feat = self.float_feat[idx]
X = torch.as_tensor(X, dtype=data_type_torch)
y = torch.as_tensor(y, dtype=data_type_torch)
return (X, y, formula, cat_feat, bool_src, float_feat)
"""mini code graveyard"""
"""
if self.cat_feat.size != 0:
cat_feat = self.cat_feat[idx]
else:
cat_feat = np.empty(0,1)
if self.bool_src.size != 0:
bool_src = self.bool_src[idx]
else:
bool_src = np.empty(0,1)
if self.float_feat.size != 0:
float_feat = self.float_feat[idx]
else:
float_feat = np.empty(0,1)
"""
def get_edm(
path,
elem_prop="mat2vec",
n_elements="infer",
inference=False,
verbose=True,
load_type="EDM",
robo_dir="data/structure_properties/",
):
"""
Build an element-derived matrix (EDM), structure/element-derived matrix (SEDM), or structure-derived matrix (SDM).
Parameters
----------
path : str
DESCRIPTION.
elem_prop : str, optional
DESCRIPTION. The default is 'oliynyk'.
load_type : str, optional
DESCRIPTION. which type of matrix to load ('EDM', 'SEDM', or 'SDM')
Returns
-------
X_scaled : TYPE
DESCRIPTION.
y : TYPE
DESCRIPTION.
formula : TYPE
DESCRIPTION.
"""
all_symbols = [
"H",
"He",
"Li",
"Be",
"B",
"C",
"N",
"O",
"F",
"Ne",
"Na",
"Mg",
"Al",
"Si",
"P",
"S",
"Cl",
"Ar",
"K",
"Ca",
"Sc",
"Ti",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"Cu",
"Zn",
"Ga",
"Ge",
"As",
"Se",
"Br",
"Kr",
"Rb",
"Sr",
"Y",
"Zr",
"Nb",
"Mo",
"Tc",
"Ru",
"Rh",
"Pd",
"Ag",
"Cd",
"In",
"Sn",
"Sb",
"Te",
"I",
"Xe",
"Cs",
"Ba",
"La",
"Ce",
"Pr",
"Nd",
"Pm",
"Sm",
"Eu",
"Gd",
"Tb",
"Dy",
"Ho",
"Er",
"Tm",
"Yb",
"Lu",
"Hf",
"Ta",
"W",
"Re",
"Os",
"Ir",
"Pt",
"Au",
"Hg",
"Tl",
"Pb",
"Bi",
"Po",
"At",
"Rn",
"Fr",
"Ra",
"Ac",
"Th",
"Pa",
"U",
"Np",
"Pu",
"Am",
"Cm",
"Bk",
"Cf",
"Es",
"Fm",
"Md",
"No",
"Lr",
"Rf",
"Db",
"Sg",
"Bh",
"Hs",
"Mt",
"Ds",
"Rg",
"Cn",
"Nh",
"Fl",
"Mc",
"Lv",
"Ts",
"Og",
]
# mat_prop = 'phonons'
# i = 0
# path = rf'data\matbench_cv\{mat_prop}\test{i}.csv'
df = pd.read_csv(path, keep_default_na=False, na_values=[""])
if "formula" not in df.columns.values.tolist():
df["formula"] = df["cif_id"].str.split("_ICSD").str[0]
# drop pure elements
df["count"] = [len(_element_composition(form)) for form in df["formula"]]
df = df[df["count"] != 1]
# mean of duplicates
if not inference:
df = df.groupby(by=["formula", "task_id"]).mean().reset_index()
list_ohm = [OrderedDict(_element_composition(form)) for form in df["formula"]]
list_ohm = [
OrderedDict(sorted(mat.items(), key=lambda x: -x[1])) for mat in list_ohm
]
y = df["target"].values.astype(data_type_np)
formula = df["formula"].values
if n_elements == "infer":
n_elements = 16
edm_array = np.zeros(
shape=(len(list_ohm), n_elements, len(all_symbols) + 1), dtype=data_type_np
)
elem_num = np.zeros(shape=(len(list_ohm), n_elements), dtype=data_type_np)
elem_frac = np.zeros(shape=(len(list_ohm), n_elements), dtype=data_type_np)
for i, comp in enumerate(
tqdm(list_ohm, desc="Generating EDM", unit="formulae", disable=not verbose)
):
for j, (elem, count) in enumerate(list_ohm[i].items()):
if j == n_elements:
# Truncate EDM representation to n_elements
break
try:
edm_array[i, j, all_symbols.index(elem) + 1] = count
elem_num[i, j] = all_symbols.index(elem) + 1
except ValueError:
print(f"skipping composition {comp}")
# Scale features
for i in range(edm_array.shape[0]):
frac = edm_array[i, :, :].sum(axis=-1) / (edm_array[i, :, :].sum(axis=-1)).sum()
elem_frac[i, :] = frac
if n_elements == 16:
n_elements = np.max(np.sum(elem_frac > 0, axis=1, keepdims=True))
elem_num = elem_num[:, :n_elements]
elem_frac = elem_frac[:, :n_elements]
elem_num = elem_num.reshape(elem_num.shape[0], elem_num.shape[1], 1)
elem_frac = elem_frac.reshape(elem_frac.shape[0], elem_frac.shape[1], 1)
out = np.concatenate((elem_num, elem_frac), axis=1)
    if load_type in ("SEDM", "SDM"):
if "task_id" not in df.columns.values.tolist():
raise LookupError(
"task_id column should be present in CSV if load_type is SEDM or SDM"
)
task_id = list(df["task_id"])
robo_path = join(robo_dir, "robocrys_features.csv")
robo_df = pd.read_csv(robo_path, keep_default_na=False, na_values=[""])
# filter down to relevant task_ids
robo_df = robo_df[robo_df["task_id"].isin(task_id)]
replace_keys = list(robo_df["task_id"])
replace_vals = list(robo_df.drop(columns="task_id").to_dict("index").values())
# apply replacement rules (e.g. 'mp-134' -> data)
robo_sub_df = df["task_id"].replace(replace_keys, replace_vals)
# find where robo_df is lacking in replace_keys if any
types = list(map(type, robo_sub_df))
TF = [t == str for t in types]
idx = np.where(np.array(TF))[0].tolist()
if len(idx) > 0:
warn(
f"Number of replace_keys missing in robo file: {len(idx)} ... filling with zeros. Printing indices if less than 20 total."
)
if len(idx) < 20:
print(idx)
# replace with empty dict
robo_sub_df[idx] = [{}] * len(idx)
# split/expand replace_vals into multiple columns
# robo_sub_df = pd.json_normalize(robo_sub_df) # dtypes not parsed correctly.., other errors
# robo_sub_df = robo_sub_df.explode(ignore_index=True)
# robo_sub_df = pd.DataFrame(robo_sub_df)
# slow, but seems more reliable than pd.json_normalize
robo_sub_df = robo_sub_df.apply(pd.Series)
robo_sub_df.fillna(False, inplace=True)
# pd.DataFrame(robo_sub_df.values.tolist())
robocrys_feat = robo_sub_df.drop(columns="pretty_formula")
# robo_cols = robocrys_feat.columns.values.tolist()
# Initialize
cat_feat = | pd.DataFrame() | pandas.DataFrame |
'''
Created on Dec 24, 2018
@author: mkim1
'''
import numpy as np
from scipy.stats import sem
import statsmodels.formula.api as smf
import pandas as pd
from uncertainties import ufloat
from uncertainties.umath import exp as uexp
from statsmodels.regression.quantile_regression import QuantReg
import logging
logging.basicConfig(level=logging.ERROR) #For the case when verbose=False
logger = logging.getLogger(__name__) #Generate logger for this module
from scipy.special import comb
def coeffs(M):
"""
Generate the "Smooth noise-robust differentiators" as defined in Pavel
Holoborodko's formula for c_k
Parameters
----------
M : int
the order of the differentiator

    Returns
    -------
    c : float array of length M
        coefficients for k = 1 to M
"""
m = (2*M - 2)/2
k = np.arange(1, M+1)
c = 1./2.**(2*m + 1)*(comb(2*m, m - k + 1) - comb(2*m, m - k - 1))
return c
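# Sanity check: coeffs(2) gives [0.25, 0.125], the weights of the classic 5-point smooth
# noise-robust differentiator (values computed from the formula above).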
def holo_diff(x,y,M=2):
"""
Implementation of Pavel Holoborodko's method of "Smooth noise-robust
differentiators" see
http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/
smooth-low-noise-differentiators
Creates a numerical approximation to the first derivative of a function
defined by data points. End point approximations are found from
approximations of lower order. Greater smoothing is achieved by using a
larger value for the order parameter, M.
Parameters
----------
x : float array or scalar
abscissa values of function or, if scalar, uniform step size
y : float array
ordinate values of function (same length as x if x is an array)
M : int, optional (default = 2)
order for the differentiator - will use surrounding 2*M + 1 points in
creating the approximation to the derivative
Returns
-------
dydx : float array
numerical derivative of the function of same size as y
"""
if np.isscalar(x):
x = x*np.arange(len(y))
assert len(x) == len(y), 'x and y must have the same length if x is ' + \
'an array, len(x) = {}, len(y) = {}'.format(len(x),len(y))
N = 2*M + 1
m = (N - 3)/2
c = coeffs(M)
df = np.zeros_like(y)
nf = len(y)
fk = np.zeros((M,(nf - 2*M)))
for i,cc in enumerate(c):
# k runs from 1 to M
k = i + 1
ill = M - k
ilr = M + k
iul = -M - k
# this formulation is needed for the case the k = M, where the desired
# index is the last one -- but range must be given as [-2*M:None] to
# include that last point
iur = ((-M + k) or None)
fk[i,:] = 2.*k*cc*(y[ilr:iur] - y[ill:iul])/(x[ilr:iur] -
x[ill:iul])
df[M:-M] = fk.sum(axis=0)
# may want to incorporate a variety of methods for getting edge values,
# e.g. setting them to 0 or just using closest value with M of the ends.
# For now we recursively calculate values closer to the edge with
# progressively lower order approximations -- which is in some sense
# ideal, though maybe not for all cases
if M > 1:
dflo = holo_diff(x[:2*M],y[:2*M],M=M-1)
dfhi = holo_diff(x[-2*M:],y[-2*M:],M=M-1)
df[:M] = dflo[:M]
df[-M:] = dfhi[-M:]
else:
df[0] = (y[1] - y[0])/(x[1] - x[0])
df[-1] = (y[-1] - y[-2])/(x[-1] - x[-2])
return df
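# Usage sketch (synthetic data, illustration only):
#   t = np.linspace(0, 10, 201)
#   y = np.sin(t) + 0.01 * np.random.randn(t.size)
#   dydt = holo_diff(t, y, M=4)   # smoothed estimate of cos(t), less noisy than np.gradient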
class init(object):
def __init__(self, q, t, correction_ET=None, verbose=False):
self.q = q
self.t = t
if correction_ET is None:
self.correction = np.ones_like(q)
else:
self.correction = correction_ET
if verbose:
logger.setLevel('DEBUG')
def def_period(self, exclude=None, merge_gap=0, min_length=0):
if exclude is None:
exclude = np.zeros_like(self.t, dtype = bool)
dec = ~exclude
# find the start and end of the recession events
idx_st = np.where(np.logical_and(dec[1:], ~dec[:-1]))[0]+1
if dec[0]:
idx_st = np.append(0, idx_st)
idx_end = np.where(np.logical_and(dec[:-1], ~dec[1:]))[0]+1
if dec[-1]:
idx_end = np.append(idx_end,len(dec))
# merge events if they are mergable
self.merge_gap = merge_gap
if merge_gap>0:
gaps = (idx_st[1:] - idx_end[:-1]) - 1
mergable = np.where(gaps<=merge_gap,True,False) #True when mergable
#need to check if [y:x+1] is okay
mergable_exclude = ~np.array([True in exclude[y:x+1] for x, y in zip(idx_st[1:], idx_end[:-1])] ) #True when mergable
idx_st = idx_st[np.append(True,~np.logical_and(mergable, mergable_exclude))]
idx_end = idx_end[np.append(~np.logical_and(mergable, mergable_exclude),True)]
# remove if they are too short
if min_length>1:
too_short = (idx_end - idx_st)<min_length
idx_end = idx_end[~too_short]
idx_st = idx_st[~too_short]
self.events = [event_obj(idx_st[i] , idx_end[i] , self.q[idx_st[i]:idx_end[i]+1] , self.t[idx_st[i]:idx_end[i]+1], self.correction[idx_st[i]:idx_end[i]+1]) for i in range(len(idx_st))]
def est_dqdt(self, grad_mult = 1.0, method = None):
self.corh = {}
self.th = {}
self.qh = {}
self.dq_dt = {}
for idx,item in enumerate(method):
self.corh[item] = np.array([])
self.th[item] = np.array([])
self.qh[item] = np.array([])
self.dq_dt[item] = np.array([])
for R in self.events:
R.getdqdt_method(method = method, grad_mult = grad_mult)
for _,item in enumerate(method):
self.corh[item] = np.append(self.corh[item], R.corh[item])
self.th[item] = np.append(self.th[item], R.th[item])
self.qh[item] = np.append(self.qh[item], R.qh[item])
self.dq_dt[item] = np.append(self.dq_dt[item], R.dq_dt[item])
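    # Illustrative workflow sketch (assumption; event_obj and the method names
    # accepted by getdqdt_method are defined elsewhere and not shown in this extract):
    #
    #     rec = init(q, t)
    #     rec.def_period(merge_gap=2, min_length=5)
    #     rec.est_dqdt(method=['backward'])   # 'backward' is a hypothetical method name
    #     rec.dq_dt['backward']               # pooled per-event dq/dt estimates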
def _fitgfunc_set(self, obj, g_func, p0=None,method_dqdt = None, method_fit = None, regression_function = None, uncertainty = True):
if (method_fit == 'ols') or (method_fit == 'quantile'):
if len(obj.dq_dt[method_dqdt])>0:
x = np.log(np.array(obj.qh[method_dqdt]))
y = np.log(obj.dq_dt[method_dqdt] * obj.corh[method_dqdt])
#Remove data when discharge <= 0
y = y[~np.isinf(x)]
x = x[~np.isinf(x)]
x = x[~np.isinf(y)]
y = y[~np.isinf(y)]
if len(x)>0:
d = {'x': x, 'y': y}
                    df = pd.DataFrame(data=d)
#!/usr/bin/env python
"""
Convert PCAP output to undirected graph and save in Parquet format.
"""
from __future__ import print_function
import re
import socket
import struct
import sys
import fastparquet as fp
import numpy as np
import pandas as pd
def ip_to_integer(s):
return struct.unpack("!I", socket.inet_aton(s))[0]
def get_ip_protocol(s):
if "tcp" in s:
return "tcp"
if "UDP" in s:
return "udp"
if "EIGRP" in s:
return "eigrp"
if "ICMP" in s:
return "icmp"
return None
def to_parquet(filename, prefix="maccdc2012"):
with open(filename) as f:
traffic = {}
nodes = set()
for line in f.readlines():
if "unreachable" in line:
continue
fields = line.split()
if not fields:
continue
if fields[1] != "IP":
continue
protocol = get_ip_protocol(line)
if protocol not in ("tcp", "udp", "eigrp", "icmp"):
continue
try:
addresses = []
# Extract source IP address and convert to integer
m = re.match(r'(?P<address>\d+\.\d+\.\d+\.\d+)', fields[2])
if not m:
continue
addresses.append(ip_to_integer(m.group('address')))
# Extract target IP address and convert to integer
m = re.match(r'(?P<address>\d+\.\d+\.\d+\.\d+)', fields[4])
if not m:
continue
addresses.append(ip_to_integer(m.group('address')))
nodes = nodes.union(addresses)
src, dst = sorted(addresses)
key = (protocol, src, dst)
# Extract packet size
nbytes = int(fields[-1])
if key in traffic:
traffic[key] += nbytes
else:
traffic[key] = nbytes
            except (IndexError, ValueError, socket.error):
                # Skip lines that do not parse as expected (malformed addresses,
                # missing fields, or a non-numeric packet size).
                pass
nodes = dict([(node, i) for i, node in enumerate(sorted(nodes))])
edges = []
for key in traffic:
edge = [nodes[key[1]], nodes[key[2]], key[0], traffic[key]]
edges.append(edge)
nodes_df = pd.DataFrame(np.arange(len(nodes)), columns=['id'])
nodes_df = nodes_df.set_index('id')
edges_df = pd.DataFrame(np.array(edges), columns=['source', 'target', 'protocol', 'weight'])
edges_df['source'] = pd.to_numeric(edges_df['source'])
edges_df['target'] = pd.to_numeric(edges_df['target'])
        edges_df['weight'] = pd.to_numeric(edges_df['weight'])
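        # NOTE: the remainder of to_parquet() is not included in this extract. A
        # minimal sketch of persisting both frames with fastparquet (the file
        # names and use of the `prefix` argument are assumptions, not the
        # original author's code):
        #
        #     fp.write("{0}_nodes.parq".format(prefix), nodes_df)
        #     fp.write("{0}_edges.parq".format(prefix), edges_df)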
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
store = pd.read_csv('store.csv')
#1. Remove extreme values from Sales. Since the effect of outliers on the result is hard to
#   estimate, the model can be fitted twice: once with outliers removed and once without.
#   Also try a filter based on the standard deviation / standard error.
def rm_outliers(df):
q1 = np.percentile(df['Sales'], 25, axis=0)
q3 = np.percentile(df['Sales'], 75, axis=0)
k = 2.5
iqr = q3 - q1
df = df[df['Sales'] > q1 - k*iqr]
df = df[df['Sales'] < q3 + k*iqr]
return df
def rm_outliers_std(df):
std = df['Sales'].std()
mean = df['Sales'].mean()
k = 3
df = df[df['Sales'] > mean - k*std]
df = df[df['Sales'] < mean + k*std]
return df
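# Illustrative usage sketch (assumption, not part of the original script), following
# the "fit twice" idea above:
#
#     train_iqr_filtered = rm_outliers(train)
#     train_std_filtered = rm_outliers_std(train)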
#2. Split the date into its components
def data_processing(df):
    df['Date'] = pd.to_datetime(df['Date'])
import datetime
import os
import shutil
import unittest
from copy import deepcopy
from typing import Optional, Tuple, Any, Callable, Dict, Sequence, List
from unittest.mock import patch
import pandas as pd
from pandas.testing import assert_frame_equal
from datacode.models.column.column import Column
from datacode.models.dtypes.str_type import StringType
from datacode.models.source import DataSource
from datacode.models.variables import Variable
from datacode.models.variables.expression import Expression
from datacode import Transform, DataOutputNotSafeException, Index, ColumnIndex
from tests.utils import GENERATED_PATH, assert_frame_not_equal
def transform_cell_data_func(col: Column, variable: Variable, cell: Any) -> Any:
if isinstance(cell, str):
return cell
return cell + 1
def transform_series_data_func(col: Column, variable: Variable, series: pd.Series) -> pd.Series:
return series + 1
def transform_dataframe_data_func(col: Column, variable: Variable, df: pd.DataFrame) -> pd.DataFrame:
df[variable.name] = df[variable.name] + 1
return df
def transform_source_data_func(col: Column, variable: Variable, source: DataSource) -> DataSource:
# Extra unnecessary logic to access source.columns to test looking up columns
cols = source.columns
for this_col in cols:
if not this_col.variable.key == col.variable.key:
continue
if not variable.dtype.is_numeric:
continue
source.df[variable.name] = source.df[variable.name] + 1
return source
def expression_series_func(cols: Sequence[Column]) -> pd.Series:
return cols[0].series + cols[1].series
class SourceTest(unittest.TestCase):
test_df = pd.DataFrame(
[
(1, 2, 'd'),
(3, 4, 'd'),
(5, 6, 'e')
],
columns=['a', 'b', 'c']
)
expect_df_no_rename_a_plus_1 = pd.DataFrame(
[
(2, 2, 'd'),
(4, 4, 'd'),
(6, 6, 'e')
],
columns=['a', 'b', 'c']
)
expect_df_no_rename_a_plus_2 = pd.DataFrame(
[
(3, 2, 'd'),
(5, 4, 'd'),
(7, 6, 'e')
],
columns=['a', 'b', 'c']
)
expect_loaded_df_rename_only = pd.DataFrame(
[
(1, 2, 'd'),
(3, 4, 'd'),
(5, 6, 'e')
],
columns=['A', 'B', 'C'],
).convert_dtypes()
expect_loaded_df_rename_only_indexed_c = expect_loaded_df_rename_only.set_index('C')
expect_loaded_df_rename_only_a_indexed_c = expect_loaded_df_rename_only_indexed_c.drop('B', axis=1)
expect_loaded_df_rename_only_a_b = pd.DataFrame(
[
(1, 2,),
(3, 4,),
(5, 6,)
],
columns=['A', 'B']
).convert_dtypes()
expect_loaded_df_with_transform = pd.DataFrame(
[
(2, 3, 'd'),
(4, 5, 'd'),
(6, 7, 'e')
],
columns=['A_1', 'B_1', 'C']
).convert_dtypes()
expect_loaded_df_with_a_and_a_transform = pd.DataFrame(
[
(1, 2, 2, 'd'),
(3, 4, 4, 'd'),
(5, 6, 6, 'e')
],
columns=['A', 'A_1', 'B', 'C']
).convert_dtypes()
expect_loaded_df_with_a_transform_and_a = pd.DataFrame(
[
(2, 1, 2, 'd'),
(4, 3, 4, 'd'),
(6, 5, 6, 'e')
],
columns=['A_1', 'A', 'B', 'C']
).convert_dtypes()
expect_loaded_df_with_transform_only_a_b = pd.DataFrame(
[
(2, 3,),
(4, 5,),
(6, 7,)
],
columns=['A_1', 'B_1']
).convert_dtypes()
expect_loaded_df_with_transform_and_a_pre_transformed = pd.DataFrame(
[
(1, 3, 'd'),
(3, 5, 'd'),
(5, 7, 'e')
],
columns=['A_1', 'B_1', 'C']
).convert_dtypes()
expect_loaded_df_with_calculated = pd.DataFrame(
[
(1, 2, 'd', 3),
(3, 4, 'd', 7),
(5, 6, 'e', 11)
],
columns=['A', 'B', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculated_c_d_only = pd.DataFrame(
[
('d', 3),
('d', 7),
('e', 11)
],
columns=['C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculated_transformed = pd.DataFrame(
[
(1, 2, 'd', 4),
(3, 4, 'd', 8),
(5, 6, 'e', 12)
],
columns=['A', 'B', 'C', 'D_1'],
).convert_dtypes()
expect_loaded_df_with_calculated_and_calculated_transformed = pd.DataFrame(
[
(1, 2, 'd', 3, 4),
(3, 4, 'd', 7, 8),
(5, 6, 'e', 11, 12)
],
columns=['A', 'B', 'C', 'D', 'D_1'],
).convert_dtypes()
expect_loaded_df_with_calculated_transformed_and_calculated = pd.DataFrame(
[
(1, 2, 'd', 4, 3),
(3, 4, 'd', 8, 7),
(5, 6, 'e', 12, 11)
],
columns=['A', 'B', 'C', 'D_1', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculate_on_transformed_before_transform = pd.DataFrame(
[
(2, 3, 'd', 3),
(4, 5, 'd', 7),
(6, 7, 'e', 11)
],
columns=['A_1', 'B_1', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculate_on_transformed_after_transform = pd.DataFrame(
[
(2, 3, 'd', 5),
(4, 5, 'd', 9),
(6, 7, 'e', 13)
],
columns=['A_1', 'B_1', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculate_on_transformed_before_and_after_transform = pd.DataFrame(
[
(2, 3, 'd', 4),
(4, 5, 'd', 8),
(6, 7, 'e', 12)
],
columns=['A_1', 'B_1', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_categorical = expect_loaded_df_rename_only.copy()
expect_loaded_df_categorical['C'] = expect_loaded_df_categorical['C'].astype('category')
transform_name_func = lambda x: f'{x}_1'
transform_cell = Transform('add_one_cell', transform_name_func, transform_cell_data_func, data_func_target='cell')
transform_series = Transform('add_one_series', transform_name_func, transform_series_data_func, data_func_target='series')
transform_dataframe = Transform('add_one_df', transform_name_func, transform_dataframe_data_func, data_func_target='dataframe')
transform_source = Transform('add_one_source', transform_name_func, transform_source_data_func, data_func_target='source')
csv_path = os.path.join(GENERATED_PATH, 'data.csv')
def setup_method(self, *args, **kwargs):
if os.path.exists(GENERATED_PATH):
shutil.rmtree(GENERATED_PATH)
os.makedirs(GENERATED_PATH)
def teardown_method(self, *args, **kwargs):
shutil.rmtree(GENERATED_PATH)
def create_source(self, **kwargs) -> DataSource:
config_dict = dict(
df=self.test_df,
location=self.csv_path,
)
config_dict.update(kwargs)
return DataSource(**config_dict)
def get_transform(self, func_type: str) -> Transform:
if func_type == 'cell':
return self.transform_cell
elif func_type == 'series':
return self.transform_series
elif func_type == 'dataframe':
return self.transform_dataframe
elif func_type == 'source':
return self.transform_source
else:
raise ValueError(
f'could not look up func_type {func_type}, should be one of cell, series, dataframe, source')
def create_csv(self, df: Optional[pd.DataFrame] = None, **to_csv_kwargs):
if df is None:
df = self.test_df
df.to_csv(self.csv_path, index=False, **to_csv_kwargs)
def get_transform_dict(self, transform_data: str = '', apply_transforms: bool = True):
if transform_data:
transform = self.get_transform(transform_data)
transform_dict = dict(
available_transforms=[transform],
)
if apply_transforms:
transform_dict['applied_transforms'] = [transform]
else:
transform_dict = {}
return transform_dict
def create_variables(self, transform_data: str = '', apply_transforms: bool = True) -> Tuple[Variable, Variable, Variable]:
transform_dict = self.get_transform_dict(transform_data=transform_data, apply_transforms=apply_transforms)
a = Variable('a', 'A', dtype='int', **transform_dict)
b = Variable('b', 'B', dtype='int', **transform_dict)
c = Variable('c', 'C', dtype='str')
return a, b, c
def create_columns(self, transform_data: str = '', apply_transforms: bool = True) -> List[Column]:
a, b, c = self.create_variables(transform_data=transform_data, apply_transforms=apply_transforms)
ac = Column(a, 'a')
bc = Column(b, 'b')
cc = Column(c, 'c')
return [
ac,
bc,
cc
]
def create_c_index(self) -> Index:
c_index = Index('c', dtype=StringType(categorical=True))
return c_index
def create_variables_and_c_colindex(self, transform_data: str = '', apply_transforms: bool = True
) -> Tuple[List[Variable], ColumnIndex]:
a, b, c = self.create_variables(transform_data=transform_data, apply_transforms=apply_transforms)
c_index = self.create_c_index()
c_col_index = ColumnIndex(c_index, [c])
return [a, b, c], c_col_index
def create_indexed_columns(self, transform_data: str = '', apply_transforms: bool = True) -> List[Column]:
(a, b, c), c_col_index = self.create_variables_and_c_colindex(
transform_data=transform_data, apply_transforms=apply_transforms
)
ac = Column(a, 'a', indices=[c_col_index])
bc = Column(b, 'b', indices=[c_col_index])
cc = Column(c, 'c')
return [
ac,
bc,
cc
]
class TestCreateSource(SourceTest):
def test_create_source_from_df(self):
ds = self.create_source(location=None)
assert_frame_equal(ds.df, self.test_df)
def test_create_source_from_file_path(self):
self.create_csv()
ds = self.create_source(df=None)
assert_frame_equal(ds.df, self.test_df)
def test_create_source_with_columns(self):
all_cols = self.create_columns()
ds = self.create_source(location=None, columns=all_cols)
assert ds.columns == all_cols
def test_graph(self):
ds = self.create_source(location=None)
# Need to do a better job with this test, see TestDataMergePipeline.test_graph
ds.graph
class TestLoadSource(SourceTest):
def test_load_with_columns(self):
self.create_csv()
all_cols = self.create_columns()
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only)
def test_load_with_columns_subset(self):
self.create_csv()
all_cols = self.create_columns()
all_vars = self.create_variables()
var_subset = [var for var in all_vars if var.key != 'c']
ds = self.create_source(df=None, columns=all_cols, load_variables=var_subset)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only_a_b)
    def test_with_repeated_variables_different_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
# First with original variable first, then transformation
load_variables = [
a,
a.add_one_cell(),
b,
c,
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
assert_frame_equal(ds.df, self.expect_loaded_df_with_a_and_a_transform)
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
# Now with transformation first, then original variable
load_variables = [
a.add_one_cell(),
a,
b,
c,
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
assert_frame_equal(ds.df, self.expect_loaded_df_with_a_transform_and_a)
def test_load_with_repeated_variable_names_raises_error(self):
self.create_csv()
all_cols = self.create_columns()
all_cols.append(deepcopy(all_cols[2]))
with self.assertRaises(ValueError) as cm:
ds = self.create_source(df=None, columns=all_cols)
exc = cm.exception
assert 'variable name C repeated in load variables' in str(exc)
def test_with_columns_and_load_variables_with_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
load_variables = [
a.add_one_cell(),
b.add_one_cell(),
c
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_cell(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_series(self):
self.create_csv()
all_cols = self.create_columns(transform_data='series')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_dataframe(self):
self.create_csv()
all_cols = self.create_columns(transform_data='dataframe')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_source(self):
self.create_csv()
all_cols = self.create_columns(transform_data='source')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_transforms_and_pre_applied_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell')
a, b, c = self.create_variables(transform_data='cell')
all_cols[0] = Column(a, 'a', applied_transform_keys=['add_one_cell'])
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform_and_a_pre_transformed)
def test_load_with_categorical(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
all_cols[2] = Column(c, 'c', dtype=StringType(categorical=True))
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_categorical)
def test_load_with_datetime(self):
test_df = self.test_df.copy()
test_df['d'] = pd.to_datetime('1/1/2000')
self.create_csv(df=test_df)
expect_df = self.expect_loaded_df_rename_only.copy()
expect_df['Date'] = pd.to_datetime('1/1/2000')
date_var = Variable('Date', dtype='datetime')
date_col = Column(date_var, 'd')
all_cols = self.create_columns()
all_cols.append(date_col)
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, expect_df)
def test_load_with_calculated_variable(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
d = Variable('d', 'D', calculation=a + b)
ds = self.create_source(df=None, columns=all_cols, load_variables=[a, b, c, d])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculated)
def test_load_with_calculated_variable_from_func(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
expr = Expression([a, b], func=expression_series_func, summary='Add em up')
d = Variable('d', 'D', calculation=expr)
ds = self.create_source(df=None, columns=all_cols, load_variables=[a, b, c, d])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculated)
def test_load_with_calculated_transformed_variable(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
tran = self.get_transform('cell')
d = Variable('d', 'D', calculation=a + b, available_transforms=[tran])
ds = self.create_source(df=None, columns=all_cols, load_variables=[a, b, c, d.add_one_cell()])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculated_transformed)
def test_load_with_calculated_and_same_calculated_variable_transformed(self):
self.create_csv()
# Try with plain calculated variable first
all_cols = self.create_columns()
a, b, c = self.create_variables()
tran = self.get_transform('cell')
d = Variable('d', 'D', calculation=a + b, available_transforms=[tran])
load_vars = [
a,
b,
c,
d,
d.add_one_cell()
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_vars)
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculated_and_calculated_transformed)
# Now try with plain calculated variable second
all_cols = self.create_columns()
a, b, c = self.create_variables()
tran = self.get_transform('cell')
d = Variable('d', 'D', calculation=a + b, available_transforms=[tran])
load_vars = [
a,
b,
c,
d.add_one_cell(),
d,
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_vars)
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculated_transformed_and_calculated)
def test_load_with_calculate_on_transformed_after_transform(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables(transform_data='cell')
d = Variable('d', 'D', calculation=a + b)
ds = self.create_source(df=None, columns=all_cols, load_variables=[a, b, c, d])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculate_on_transformed_after_transform)
def test_load_with_calculate_on_transformed_before_transform(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
d = Variable('d', 'D', calculation=a + b)
ds = self.create_source(df=None, columns=all_cols, load_variables=[a.add_one_cell(), b.add_one_cell(), c, d])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculate_on_transformed_before_transform)
def test_load_with_calculate_on_transformed_before_and_after_transform(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
d = Variable('d', 'D', calculation=a + b.add_one_cell())
ds = self.create_source(df=None, columns=all_cols, load_variables=[a.add_one_cell(), b.add_one_cell(), c, d])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculate_on_transformed_before_and_after_transform)
def test_load_with_calculated_variable_using_non_passed_load_variables(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
d = Variable('d', 'D', calculation=a + b)
ds = self.create_source(df=None, columns=all_cols, load_variables=[c, d])
assert_frame_equal(ds.df, self.expect_loaded_df_with_calculated_c_d_only)
class TestDunders(SourceTest):
def test_str(self):
source = self.create_source(location=None)
print(source)
class TestTransform(SourceTest):
def test_transform_existing_source(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
orig_ds = self.create_source(df=None, columns=all_cols, load_variables=[a, b])
all_ds = []
all_ds.append(self.transform_cell.apply_to_source(orig_ds))
all_ds.append(self.transform_series.apply_to_source(orig_ds))
all_ds.append(self.transform_dataframe.apply_to_source(orig_ds))
all_ds.append(self.transform_source.apply_to_source(orig_ds))
for ds in all_ds:
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform_only_a_b)
def test_transform_subset_existing_source(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
orig_ds = self.create_source(df=None, columns=all_cols)
all_ds = []
all_ds.append(self.transform_cell.apply_to_source(orig_ds, subset=[a, b]))
all_ds.append(self.transform_series.apply_to_source(orig_ds, subset=[a, b]))
all_ds.append(self.transform_dataframe.apply_to_source(orig_ds, subset=[a, b]))
all_ds.append(self.transform_source.apply_to_source(orig_ds, subset=[a, b]))
for ds in all_ds:
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
class TestOutput(SourceTest):
def test_save_then_load(self):
self.create_csv()
all_cols = self.create_columns()
ds = self.create_source(df=None, columns=all_cols)
ds.output()
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only)
def test_save_then_load_with_indices(self):
self.create_csv()
all_cols = self.create_indexed_columns()
ds = self.create_source(df=None, columns=all_cols)
ds.output()
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only_indexed_c)
def test_save_then_load_variable_subset(self):
self.create_csv()
all_cols = self.create_columns()
all_vars = self.create_variables()
var_subset = [var for var in all_vars if var.key != 'c']
# Test when in safe mode, raises error because will delete data
ds = self.create_source(df=None, columns=all_cols, load_variables=var_subset)
with self.assertRaises(DataOutputNotSafeException) as cm:
ds.output()
# Test when bypassing safe mode, can save and load properly
ds = self.create_source(
df=None, columns=all_cols, load_variables=var_subset, data_outputter_kwargs=dict(safe=False)
)
ds.output()
ds = self.create_source(df=None, columns=all_cols, load_variables=var_subset)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only_a_b)
def test_save_then_load_variable_subset_and_indices(self):
self.create_csv()
all_cols = self.create_indexed_columns()
(a, b, c), c_col_index = self.create_variables_and_c_colindex()
var_subset = [a, c]
# Test when in safe mode, raises error because will delete data
ds = self.create_source(df=None, columns=all_cols, load_variables=var_subset)
with self.assertRaises(DataOutputNotSafeException) as cm:
ds.output()
# Test when bypassing safe mode, can save and load properly
ds = self.create_source(
df=None, columns=all_cols, load_variables=var_subset, data_outputter_kwargs=dict(safe=False)
)
ds.output()
ds = self.create_source(df=None, columns=all_cols, load_variables=var_subset)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only_a_indexed_c)
def test_save_then_load_with_transformations(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
load_variables = [
a.add_one_cell(),
b.add_one_cell(),
c
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
ds.output()
# This works because the same variables and columns are used to load again
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
        assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import re
import sklearn.metrics.pairwise as pw
from scipy import sparse
from sklearn.metrics.pairwise import pairwise_distances
from scipy.sparse.linalg import svds
def recommend_books_userbased(userID):
    books_details_df = pd.read_csv('C:/Users/Nikhita/Desktop/Dataset/Final/final_book_details.csv')
import numpy as np
import pandas as pd
import utils
class Indicators:
def __init__(self, stock, start_date, end_date):
self.stock = stock
self.start_date = start_date
self.end_date = end_date
self.data = utils.read_stock_data(stock)
def calculate_all_indicators(self):
indicators = [
self.adj_close_price(),
self.bollinger_bands(),
self.cci(4),
self.cci(12),
self.cci(20),
self.ema(2),
self.ema(6),
self.ema(10),
self.ema(12),
self.macd(),
self.mfi(14),
self.mfi(16),
self.mfi(18),
self.obv(),
self.px_volume(),
self.rsi(6),
self.rsi(12),
self.sma(3),
self.sma(10),
self.trix(),
self.volatility(2),
self.volatility(4),
self.volatility(6),
self.volatility(8),
self.volatility(10),
self.volatility(12),
self.volatility(14),
self.volatility(16),
self.volatility(18),
self.volatility(20),
self.willr()
]
dates = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)['Date']
df = pd.concat(indicators, axis=1)
return df.set_index(dates)
def adj_close_price(self):
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
return pd.DataFrame(df['Close'].values, columns=['Adj Close Price'])
def bollinger_bands(self):
window_len = 20
def Bollinger_Bands(stock_price, window_size, num_of_std):
rolling_mean = stock_price['Close'].rolling(window=window_size).mean()[window_size-1:]
rolling_std = stock_price['Close'].rolling(window=window_size).std()[window_size-1:]
upper_band = np.add(rolling_mean, rolling_std * num_of_std)
lower_band = np.subtract(rolling_mean, rolling_std * num_of_std)
return rolling_mean, upper_band, lower_band
prices = utils.include_n_days_before(self.data, window_len, self.start_date, self.end_date)
middle, upper, lower = Bollinger_Bands(prices, window_len, 2)
result_df = pd.DataFrame({'BB_Middle': middle.values, \
'BB_Upper': upper.values, 'BB_Lower': lower.values})
return result_df
def cci(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
df_after_start_date = df[num_days-1:]
def calculate_tp(t):
return(t['High']+t['Low']+t['Close'])/3
tp_df = df_after_start_date.apply(calculate_tp, 1)
# calculate TpAvg(t) where TpAvg(t,n)=Avg(Tp(t)) over [t, t-1, . . . , t-n+1];
tp_avg_df = df.apply(calculate_tp, 1)
tp_avg_df = tp_avg_df.rolling(window=num_days).mean()
tp_avg_df = tp_avg_df[(num_days-1):]
        # calculate MD(t), the mean absolute deviation of Tp from its moving average
        # (computed here over the full period rather than per rolling window)
        md = np.mean(np.absolute(np.subtract(tp_df, tp_avg_df)))
        # calculate CCI(t) = (Tp(t) - TpAvg(t,n)) / (0.15 * MD(t));
        # note: the conventional Lambert constant is 0.015, but 0.15 is kept as originally written
        cci = np.subtract(tp_df, tp_avg_df)/(0.15*md)
return pd.DataFrame(cci.values, columns=['CCI_{0}'.format(num_days)])
def ema(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
ema = df['Close'].ewm(span=num_days).mean()
ema = ema[num_days-1:]
return pd.DataFrame(ema.values, columns=['EMA_{0}'.format(num_days)])
def macd(self):
n_slow = 26
n_fast = 12
n_signal = 9
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
# Calculate MACD
ema_slow = df['Close'].ewm(span=n_slow, min_periods=1).mean()
ema_fast = df['Close'].ewm(span=n_fast, min_periods=1).mean()
macd = np.subtract(ema_fast, ema_slow)
# Calculate MACD signal
macd_signal = macd.ewm(span=n_signal, min_periods=1).mean()
# Calculate MACD histogram
macd_hist = np.subtract(macd, macd_signal)
result_df = pd.DataFrame({'MACD': macd.values, \
'MACD_Sig': macd_signal.values, \
'MACD_Hist': macd_hist.values})
return result_df
def mfi(self, num_days):
df = utils.include_n_days_before(self.data, num_days, self.start_date, self.end_date)
def Money_Flow_Index(window_df, tp_df, mf_df):
pos_mf = 0.0
neg_mf = 0.0
for i in range(len(window_df)):
tp = tp_df.iloc[i].item()
mf = mf_df.iloc[i].item()
if i == 0:
pos_mf += mf
else:
tp_before = tp_df.iloc[i-1].item()
if tp > tp_before:
pos_mf += mf
elif tp < tp_before:
neg_mf += mf
mfi = (pos_mf / (pos_mf + neg_mf)) * 100
return mfi
tp_df = (df['High']+df['Low']+df['Close'])/3
mf_df = tp_df * df['Volume']
col_name = 'MFI_{0}'.format(num_days)
mfi_df = pd.DataFrame(columns=[col_name])
for i in range(len(df)-num_days+1):
temp_df = df.iloc[i:i+num_days, :]
temp_tp_df = tp_df.iloc[i:i+num_days]
temp_mf_df = mf_df.iloc[i:i+num_days]
mfi = Money_Flow_Index(temp_df, temp_tp_df, temp_mf_df)
mfi_df = mfi_df.append(pd.DataFrame([mfi], columns=[col_name]), ignore_index=True)
return mfi_df
def momentum(self, num_days):
df = utils.include_n_days_before(self.data, num_days+1, self.start_date, self.end_date)
momentum = df['Close'].rolling(window=num_days+1)\
.apply(lambda t: t[num_days]-t[0])
momentum = momentum[num_days:]
return pd.DataFrame(momentum.values, columns=['MOM_{0}'.format(num_days)])
def obv(self):
df = utils.include_n_days_before(self.data, 1, self.start_date, self.end_date)
obv_df = pd.DataFrame([0.0], columns=['OBV'])
obv = 0.0
for i in range(len(df)-1):
row_i = df.iloc[i]
row_i_1 = df.iloc[i+1]
volume = 0.0
if row_i_1['Close'] > row_i['Close']:
volume = row_i_1['Volume']
elif row_i_1['Close'] < row_i['Close']:
volume = row_i_1['Volume'] * -1
obv += volume
            obv_df = obv_df.append(pd.DataFrame([obv], columns=['OBV']), ignore_index=True)
        return obv_df
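# Illustrative usage sketch (assumption, not part of the original file; the class body
# above is truncated in this extract and utils.read_stock_data must resolve the ticker):
#
#     ind = Indicators('AAPL', '2018-01-01', '2018-12-31')
#     macd_df = ind.macd()
#     obv_df = ind.obv()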
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype,
)
from pandas.core.index import _ensure_index
from pandas.core.base import DataError
from modin.error_message import ErrorMessage
from modin.engines.base.block_partitions import BaseBlockPartitions
class PandasQueryCompiler(object):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self,
block_partitions_object: BaseBlockPartitions,
index: pandas.Index,
columns: pandas.Index,
dtypes=None,
):
assert isinstance(block_partitions_object, BaseBlockPartitions)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
    def __constructor__(self, block_partitions_object, index, columns, dtypes=None):
        """By default, constructor method will invoke an init"""
        return type(self)(block_partitions_object, index, columns, dtypes)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
map_func = self._prepare_method(lambda df: df.dtypes)
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
self._dtype_cache = self.data.full_reduce(map_func, dtype_builder, 0)
self._dtype_cache.index = self.columns
elif not self._dtype_cache.index.equals(self.columns):
self._dtype_cache.index = self.columns
return self._dtype_cache
def _set_dtype(self, dtypes):
self._dtype_cache = dtypes
dtypes = property(_get_dtype, _set_dtype)
# These objects are currently not distributed.
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _validate_set_axis(self, new_labels, old_labels):
new_labels = _ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = _ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = _ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
# END Index and columns objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix):
new_column_names = self.columns.map(lambda x: str(prefix) + str(x))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
def add_suffix(self, suffix):
new_column_names = self.columns.map(lambda x: str(x) + str(suffix))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(), self.index.copy(), self.columns.copy(), self._dtype_cache
)
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
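    # Illustrative sketch (assumption): given two compatible PandasQueryCompiler
    # objects qc_a and qc_b built on the same engine,
    #
    #     stacked = qc_a.concat(0, qc_b)    # row-wise, like pandas.concat(axis=0)
    #     joined = qc_a.join(qc_b, how="left", lsuffix="_l", rsuffix="_r")
    #
    # the heavy lifting is index alignment followed by appending the other
    # object's block partitions.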
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if isinstance(other, list):
return self._join_list_of_managers(other, **kwargs)
else:
return self._join_query_compiler(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
# Concatenating two managers requires aligning their indices. After the
# indices are aligned, it should just be a simple concatenation of the
# `BaseBlockPartitions` objects. This should not require remote compute.
joined_axis = self._join_index_objects(
axis,
[other.columns if axis == 0 else other.index for other in others],
join,
sort=sort,
)
# Since we are concatenating a list of managers, we will align all of
# the indices based on the `joined_axis` computed above.
to_append = [other.reindex(axis ^ 1, joined_axis).data for other in others]
new_self = self.reindex(axis ^ 1, joined_axis).data
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_query_compiler(self, other, **kwargs):
assert isinstance(
other, type(self)
), "This method is for data manager objects only"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(1, other.index, how, sort=sort)
to_join = other.reindex(0, joined_index).data
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# We are using proxy DataFrame objects to build the columns based on
# the `lsuffix` and `rsuffix`.
self_proxy = pandas.DataFrame(columns=self.columns)
other_proxy = pandas.DataFrame(columns=other.columns)
new_columns = self_proxy.join(
other_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of DataManager objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(
1, [other.index for other in others], how, sort=sort
)
to_join = [other.reindex(0, joined_index).data for other in others]
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
self_proxy = pandas.DataFrame(columns=self.columns)
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
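    # Illustrative sketch (assumption): arithmetic between two compilers whose
    # labels only partially overlap behaves like pandas' outer-aligned arithmetic,
    #
    #     result = qc_a.add(qc_b)   # non-matching rows/columns become NaN
    #     scaled = qc_a.mul(10)     # scalars broadcast without reshaping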
def inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
joined_index = self._join_index_objects(1, other.index, how_to_join, sort=False)
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
reindexed_other = other.reindex(0, joined_index).data
reindexed_self = self.reindex(0, joined_index).data
        # There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, self_cols, other_cols, func):
left.columns = self_cols
right.columns = other_cols
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1,
lambda l, r: inter_data_op_builder(l, r, self_cols, other_cols, func),
reindexed_other,
)
return self.__constructor__(new_data, joined_index, new_columns)
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
"""
axis = pandas.DataFrame()._get_axis_number(kwargs.get("axis", 0))
if isinstance(other, type(self)):
return self.inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self.scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
def add(self, other, **kwargs):
"""Adds this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with added data and new index.
"""
func = pandas.DataFrame.add
return self._inter_df_op_handler(func, other, **kwargs)
def div(self, other, **kwargs):
"""Divides this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.div
return self._inter_df_op_handler(func, other, **kwargs)
def eq(self, other, **kwargs):
"""Compares equality (==) with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.eq
return self._inter_df_op_handler(func, other, **kwargs)
def floordiv(self, other, **kwargs):
"""Floordivs this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with floordiv-ed data and index.
"""
func = pandas.DataFrame.floordiv
return self._inter_df_op_handler(func, other, **kwargs)
def ge(self, other, **kwargs):
"""Compares this manager >= than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.ge
return self._inter_df_op_handler(func, other, **kwargs)
def gt(self, other, **kwargs):
"""Compares this manager > than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.gt
return self._inter_df_op_handler(func, other, **kwargs)
def le(self, other, **kwargs):
"""Compares this manager < than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.le
return self._inter_df_op_handler(func, other, **kwargs)
def lt(self, other, **kwargs):
"""Compares this manager <= than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.lt
return self._inter_df_op_handler(func, other, **kwargs)
def mod(self, other, **kwargs):
"""Mods this manager against other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with mod-ed data and index.
"""
func = pandas.DataFrame.mod
return self._inter_df_op_handler(func, other, **kwargs)
def mul(self, other, **kwargs):
"""Multiplies this manager against other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with multiplied data and index.
"""
func = pandas.DataFrame.mul
return self._inter_df_op_handler(func, other, **kwargs)
def ne(self, other, **kwargs):
"""Compares this manager != to other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.ne
return self._inter_df_op_handler(func, other, **kwargs)
def pow(self, other, **kwargs):
"""Exponential power of this manager to other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with pow-ed data and index.
"""
func = pandas.DataFrame.pow
return self._inter_df_op_handler(func, other, **kwargs)
def rdiv(self, other, **kwargs):
"""Divides other object (manager or scalar) with this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.rdiv
return self._inter_df_op_handler(func, other, **kwargs)
def rfloordiv(self, other, **kwargs):
"""Floordivs this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with floordiv-ed data and index.
"""
func = pandas.DataFrame.rfloordiv
return self._inter_df_op_handler(func, other, **kwargs)
def rmod(self, other, **kwargs):
"""Mods this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with mod data and index.
"""
func = pandas.DataFrame.rmod
return self._inter_df_op_handler(func, other, **kwargs)
def rpow(self, other, **kwargs):
"""Exponential power of other object (manager or scalar) to this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with pow-ed data and new index.
"""
func = pandas.DataFrame.rpow
return self._inter_df_op_handler(func, other, **kwargs)
def rsub(self, other, **kwargs):
"""Subtracts other object (manager or scalar) from this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with subtracted data and new index.
"""
func = pandas.DataFrame.rsub
return self._inter_df_op_handler(func, other, **kwargs)
def sub(self, other, **kwargs):
"""Subtracts this manager from other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with subtracted data and new index.
"""
func = pandas.DataFrame.sub
return self._inter_df_op_handler(func, other, **kwargs)
def truediv(self, other, **kwargs):
"""Divides this manager with other object (manager or scalar).
Functionally same as div
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.truediv
return self._inter_df_op_handler(func, other, **kwargs)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.clip, **kwargs)
if is_list_like(lower) or is_list_like(upper):
df = self.map_across_full_axis(axis, func)
return self.__constructor__(df, self.index, self.columns)
return self.scalar_operations(axis, lower or upper, func)
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
# We are required to perform this reindexing on everything to
# shuffle the data together
reindexed_cond = cond.reindex(0, self.index).data
reindexed_other = other.reindex(0, self.index).data
reindexed_self = self.reindex(0, self.index).data
first_pass = reindexed_cond.inter_data_operation(
1,
lambda l, r: where_builder_first_pass(l, r, **kwargs),
reindexed_other,
)
final_pass = reindexed_self.inter_data_operation(
1, lambda l, r: where_builder_second_pass(l, r, **kwargs), first_pass
)
return self.__constructor__(final_pass, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = [i for i in range(len(other))]
def where_builder_series(df, cond, other, **kwargs):
return df.where(cond, other, **kwargs)
reindexed_self = self.reindex(
axis, self.index if not axis else self.columns
).data
reindexed_cond = cond.reindex(
axis, self.index if not axis else self.columns
).data
new_data = reindexed_self.inter_data_operation(
axis,
lambda l, r: where_builder_series(l, r, other, **kwargs),
reindexed_cond,
)
return self.__constructor__(new_data, self.index, self.columns)
# END Inter-Data operations
# Single Manager scalar operations (e.g. add to scalar, list of scalars)
def scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
New DataManager with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self.map_partitions(func)
# END Single Manager scalar operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
New DataManager with updated data and new index.
"""
# To reindex, we need a function that will be shipped to each of the
# partitions.
        def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
            lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
New DataManager with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = "index" if "index" not in self.columns else "level_0"
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
#
# _is_transposed, 0 for False or non-transposed, 1 for True or transposed.
_is_transposed = 0
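    # Illustrative sketch (assumed usage, not part of the original source):
    #
    #   qc2 = qc.transpose()     # blocks are not copied; only the flag flips
    #   qc2._is_transposed       # -> 1
    #   qc3 = qc2.transpose()
    #   qc3._is_transposed       # -> 0 again, because the flag is XOR-toggled
    #
    # Methods that ship work to the partitions apply the pending transpose
    # (see _prepare_method), so the data itself is only transposed lazily,
    # right before it is actually needed.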
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
        # Switch the index and columns and mark the data as transposed.
new_manager = self.__constructor__(new_data, self.columns, self.index)
# It is possible that this is already transposed
new_manager._is_transposed = self._is_transposed ^ 1
return new_manager
# END Transpose
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
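    # Illustrative sketch (assumed usage, not from the original source): the
    # count() method below is built exactly this way -- every partition
    # computes a partial result with map_func, and reduce_func folds the
    # partials together along the chosen axis:
    #
    #   map_func = self._prepare_method(pandas.DataFrame.count)
    #   reduce_func = self._prepare_method(pandas.DataFrame.sum)
    #   column_counts = self.full_reduce(0, map_func, reduce_func)
    #   # -> pandas Series of non-NaN counts indexed by self.columns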
def full_reduce(self, axis, map_func, reduce_func=None, numeric_only=False):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
numeric_only: Apply only over the numeric rows.
Return:
Returns Pandas Series containing the results from map_func and reduce_func.
"""
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
else:
query_compiler = self
if reduce_func is None:
reduce_func = map_func
        # The XOR here will ensure that we reduce over the correct axis that
        # exists on the internal partitions. We flip the axis whenever the
        # data is stored transposed internally.
result = query_compiler.data.full_reduce(
map_func, reduce_func, axis ^ self._is_transposed
)
if result.shape == (0,):
return result
elif not axis:
result.index = query_compiler.columns
else:
result.index = query_compiler.index
return result
def _process_min_max(self, func, **kwargs):
"""Calculates the min or max of the DataFrame.
Return:
Pandas series containing the min or max values from each column or
row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
def min_max_builder(df, **kwargs):
if not df.empty:
return func(df, **kwargs)
map_func = self._prepare_method(min_max_builder, **kwargs)
return self.full_reduce(axis, map_func, numeric_only=numeric_only)
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
Pandas series containing counts of non-NaN objects from each column or row.
"""
axis = kwargs.get("axis", 0)
numeric_only = kwargs.get("numeric_only", False)
map_func = self._prepare_method(pandas.DataFrame.count, **kwargs)
reduce_func = self._prepare_method(pandas.DataFrame.sum, **kwargs)
return self.full_reduce(axis, map_func, reduce_func, numeric_only)
def max(self, **kwargs):
"""Returns the maximum value for each column or row.
Return:
Pandas series with the maximum values from each column or row.
"""
return self._process_min_max(pandas.DataFrame.max, **kwargs)
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
Pandas series containing the mean from each numerical column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
sums = self.sum(**kwargs)
counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
try:
# If we need to drop any columns, it will throw a TypeError
return sums.divide(counts)
# In the case that a TypeError is thrown, we need to iterate through, similar to
# how pandas does and do the division only on things that can be divided.
# NOTE: We will only hit this condition if numeric_only is not True.
except TypeError:
def can_divide(l, r):
try:
pandas.Series([l]).divide(r)
except TypeError:
return False
return True
# Iterate through the sums to check that we can divide them. If not, then
# drop the record. This matches pandas behavior.
return pandas.Series(
{
idx: sums[idx] / counts[idx]
for idx in sums.index
if can_divide(sums[idx], counts[idx])
}
)
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
Pandas series with the minimum value from each column or row.
"""
return self._process_min_max(pandas.DataFrame.min, **kwargs)
def _process_sum_prod(self, func, **kwargs):
"""Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
ignore_axis: Whether to ignore axis when raising TypeError
Return:
Pandas Series with sum or prod of DataFrame.
"""
axis = kwargs.get("axis", 0)
numeric_only = kwargs.get("numeric_only", None) if not axis else True
min_count = kwargs.get("min_count", 0)
reduce_index = self.columns if axis else self.index
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
else:
query_compiler = self
new_index = query_compiler.index if axis else query_compiler.columns
def sum_prod_builder(df, **kwargs):
if not df.empty:
return func(df, **kwargs)
else:
return pandas.DataFrame([])
map_func = self._prepare_method(sum_prod_builder, **kwargs)
if min_count <= 1:
return self.full_reduce(axis, map_func, numeric_only=numeric_only)
elif min_count > len(reduce_index):
return pandas.Series(
[np.nan] * len(new_index), index=new_index, dtype=np.dtype("object")
)
else:
return self.full_axis_reduce(map_func, axis)
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
Pandas series with the product of each numerical column or row.
"""
return self._process_sum_prod(pandas.DataFrame.prod, **kwargs)
def sum(self, **kwargs):
"""Returns the sum of each numerical column or row.
Return:
Pandas series with the sum of each numerical column or row.
"""
return self._process_sum_prod(pandas.DataFrame.sum, **kwargs)
# END Full Reduce operations
# Map partitions operations
# These operations are operations that apply a function to every partition.
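    # Illustrative sketch (assumed usage, not from the original source): abs()
    # below follows this pattern -- wrap a pandas function with
    # _prepare_method, then apply it independently to every block:
    #
    #   func = self._prepare_method(pandas.DataFrame.abs)
    #   new_qc = self.map_partitions(func, new_dtypes=self.dtypes.copy())
    #
    # No partition needs data from any other partition, which is what
    # distinguishes these operations from the full-axis ones further down.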
def map_partitions(self, func, new_dtypes=None):
return self.__constructor__(
self.data.map_across_blocks(func), self.index, self.columns, new_dtypes
)
def abs(self):
func = self._prepare_method(pandas.DataFrame.abs)
return self.map_partitions(func, new_dtypes=self.dtypes.copy())
def applymap(self, func):
remote_func = self._prepare_method(pandas.DataFrame.applymap, func=func)
return self.map_partitions(remote_func)
def isin(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.isin, **kwargs)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def isna(self):
func = self._prepare_method(pandas.DataFrame.isna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def isnull(self):
func = self._prepare_method(pandas.DataFrame.isnull)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def negative(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.__neg__, **kwargs)
return self.map_partitions(func)
def notna(self):
func = self._prepare_method(pandas.DataFrame.notna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def notnull(self):
func = self._prepare_method(pandas.DataFrame.notnull)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def round(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.round, **kwargs)
return self.map_partitions(func, new_dtypes=self._dtype_cache)
# END Map partitions operations
# Map partitions across select indices
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
# Group indices to update by dtype for less map operations
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if dtype != self.dtypes[column]:
# Only add dtype only if different
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
# Update the new dtype series to the proper pandas dtype
new_dtype = np.dtype(dtype)
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
# END Map partitions across select indices
# Column/Row partitions reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def full_axis_reduce(self, func, axis, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
func: Function to reduce the Manager by. This function takes in a Manager.
axis: axis to apply the function to.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
"""
# We XOR with axis because if we are doing an operation over the columns
# (i.e. along the rows), we want to take the transpose so that the
        # results from the same partition will be concatenated together first.
        # We need this here because if the operation is over the columns,
# map_across_full_axis does not transpose the result before returning.
result = self.data.map_across_full_axis(axis, func).to_pandas(
self._is_transposed ^ axis
)
if result.empty:
return result
if not axis:
result.index = (
alternate_index if alternate_index is not None else self.columns
)
else:
result.index = (
alternate_index if alternate_index is not None else self.index
)
return result
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
def any(self, **kwargs):
"""Returns whether any the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return self._process_all_any(lambda df, **kwargs: df.any(**kwargs), **kwargs)
def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
Pandas Series containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis_none = True if axis is None else False
axis = 0 if axis is None else axis
kwargs["axis"] = axis
bool_only = kwargs.get("bool_only", None)
kwargs["bool_only"] = False if bool_only is None else bool_only
not_bool_col = []
numeric_col_count = 0
for col, dtype in zip(self.columns, self.dtypes):
if not is_bool_dtype(dtype):
not_bool_col.append(col)
numeric_col_count += 1 if is_numeric_dtype(dtype) else 0
if bool_only:
if axis == 0 and not axis_none and len(not_bool_col) == len(self.columns):
return pandas.Series(dtype=bool)
if len(not_bool_col) == len(self.columns):
query_compiler = self
else:
query_compiler = self.drop(columns=not_bool_col)
else:
if (
bool_only is False
and axis_none
and len(not_bool_col) == len(self.columns)
and numeric_col_count != len(self.columns)
):
if func == pandas.DataFrame.all:
return self.getitem_single_key(self.columns[-1])[self.index[-1]]
elif func == pandas.DataFrame.any:
return self.getitem_single_key(self.columns[0])[self.index[0]]
query_compiler = self
builder_func = query_compiler._prepare_method(func, **kwargs)
result = query_compiler.full_axis_reduce(builder_func, axis)
if axis_none:
return func(result)
else:
return result
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._prepare_method(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index.
first_result = self.full_axis_reduce(func, 0)
return self.index[first_result.min()]
def _post_process_idx_ops(self, axis, intermediate_result):
"""Converts internal index to external index.
Args:
axis: 0 for columns and 1 for rows. Defaults to 0.
intermediate_result: Internal index of self.data.
Returns:
External index of the intermediate_result.
"""
index = self.index if not axis else self.columns
result = intermediate_result.apply(lambda x: index[x])
return result
def idxmax(self, **kwargs):
"""Returns the first occurance of the maximum over requested axis.
Returns:
Series containing the maximum of each column or axis.
"""
# The reason for the special treatment with idxmax/min is because we
# need to communicate the row number back here.
def idxmax_builder(df, **kwargs):
df.index = pandas.RangeIndex(len(df.index))
return df.idxmax(**kwargs)
axis = kwargs.get("axis", 0)
func = self._prepare_method(idxmax_builder, **kwargs)
max_result = self.full_axis_reduce(func, axis)
# Because our internal partitions don't track the external index, we
# have to do a conversion.
return self._post_process_idx_ops(axis, max_result)
def idxmin(self, **kwargs):
"""Returns the first occurance of the minimum over requested axis.
Returns:
Series containing the minimum of each column or axis.
"""
# The reason for the special treatment with idxmax/min is because we
# need to communicate the row number back here.
def idxmin_builder(df, **kwargs):
df.index = pandas.RangeIndex(len(df.index))
return df.idxmin(**kwargs)
axis = kwargs.get("axis", 0)
func = self._prepare_method(idxmin_builder, **kwargs)
min_result = self.full_axis_reduce(func, axis)
# Because our internal partitions don't track the external index, we
# have to do a conversion.
return self._post_process_idx_ops(axis, min_result)
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._prepare_method(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index.
        last_result = self.full_axis_reduce(func, 0)
        return self.index[last_result.max()]
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
Series containing the median of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.median, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
Series containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(index=False, deep=deep)
deep = kwargs.get("deep", False)
func = self._prepare_method(memory_usage_builder, **kwargs)
return self.full_axis_reduce(func, 0)
def nunique(self, **kwargs):
"""Returns the number of unique items over each column or row.
Returns:
Series of ints indexed by column or index names.
"""
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.nunique, **kwargs)
return self.full_axis_reduce(func, axis)
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
Series containing the quantile of each column or row.
"""
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
numeric_only = kwargs.get("numeric_only", True)
assert type(q) is float
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
else:
query_compiler = self
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._prepare_method(quantile_builder, **kwargs)
result = query_compiler.full_axis_reduce(func, axis)
result.name = q
return result
def skew(self, **kwargs):
"""Returns skew of each column or row.
Returns:
Series containing the skew of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.skew, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def std(self, **kwargs):
"""Returns standard deviation of each column or row.
Returns:
Series containing the standard deviation of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.std, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def to_datetime(self, **kwargs):
"""Converts the Manager to a Series of DateTime objects.
Returns:
Series of DateTime objects.
"""
columns = self.columns
def to_datetime_builder(df, **kwargs):
df.columns = columns
return pandas.to_datetime(df, **kwargs)
func = self._prepare_method(to_datetime_builder, **kwargs)
return self.full_axis_reduce(func, 1)
def var(self, **kwargs):
"""Returns variance of each column or row.
Returns:
Series containing the variance of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = query_compiler._prepare_method(pandas.DataFrame.var, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def full_axis_reduce_along_select_indices(
self, func, axis, index, pandas_result=True
):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces Manager to Series using full knowledge of an
axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting series.
pandas_result: Return the result as a Pandas Series instead of raw data.
Returns:
Either a Pandas Series with index or BaseBlockPartitions object.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
if pandas_result:
result = result.to_pandas(self._is_transposed)
result.index = index
return result
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
include = kwargs.get("include", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
if include is None:
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if is_list_like(exclude):
exclude.append(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(
func, 0, new_columns, False
)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series(
[np.float64 for _ in new_columns], index=new_columns
)
else:
new_dtypes = pandas.Series(
[np.object for _ in new_columns], index=new_columns
)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
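    # Illustrative contrast (an assumption, not from the original source):
    # cumsum cannot be computed block-by-block because every value depends on
    # all earlier values along the axis, so the whole column/row has to be
    # brought together first:
    #
    #   func = self._prepare_method(pandas.DataFrame.cumsum)
    #   new_data = self.map_across_full_axis(0, func)  # 0 -> down the columns
    #
    # Compare with map_partitions above, which never needs to repartition.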
def map_across_full_axis(self, axis, func):
return self.data.map_across_full_axis(axis, func)
def _cumulative_builder(self, func, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(func, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(
new_data, self.index, self.columns, self._dtype_cache
)
def cumsum(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cumsum, **kwargs)
def cummax(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cummax, **kwargs)
def cummin(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cummin, **kwargs)
def cumprod(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cumprod, **kwargs)
def diff(self, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.diff, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
def dropna(self, **kwargs):
"""Returns a new DataManager with null values dropped along given axis.
Return:
a new DataManager
"""
axis = kwargs.get("axis", 0)
subset = kwargs.get("subset")
thresh = kwargs.get("thresh")
how = kwargs.get("how", "any")
# We need to subset the axis that we care about with `subset`. This
# will be used to determine the number of values that are NA.
if subset is not None:
if not axis:
compute_na = self.getitem_column_array(subset)
else:
compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
else:
compute_na = self
if not isinstance(axis, list):
axis = [axis]
# We are building this dictionary first to determine which columns
# and rows to drop. This way we do not drop some columns before we
# know which rows need to be dropped.
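        # Worked illustration (assumed values, not from the original source):
        # for axis=[0] the key below is ax ^ 1 == 1, so drop_values[1] is a
        # boolean Series marking rows whose NA count exceeds thresh (or rows
        # that are any/all-NA when thresh is None). Those labels are gathered
        # into rm_from_index further down, and drop() is then called once with
        # both the row and column labels, so neither drop affects the other.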
if thresh is not None:
# Count the number of NA values and specify which are higher than
# thresh.
drop_values = {
ax ^ 1: compute_na.isna().sum(axis=ax ^ 1) > thresh for ax in axis
}
else:
drop_values = {
ax ^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1) for ax in axis
}
if 0 not in drop_values:
drop_values[0] = None
if 1 not in drop_values:
drop_values[1] = None
rm_from_index = (
[obj for obj in compute_na.index[drop_values[1]]]
if drop_values[1] is not None
else None
)
rm_from_columns = (
[obj for obj in compute_na.columns[drop_values[0]]]
if drop_values[0] is not None
else None
)
else:
rm_from_index = (
compute_na.index[drop_values[1]] if drop_values[1] is not None else None
)
rm_from_columns = (
compute_na.columns[drop_values[0]]
if drop_values[0] is not None
else None
)
return self.drop(index=rm_from_index, columns=rm_from_columns)
def eval(self, expr, **kwargs):
"""Returns a new DataManager with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new PandasDataManager with new columns after applying expr.
"""
inplace = kwargs.get("inplace", False)
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
# if there is no assignment, then we simply save the results
# in the first column
if expect_series:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
else:
expr = "{0} = {1}".format(columns[0], expr)
def eval_builder(df, **kwargs):
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
result.columns = pandas.RangeIndex(0, len(result.columns))
return result
func = self._prepare_method(eval_builder, **kwargs)
new_data = self.map_across_full_axis(1, func)
if expect_series:
result = new_data.to_pandas()[0]
result.name = columns_copy.name
result.index = index
return result
else:
columns = columns_copy.columns
return self.__constructor__(new_data, self.index, columns)
def mode(self, **kwargs):
"""Returns a new DataManager with modes calculated for each label along given axis.
Returns:
A new PandasDataManager with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = | pandas.concat([result, append_values], ignore_index=True) | pandas.concat |
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from datetime import datetime
from datetime import timedelta
import numpy as np
import json
import os.path, time
## database imports
import os
import psycopg2
import psycopg2.extras as extras
# Imports from this application
from app import app
# Connecting to database
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
sql = "select * from flow;"
database_df = pd.read_sql_query(sql, conn)
conn = None
database_df = database_df.rename(columns={"observation":"Observation", "forecast":"Forecast"})
database_df = database_df.drop(columns="id")
mapping_df = database_df
mapping_df['date'] = | pd.to_datetime(mapping_df['date']) | pandas.to_datetime |
import getpass
import tkinter as tk
import tkinter.messagebox as message
from datetime import datetime, timedelta
from tkinter import filedialog
import pandas as pd
from keyword_explorer.Apps.AppBase import AppBase
from keyword_explorer.TwitterV2.TwitterV2Counts import TwitterV2Counts, TwitterV2Count
from keyword_explorer.tkUtils.Buttons import Buttons
from keyword_explorer.tkUtils.DataField import DataField
from keyword_explorer.tkUtils.DateEntryField import DateEntryField
from keyword_explorer.tkUtils.ListField import ListField
from keyword_explorer.tkUtils.TextField import TextField
from typing import Dict
class TweetCountExplorer(AppBase):
tvc:TwitterV2Counts
prompt_text_field:TextField
response_text_field:TextField
keyword_text_field:TextField
start_date_field:DateEntryField
end_date_field:DateEntryField
token_list:ListField
engine_list:ListField
sample_list:ListField
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.totals_dict = {}
print("TweetCountExplorer")
def setup_app(self):
self.app_name = "TweetCountExplorer"
self.app_version = "3.2.22"
self.geom = (850, 440)
self.tvc = TwitterV2Counts()
if not self.tvc.key_exists():
message.showwarning("Key Error", "Could not find Environment key 'BEARER_TOKEN_2'")
def build_app_view(self, row:int, main_text_width:int, main_label_width:int) -> int:
param_text_width = 15
param_label_width = 15
row += 1
lf = tk.LabelFrame(self, text="Twitter")
lf.grid(row=row, column=0, columnspan = 2, sticky="nsew", padx=5, pady=2)
self.build_twitter(lf, main_text_width, main_label_width)
lf = tk.LabelFrame(self, text="Twitter Params")
lf.grid(row=row, column=2, columnspan = 2, sticky="nsew", padx=5, pady=2)
self.build_twitter_params(lf, param_text_width, param_label_width)
self.end_date_field.set_date()
self.start_date_field.set_date(d = (datetime.utcnow() - timedelta(days=10)))
return row+1
def build_twitter(self, lf:tk.LabelFrame, text_width:int, label_width:int):
row = 0
self.keyword_text_field = TextField(lf, row, 'Test Keyword(s)', text_width, height=10, label_width=label_width)
row = self.keyword_text_field.get_next_row()
self.start_date_field = DateEntryField(lf, row, 'Start Date', text_width, label_width=label_width)
row = self.start_date_field.get_next_row()
self.end_date_field = DateEntryField(lf, row, 'End Date', text_width, label_width=label_width)
row = self.end_date_field.get_next_row()
buttons = Buttons(lf, row, "Actions", label_width=label_width)
buttons.add_button("Clear", self.clear_counts_callbacks)
buttons.add_button("Test Keyword", self.test_keyword_callback)
buttons.add_button("Plot", self.plot_counts_callback)
buttons.add_button("Save", self.save_callback)
buttons.add_button("Launch Twitter", self.launch_twitter_callback)
row = buttons.get_next_row()
def build_twitter_params(self, lf:tk.LabelFrame, text_width:int, label_width:int):
row = 0
self.sample_list = ListField(lf, row, "Sample", width=text_width, label_width=label_width, static_list=True)
self.sample_list.set_text(text='day, week, month')
self.sample_list.set_callback(self.set_time_sample_callback)
self.set_time_sample_callback()
row = self.sample_list.get_next_row()
def launch_twitter_callback(self):
        # Build the keyword list and date range, then open a Twitter search for each keyword
key_list = self.keyword_text_field.get_list("\n")
start_dt = self.start_date_field.get_date()
end_dt = self.end_date_field.get_date()
self.log_action("Launch_twitter", {"twitter_start": start_dt.strftime("%Y-%m-%d"), "twitter_end":end_dt.strftime("%Y-%m-%d"), "terms":" ".join(key_list)})
self.tvc.launch_twitter(key_list, start_dt, end_dt)
# webbrowser.open('https://twitter.com/search?q=chinavirus%20until%3A2020-02-01%20since%3A2019-12-01&src=typed_query')
# webbrowser.open('https://twitter.com/search?q=%22china%20virus%22%20until%3A2020-02-01%20since%3A2019-12-01&src=typed_query')
def set_time_sample_callback(self, event:tk.Event = None):
sample_str = self.sample_list.get_selected()
self.sample_list.set_label("Sample\n({})".format(sample_str))
def test_keyword_callback(self):
key_list = self.keyword_text_field.get_list("\n")
print(key_list)
start_dt = self.start_date_field.get_date()
end_dt = self.end_date_field.get_date()
for keyword in key_list:
if len(keyword) < 3:
message.showwarning("Keyword too short",
"Please enter something longer than [{}] text area".format(keyword))
return
granularity = self.sample_list.get_selected()
log_dict = {"granularity":granularity, "twitter_start": start_dt.strftime("%Y-%m-%d"), "twitter_end":end_dt.strftime("%Y-%m-%d")}
for keyword in key_list:
if granularity == 'day':
self.tvc.get_counts(keyword, start_dt, end_time=end_dt, granularity=granularity)
print("testing keyword {} between {} and {} - granularity = {}".format(keyword, start_dt, end_dt, granularity))
elif granularity == 'week':
self.tvc.get_sampled_counts(keyword, start_dt, end_time=end_dt, skip_days=7)
print("testing keyword {} between {} and {} - skip_days = {}".format(keyword, start_dt, end_dt, 7))
elif granularity == 'month':
self.tvc.get_sampled_counts(keyword, start_dt, end_time=end_dt, skip_days=30)
print("testing keyword {} between {} and {} - skip_days = {}".format(keyword, start_dt, end_dt, 30))
else:
self.dp.dprint("test_keyword_callback() unable to handle granularity = {}".format(granularity))
return
tvc:TwitterV2Count
for tvc in self.tvc.count_list:
print(tvc.to_string())
for k, v in self.tvc.totals_dict.items():
log_dict[k] = v
self.log_action("test_keyword", log_dict)
self.tvc.plot()
def clear_counts_callbacks(self):
self.tvc.reset()
def plot_counts_callback(self):
self.tvc.plot()
def save_callback(self):
default = "{} {}.xlsx".format(self.experiment_field.get_text(), datetime.now().strftime("%B_%d_%Y_(%H_%M_%S)"))
filename = filedialog.asksaveasfilename(filetypes=(("Excel files", "*.xlsx"),("All Files", "*.*")), title="Save Excel File", initialfile=default)
if filename:
print("saving to {}".format(filename))
df1 = self.get_description_df(self.prompt_text_field.get_text(), self.response_text_field.get_text())
df2 = self.tvc.to_dataframe()
with pd.ExcelWriter(filename) as writer:
df1.to_excel(writer, sheet_name='Experiment')
df2.to_excel(writer, sheet_name='Results')
self.log_action("save", {"filename":filename})
def get_description_df(self, probe:str, response:str) -> pd.DataFrame:
now = datetime.now()
now_str = now.strftime("%B_%d_%Y_(%H:%M:%S)")
sample_str = self.sample_list.get_selected()
description_dict = {'name':getpass.getuser(), 'date':now_str, 'probe':probe, 'response':response, 'sampling':sample_str}
df = | pd.DataFrame.from_dict(description_dict, orient='index', columns=['Value']) | pandas.DataFrame.from_dict |
import sqlite3
import json
import pandas as pd
import pickle
experiments = ['../atm.db'] ### path to atm's generated db - can be a list of result dbs for different experiments
df = | pd.DataFrame(columns=['experiment', 'method', 'id', 'accuracy', 'SD', 'cohen_kappa', 'f1', 'mcc', 'roc_auc', 'ap', 'parameters']) | pandas.DataFrame |
import pytest
import helper_functions
import pandas as pd
import numpy as np
from helper_functions import Helpers
def test_null_count():
df1 = pd.DataFrame([[1,2,3,4],[1,2,3,4],[1,2,3,np.nan]])
df2 = pd.DataFrame([[1,2,3,4],[1,2,np.nan,4],[1,2,3,np.nan]])
df3 = | pd.DataFrame([1,np.nan,3,4,1,2,3,4,1,2,3,np.nan]) | pandas.DataFrame |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_dist_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_PREV_TO_NEXT not in move_df
move_df.generate_dist_features()
assert_frame_equal(move_df, expected)
def test_generate_time_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
1.0,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1.0,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'time_to_prev',
'time_to_next',
'time_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TIME_PREV_TO_NEXT not in move_df
move_df.generate_time_features()
assert_frame_equal(move_df, expected)
def test_generate_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
| Timestamp('2008-10-23 05:53:06') | pandas.Timestamp |
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
codes=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize("func, values", [
("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}),
("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]})
])
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'],
'c_int': [1, 2, 3, 4],
'c_float': [4.02, 3.03, 2.04, 1.05],
'c_date': ['2019', '2018', '2016', '2017']})
df['c_date'] = pd.to_datetime(df['c_date'])
result = getattr(df.groupby('name'), func)()
expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
@pytest.mark.parametrize("func", [
'mean', 'var', 'std', 'cumprod', 'cumsum'
])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby('k').describe()
with pytest.raises(ValueError, match=msg):
df2.groupby('key').describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
    tm.assert_frame_equal(result, expected)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney (东方财富网) - quotes home page - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - Shanghai/Shenzhen/Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    Eastmoney (东方财富) - mapping from stock code to market id
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping from stock code to market id
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
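# Note: with the request parameters above, Shanghai-listed codes map to market id 1,
# while Shenzhen- and Beijing-listed codes map to 0; stock_zh_a_hist() below uses this
# id to build the "secid" request field as f"{market_id}.{symbol}".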
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - quotes home page - Shanghai/Shenzhen/Beijing A shares - daily quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": forward adjusted (前复权), "hfq": backward adjusted (后复权), "": unadjusted}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
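# Illustrative usage only (example values, not part of the original module; requires
# network access to the Eastmoney endpoint):
#   df = stock_zh_a_hist(symbol="000001", period="daily",
#                        start_date="20210101", end_date="20210301", adjust="qfq")
#   df.head()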
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - quotes home page - Shanghai/Shenzhen/Beijing A shares - intraday minute quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_date: start datetime
    :type start_date: str
    :param end_date: end datetime
    :type end_date: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :return: intraday minute quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - quotes home page - Shanghai/Shenzhen/Beijing A shares - intraday minute quotes including pre-market data
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_time: start time
    :type start_time: str
    :param end_time: end time
    :type end_time: str
    :return: intraday minute quotes including pre-market data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
def stock_hk_spot_em() -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - Hong Kong stocks - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hk_stocks
    :return: Hong Kong stocks real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"今开",
"最高",
"最低",
"昨收",
"成交量",
"成交额",
]
]
temp_df["序号"] = pd.to_numeric(temp_df["序号"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_ | numeric(temp_df["涨跌额"], errors="coerce") | pandas.to_numeric |
import os,time, pdb
import datetime as dt
import sqlalchemy
import numpy as np
import pandas as pd
import statsmodels.api as sm
class LongShortWeighted(object):
def __init__(self):
self._industry_styles = ['Bank','RealEstate','Health','Transportation','Mining',
'NonFerMetal','HouseApp','LeiService','MachiEquip','BuildDeco',
'CommeTrade','CONMAT','Auto','Textile','FoodBever','Electronics',
'Computer','LightIndus','Utilities','Telecom','AgriForest','CHEM',
'Media','IronSteel','NonBankFinan','ELECEQP','AERODEF','Conglomerates']
self._risk_styles = ['BETA','MOMENTUM','SIZE','EARNYILD','RESVOL','GROWTH','BTOP',
'LEVERAGE','LIQUIDTY','SIZENL']
    # placed here for now: winsorization (trim/clip extreme values)
def winsorize(self, total_data, method='sigma', limits=(3.0, 3.0), drop=True):
se = total_data.copy()
if method == 'quantile':
down, up = se.quantile([limits[0], 1.0 - limits[1]])
elif method == 'sigma':
std, mean = se.std(), se.mean()
down, up = mean - limits[0]*std, mean + limits[1]*std
if drop:
se[se<down] = np.NaN
se[se>up] = np.NaN
else:
se[se<down] = down
se[se>up] = up
return se
    # standardization (z-score)
def standardize(self, total_data):
try:
res = (total_data - total_data.mean()) / total_data.std()
except:
            res = pd.Series(data=np.NaN, index=total_data.index)
        return res
#.. relevant variables; the others are not worth using:
#lex.Xst : status (0/1), dead or alive
#Relevel(age.cat,list(1:2,3,4,5,6,7:9)): age, re-categorised
#per.cat: calendar time (year)
#(C10AA.DDD>0): indicator of whether a statin is in use at the start of follow-up (ATC code C10AA, see https://www.whocc.no/atc_ddd_index/ ) or not
#DM.type : type of diabetes
#.i.cancer: cancer present at the start of follow-up (0/1)
#factor(shp): hospital district as a factor, i.e. a categorical variable
#lex.dur: length of follow-up (in years); follow-up time is also given by the variable fu
# sex is almost certainly significant as well
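# The variables above are described in R/Epi (Lexis) terms; the function below is only
# a hedged sketch of how such a Poisson rate model could be fit in Python. The column
# names used here (lex_Xst, lex_dur, age_cat, per_cat, statin, DM_type, i_cancer, shp,
# sex) are assumed stand-ins for the fields listed above and are NOT taken from the
# aggregated CSV files loaded later in this module.
def fit_statin_mortality_sketch(df):
    """Sketch: Poisson GLM for the event indicator with log follow-up time as offset."""
    import numpy as np
    import statsmodels.api as sm
    import statsmodels.formula.api as smf
    # column names below are hypothetical stand-ins (see note above)
    formula = ("lex_Xst ~ C(age_cat) + C(per_cat) + statin + C(DM_type)"
               " + i_cancer + C(shp) + C(sex)")
    # offset=log(person-time) turns the Poisson count model into a rate model
    model = smf.glm(formula, data=df,
                    family=sm.families.Poisson(),
                    offset=np.log(df["lex_dur"]))
    return model.fit()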
import numpy as np
import pandas as pd
import pickle
def get_path():
path_file = open('path.txt', 'r')
path = path_file.readlines()[0][:-1]
path_file.close()
return path
def fetch_data(return_maps=False):
path = get_path()
female_data = pd.read_csv(path + 'female_aggregated_data.csv', delimiter=',')
    male_data = pd.read_csv(path + 'male_aggregated_data.csv', delimiter=',')
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/schools8_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W_0ED20uQKha"
# In this notebook, we fit a hierarchical Bayesian model to the "8 schools" dataset.
# See also https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# + id="HXRokZL1QPvB"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="C5EHDB-rQSIa" colab={"base_uri": "https://localhost:8080/"} outputId="d6d8b024-96ba-4014-97d9-ddef6d88349e"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import theano.tensor as tt
import theano
# #!pip install arviz
import arviz as az
# + id="sKlvHNY6RUaP"
# !mkdir ../figures
# + [markdown] id="-jby_J17HqBT"
# # Data
# + id="8pNC3UANQjeO" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="8f91ec2e-e81b-452b-dcf7-8c9f6ddda82a"
# https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# Data of the Eight Schools Model
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
print(np.mean(y))
print(np.median(y))
names=[];
for t in range(8):
names.append('{}'.format(t));
# Plot raw data
fig, ax = plt.subplots()
y_pos = np.arange(8)
ax.errorbar(y,y_pos, xerr=sigma, fmt='o')
ax.set_yticks(y_pos)
ax.set_yticklabels(names)
ax.invert_yaxis() # labels read top-to-bottom
plt.title('8 schools')
plt.savefig('../figures/schools8_data.png')
plt.show()
# + [markdown] id="vcAdKbnXHsKE"
# # Centered model
# + id="-Lxa_JgfQmAI" colab={"base_uri": "https://localhost:8080/", "height": 723} outputId="573cdde1-a178-4949-de75-af036d02f6dd"
# Centered model
with pm.Model() as Centered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with Centered_eight:
trace_centered = pm.sample(1000, chains=4, return_inferencedata=False)
pm.summary(trace_centered).round(2)
# PyMC3 gives multiple warnings about divergences
# Also, see r_hat ~ 1.01, ESS << nchains*1000, especially for sigma_alpha
# We can solve these problems below by using a non-centered parameterization.
# In practice, for this model, the results are very similar.
# + id="pOrDPo_lQob_" colab={"base_uri": "https://localhost:8080/"} outputId="0cbd7421-2754-43c2-a468-7250ae30b8d1"
# Display the total number and percentage of divergent chains
diverging = trace_centered['diverging']
print('Number of Divergent Chains: {}'.format(diverging.nonzero()[0].size))
diverging_pct = diverging.nonzero()[0].size / len(trace_centered) * 100
print('Percentage of Divergent Chains: {:.1f}'.format(diverging_pct))
# + id="bYbhbC-kT8GV" outputId="77b27048-57ad-456c-f6ea-7bbeee7d1d94" colab={"base_uri": "https://localhost:8080/"}
dir(trace_centered)
# + id="9ODVo7cLUKs8" outputId="505c9b7c-6b7f-4b12-be22-c67809d19641" colab={"base_uri": "https://localhost:8080/"}
trace_centered.varnames
# + id="gClLFgqHVuW1" outputId="7447a76c-0e85-4d11-ca0a-fd24babe57dd" colab={"base_uri": "https://localhost:8080/", "height": 356}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_centered_acf_combined.png', dpi=300)
# + id="uWPD88BxTkMj" outputId="ed94b053-2ebc-41f1-91c3-12f0d7eec423" colab={"base_uri": "https://localhost:8080/", "height": 452}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha']);
plt.savefig('schools8_centered_acf.png', dpi=300)
# + id="Uv1QEiQOQtGc" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="7ce96252-9002-4f18-a64c-c55046f5415d"
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=True);
plt.savefig('schools8_centered_forest_combined.png', dpi=300)
# + id="cgzmwxVGZxub" outputId="8979ca4c-d9df-43bb-847e-bad33b2258bb" colab={"base_uri": "https://localhost:8080/", "height": 542}
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=False);
plt.savefig('schools8_centered_forest.png', dpi=300)
# + [markdown] id="BkphbYr_HxOj"
# # Non-centered
# + id="jLFiQS0ZQvR4" colab={"base_uri": "https://localhost:8080/", "height": 905} outputId="8c0caa4b-4aa4-4685-f8ef-ef23ba60b82c"
# Non-centered parameterization
with pm.Model() as NonCentered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha_offset = pm.Normal('alpha_offset', mu=0, sigma=1, shape=J)
alpha = pm.Deterministic('alpha', mu_alpha + sigma_alpha * alpha_offset)
#alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with NonCentered_eight:
trace_noncentered = pm.sample(1000, chains=4)
pm.summary(trace_noncentered).round(2)
# Samples look good: r_hat = 1, ESS ~= nchains*1000
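# +
# Same divergence check as for the centered model above, applied to the
# non-centered trace for a side-by-side comparison (no new assumptions here).
diverging_nc = trace_noncentered['diverging']
print('Number of Divergent Chains: {}'.format(diverging_nc.nonzero()[0].size))
diverging_nc_pct = diverging_nc.nonzero()[0].size / len(trace_noncentered) * 100
print('Percentage of Divergent Chains: {:.1f}'.format(diverging_nc_pct))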
# + id="RyB5Qu-MQxuM" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="4a21b628-5b80-4ae4-a148-a208f33d6d43"
with NonCentered_eight:
az.plot_autocorr(trace_noncentered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_noncentered_acf_combined.png', dpi=300)
# + id="JHmvYgsAQzuK" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="5ed95cc6-49b8-4bc6-acca-59f7c5f5c06b"
with NonCentered_eight:
az.plot_forest(trace_noncentered, var_names="alpha",
combined=True, hdi_prob=0.95);
plt.savefig('schools8_noncentered_forest_combined.png', dpi=300)
# + id="vb8tzwUhXlW0" colab={"base_uri": "https://localhost:8080/", "height": 568} outputId="efad1751-55c1-4d1d-97b8-198f67af8935"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha",
combined=True, hdi_prob=0.95);
plt.axvline(np.mean(y), color='k', linestyle='--')
# + id="JETMmNSuZUV7" colab={"base_uri": "https://localhost:8080/", "height": 647} outputId="835e3d2c-7874-41b5-d22e-d64e18fae9ab"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha", kind='ridgeplot',
combined=True, hdi_prob=0.95);
# + [markdown] id="Q_SYYgL0H13G"
# # Funnel of hell
# + id="E3CtP2kcT4s5" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="17af872c-3d56-48e6-be05-a5aab0b4aa39"
# Plot the "funnel of hell"
# Based on
# https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(trace_centered['mu_alpha'], name='mu_alpha')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log_sigma_alpha')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='log(sigma)');
#axs[0].axhline(0.01)
x = pd.Series(trace_noncentered['mu_alpha'], name='mu')
y = pd.Series(trace_noncentered['log_sigma_alpha'], name='log_sigma_alpha')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='log(sigma)');
#axs[1].axhline(0.01)
plt.savefig('schools8_funnel.png', dpi=300)
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="EMOdWlU-Q13N" colab={"base_uri": "https://localhost:8080/", "height": 953} outputId="0125ea26-646a-4b29-8a69-7fc508ac5d66"
x = pd.Series(trace_centered['mu_alpha'], name='mu')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log sigma_alpha')
sns.jointplot(x, y, xlim=xlim, ylim=ylim);
plt.suptitle('centered')
plt.savefig('schools8_centered_joint.png', dpi=300)
x = pd.Series(trace_noncentered['mu_alpha'], name='mu')
y = pd.Series(trace_noncentered['log_sigma_alpha'], name='log sigma_alpha')
sns.jointplot(x, y, xlim=xlim, ylim=ylim);
plt.suptitle('noncentered')
plt.savefig('schools8_noncentered_joint.png', dpi=300)
# + id="qAfA7fIWWN9B" colab={"base_uri": "https://localhost:8080/", "height": 351} outputId="9a307f3d-bee9-4ce9-e219-c7b847dc5f78"
group = 0
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(10,5))
x = pd.Series(trace_centered['alpha'][:, group], name=f'alpha {group}')
# -*- coding: utf-8 -*-
from parsers.index_parser import *
from parsers.news_parser import *
from pandas import DataFrame
from typing import Union
from http import HTTPStatus
from requests import request
from requests.exceptions import RequestException, RetryError
class Crawler:
URL_INDEX = 'https://web.fosu.edu.cn/school-news/page/%d'
def __init__(self, strict: bool = False, retry: int = 3, timeout: Union[tuple, float] = None):
"""
initialization for crawler
:param strict: should parse DOM nodes in strict mode?
:param retry: retry times
:param timeout: seconds timeout for connect & read buffer
"""
self._retry = retry
self._timeout = timeout
self._index_parser = IndexParser(strict)
self._news_parser = NewsParser(strict)
def crawl(self, start_page: int = 1, n: int = -1) -> DataFrame:
"""
start the crawler
:param start_page: start page for crawl
        :param n: crawl n pages (value `-1` means all pages)
        :return: crawled news as a DataFrame with columns title, date, author, content
        """
if start_page < 1:
raise AttributeError(f'invalid value for start_page: {start_page}')
        if n <= 0 and n != -1:  # -1 is the sentinel for "all pages"
raise AttributeError(f'invalid value for n: {n}')
crawled = 0 # crawled page count
        data = DataFrame(columns=['title', 'date', 'author', 'content'])
import itertools
import traceback
from functools import partial, reduce
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import pandas as pd
import tensorflow as tf
from nninst import AttrMap, Graph, GraphAttrKey, mode
from nninst.backend.tensorflow.dataset import (
cifar10_main,
cifar100_main,
imagenet,
imagenet_raw,
mnist,
)
from nninst.backend.tensorflow.dataset.config import (
CIFAR10_TEST,
CIFAR100_TEST,
MNIST_TEST,
)
from nninst.backend.tensorflow.dataset.imagenet_hierarchy import imagenet_class_tree
from nninst.backend.tensorflow.dataset.imagenet_preprocessing import (
alexnet_preprocess_image,
)
from nninst.backend.tensorflow.graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from nninst.backend.tensorflow.model import AlexNet, LeNet, ResNet50
from nninst.backend.tensorflow.model.config import (
ALEXNET,
DENSENET_CIFAR10,
LENET,
RESNET_18_CIFAR10,
RESNET_18_CIFAR100,
RESNET_50,
VGG_16,
ModelConfig,
)
from nninst.backend.tensorflow.trace import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
)
from nninst.backend.tensorflow.trace.common import (
reconstruct_stat_from_tf,
reconstruct_trace_from_tf_v2,
)
from nninst.dataset.envs import IMAGENET_RAW_DIR
from nninst.path import (
get_trace_path_in_fc_layers,
get_trace_path_intersection_in_fc_layers,
)
from nninst.statistics import (
calc_iou,
calc_iou_compact,
calc_iou_compact_per_layer,
calc_trace_path_num,
calc_trace_side_overlap_both_compact,
calc_trace_size,
calc_trace_size_per_layer,
inter_class_similarity_matrix_ray,
self_similarity_matrix_ray,
)
from nninst.trace import (
TraceKey,
compact_edge,
compact_trace,
merge_compact_trace,
merge_compact_trace_diff,
merge_compact_trace_intersect,
)
from nninst.utils import filter_value_not_null, map_prefix, merge_dict
from nninst.utils.fs import CsvIOAction, ImageIOAction, IOAction, abspath
from nninst.utils.numpy import arg_approx, arg_sorted_topk
from nninst.utils.ray import ray_iter
__all__ = [
"overlap_ratio",
"get_overlay_summary",
"resnet_50_imagenet_overlap_ratio",
"alexnet_imagenet_overlap_ratio",
"resnet_50_imagenet_overlap_ratio_error",
"get_overlay_summary_one_side",
"resnet_50_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5",
"resnet_50_imagenet_overlap_ratio_top5_rand",
"resnet_50_imagenet_overlap_ratio_top5",
"alexnet_imagenet_overlap_ratio_error",
"alexnet_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5_rand",
"alexnet_imagenet_overlap_ratio_top5_diff",
]
def calc_all_overlap(
class_trace: AttrMap,
trace: AttrMap,
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
node_name: str = None,
compact: bool = False,
use_intersect_size: bool = False,
key: str = TraceKey.EDGE,
) -> Dict[str, float]:
if node_name is None:
if use_intersect_size:
overlap_ratio, intersect_size = overlap_fn(
class_trace, trace, key, return_size=True
)
return {key + "_size": intersect_size, key: overlap_ratio}
else:
return {
**{
key + "_size": calc_trace_size(trace, key, compact=compact)
for key in [
TraceKey.EDGE,
# TraceKey.POINT,
# TraceKey.WEIGHT
]
},
**{
key: overlap_fn(class_trace, trace, key)
for key in [
TraceKey.EDGE,
# TraceKey.POINT,
# TraceKey.WEIGHT
]
},
}
else:
all_overlap = {
key: overlap_fn(class_trace, trace, key, node_name)
for key in [
TraceKey.EDGE,
# TraceKey.POINT,
# TraceKey.WEIGHT
]
}
for key in [
TraceKey.EDGE,
# TraceKey.POINT,
# TraceKey.WEIGHT
]:
if node_name in trace.ops:
node_trace = trace.ops[node_name]
if key in node_trace:
if compact:
all_overlap[key + "_size"] = np.count_nonzero(
np.unpackbits(node_trace[key])
)
else:
all_overlap[key + "_size"] = TraceKey.to_array(
node_trace[key]
).size
return all_overlap
def overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
# class_id = mnist_info.test().label(image_id)
#
# if class_id != trace.attrs[GraphAttrKey.PREDICT]:
# return [{}] if per_node else {}
if trace is None:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
#
# if adversarial_example is None:
# return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
return row
else:
return {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 100)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
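    """Compute the overlap between class traces and per-example traces for
    ResNet-50 on ImageNet.

    Adversarial examples are generated on the fly with
    `generate_adversarial_fn`; only examples whose prediction is changed by
    the attack contribute rows. With `per_node=True`, one row is emitted per
    node of the class trace. The result is cached as CSV at `path`.
    """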
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"node_name": node_name,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
# for image_id in range(0, 50)
for class_id in range(1, 1001)
),
# for class_id in range(1, 2)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
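    """Top-5 variant of `resnet_50_imagenet_overlap_ratio`: the reference
    trace of each example is the merge of the class traces of its top-5
    predicted labels, and only examples whose adversarial label falls outside
    the original top-5 contribute rows. The result is cached as CSV at `path`.
    """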
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
if trace is None:
return {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
if adversarial_label not in label_top5:
# if np.intersect1d(label_top5, adversarial_label_top5).size == 0:
class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in label_top5]
)
adversarial_class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in adversarial_label_top5]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_error(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
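    """Compute the overlap between class traces and the traces of ImageNet
    test images that ResNet-50 misclassifies (natural errors); correctly
    classified images are skipped. The result is cached as CSV at `path`.
    """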
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if class_id == trace.attrs[GraphAttrKey.PREDICT]:
return {}
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 3)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_rand(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
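    """Baseline: compute the overlap between each class trace and the trace
    of a uniformly random input fed to ResNet-50. The result is cached as CSV
    at `path`.
    """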
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
example = np.random.random_sample((1, 224, 224, 3)).astype(np.float32)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5_rand(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
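    """Top-5 random-input baseline: the reference trace is the merge of the
    class traces of the top-5 labels predicted for a uniformly random input.
    The result is cached as CSV at `path`.
    """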
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
example = np.random.random_sample((1, 224, 224, 3)).astype(np.float32)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
class_trace = merge_compact_trace(
*[
class_trace_fn(label).load()
for label in trace.attrs[GraphAttrKey.PREDICT_TOP5]
]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def lenet_mnist_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
**kwargs,
) -> IOAction[np.ndarray]:
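    """Generate (and cache) an adversarial example for the `image_id`-th MNIST
    test image of class `class_id`, stored under
    `store/example/{attack_name}/lenet_mnist/`.
    """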
def get_example() -> np.ndarray:
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: mnist.test(data_dir, normed=False)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
return adversarial_example
name = "lenet_mnist"
path = f"store/example/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
# deprecated
def alexnet_imagenet_example_trace_old(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
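    """Deprecated: reconstruct and compact the trace of one correctly
    classified ImageNet test image under AlexNet at the given `threshold`;
    returns None for misclassified images.
    """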
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
return compact_trace(trace, graph)
name = "alexnet_imagenet"
path = f"store/analysis/example_trace/{name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_example_trace_of_target_class(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
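    """Reconstruct the trace of the original image, seeded from the label
    that AlexNet predicts for its cached adversarial example (the target
    class) instead of the image's own prediction.
    """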
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
trace_of_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
return compact_trace(trace_of_target_class, graph)
name = "alexnet_imagenet"
path = f"store/analysis/example_trace_of_target_class/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_adversarial_example_trace(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
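    """Reconstruct and compact the trace of the cached adversarial example
    under AlexNet; returns None when the original image is misclassified, no
    adversarial example exists, or the attack does not change the prediction.
    """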
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
return compact_trace(adversarial_trace, graph)
name = "alexnet_imagenet"
path = f"store/analysis/adversarial_example_trace/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_adversarial_example_trace_of_original_class(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
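    """Reconstruct the trace of the cached adversarial example, seeded from
    the original class label instead of the adversarial prediction.
    """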
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace_of_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
return compact_trace(adversarial_trace_of_original_class, graph)
name = "alexnet_imagenet"
path = f"store/analysis/adversarial_example_trace_of_original_class/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def generate_traces(
trace_fn: Callable[..., IOAction[AttrMap]],
attack_name: str,
class_ids: Iterable[int],
image_ids: Iterable[int],
**kwargs,
):
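    """Generate and save traces with `trace_fn` for every (class_id, image_id)
    pair, distributing the work with `ray_iter` and printing one line per
    finished pair or the traceback of any failed pair.
    """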
def generate_traces_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id, **kwargs
).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
results = ray_iter(
generate_traces_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
print(f"finish class {class_id} image {image_id}")
def get_example(
model_config: ModelConfig,
attack_name,
class_id: int,
image_id: int,
attack_fn=None,
generate_adversarial_fn=None,
generate_input_fn=None,
cache: bool = True,
label: str = None,
**kwargs,
) -> IOAction[np.ndarray]:
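    """Generic cached adversarial-example generator parameterized by
    `model_config`. When `label` is given, the model directory suffix is
    adjusted accordingly. Examples are stored under
    `store/example/{attack_name}/...` and reloaded from cache on later calls.
    """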
def get_example() -> np.ndarray:
model_dir = abspath(model_config.model_dir)
if label is not None:
if model_dir.endswith("_import"):
model_dir = model_dir[:-7] + "_" + label
else:
model_dir = model_dir + "_" + label
create_model = lambda: model_config.network_class()
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: generate_input_fn(class_id, image_id, model_config)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
return adversarial_example
name = f"{model_config.name}_imagenet"
if label is not None and label != "import":
name = name + "_" + label
path = f"store/example/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=cache, compress=True)
imagenet_example = partial(
get_example,
generate_input_fn=lambda class_id, image_id, model_config: imagenet_raw.test(
IMAGENET_RAW_DIR,
class_id,
image_id,
normed=False,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
),
)
alexnet_imagenet_example = partial(
imagenet_example, model_config=ALEXNET.with_model_dir("tf/alexnet/model_import")
)
resnet_50_imagenet_example = partial(imagenet_example, model_config=RESNET_50)
vgg_16_imagenet_example = partial(imagenet_example, model_config=VGG_16)
cifar100_example = partial(
get_example,
generate_input_fn=lambda class_id, image_id, model_config: cifar100_main.test(
CIFAR100_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
normed=False,
),
)
cifar10_example = partial(
get_example,
generate_input_fn=lambda class_id, image_id, model_config: cifar10_main.test(
CIFAR10_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
normed=False,
),
)
resnet_18_cifar100_example = partial(cifar100_example, model_config=RESNET_18_CIFAR100)
resnet_18_cifar10_example = partial(cifar10_example, model_config=RESNET_18_CIFAR10)
densenet_cifar10_example = partial(cifar10_example, model_config=DENSENET_CIFAR10)
def alexnet_imagenet_example_stat(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = None,
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
return imagenet_example_stat(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
def resnet_50_imagenet_example_stat(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = None,
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
return imagenet_example_stat(
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
def example_trace(
model_config: ModelConfig,
attack_name,
class_id: int,
image_id: int,
threshold: float,
attack_fn=None,
generate_adversarial_fn=None,
trace_fn=None,
generate_input_fn=None,
per_channel: bool = False,
cache: bool = True,
train: bool = False,
**kwargs,
) -> IOAction[AttrMap]:
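    """Build (and cache) the compacted trace of one example. With
    `attack_name == "original"` the clean input is traced; otherwise the
    cached adversarial example is traced, and None is returned when the clean
    image is misclassified, no adversarial example exists, or the attack does
    not change the prediction.
    """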
def get_example_trace() -> AttrMap:
mode.check(False)
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: generate_input_fn(class_id, image_id, model_config, train)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
if attack_name == "original":
trace = reconstruct_trace_from_tf_v2(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
trace_fn=partial(
trace_fn, select_fn=lambda input: arg_approx(input, threshold)
),
model_dir=model_dir,
)[0]
trace = compact_trace(trace, graph, per_channel=per_channel)
return trace
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_trace_from_tf_v2(
model_fn=model_fn,
input_fn=adversarial_input_fn,
trace_fn=partial(
trace_fn, select_fn=lambda input: arg_approx(input, threshold)
),
model_dir=model_dir,
)[0]
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
return adversarial_trace
name = f"{model_config.name}_imagenet"
if train:
name = f"{name}_train"
if per_channel:
trace_name = "example_channel_trace"
else:
trace_name = "example_trace"
path = f"store/{trace_name}/approx_{threshold:.3f}/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example_trace, cache=cache, compress=True)
imagenet_example_trace = partial(
example_trace,
generate_input_fn=lambda class_id, image_id, model_config, train: (
imagenet_raw.train if train else imagenet_raw.test
)(
IMAGENET_RAW_DIR,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
),
)
alexnet_imagenet_example_trace = partial(
imagenet_example_trace,
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
)
resnet_50_imagenet_example_trace = partial(
imagenet_example_trace, model_config=RESNET_50
)
vgg_16_imagenet_example_trace = partial(imagenet_example_trace, model_config=VGG_16)
cifar100_example_trace = partial(
example_trace,
generate_input_fn=lambda class_id, image_id, model_config, train: (
cifar100_main.train if train else cifar100_main.test
)(
CIFAR100_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
cifar10_example_trace = partial(
example_trace,
generate_input_fn=lambda class_id, image_id, model_config, train: (
cifar10_main.train if train else cifar10_main.test
)(
CIFAR10_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
resnet_18_cifar100_example_trace = partial(
cifar100_example_trace, model_config=RESNET_18_CIFAR100
)
resnet_18_cifar10_example_trace = partial(
cifar10_example_trace, model_config=RESNET_18_CIFAR10
)
mnist_example_trace = partial(
example_trace,
generate_input_fn=lambda class_id, image_id, model_config, train: (
mnist.train if train else mnist.test
)(
MNIST_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
lenet_mnist_example_trace = partial(mnist_example_trace, model_config=LENET)
def imagenet_example_stat(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = "avg",
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
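    """Collect (and cache) per-layer statistics (`stat_name`, e.g. "avg") with
    `reconstruct_stat_from_tf`, either for the clean input (`attack_name ==
    "original"`) or for the cached adversarial example. Note that the input
    here is drawn from the ImageNet train split.
    """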
def get_example_trace() -> Dict[str, np.ndarray]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
# input_fn = lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn = lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
# if predicted_label != class_id:
# return None
if attack_name == "original":
trace = reconstruct_stat_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
model_dir=model_dir,
stat_name=stat_name,
)[0]
return trace
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_stat_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
model_dir=model_dir,
stat_name=stat_name,
)[0]
return adversarial_trace
name = f"{model_config.name}_imagenet"
trace_name = "example_stat"
path = (
f"store/{trace_name}/{stat_name}/{attack_name}/{name}/{class_id}/{image_id}.pkl"
)
return IOAction(path, init_fn=get_example_trace, cache=cache, compress=True)
def adversarial_example_image(
example_io: IOAction[np.ndarray], cache: bool = True
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
example = example_io.load()
if example is None:
return None
return (np.squeeze(example, axis=0) * 255).astype(np.uint8)
path = example_io.path.replace(".pkl", ".png")
return ImageIOAction(path, init_fn=get_example, cache=cache)
def generate_examples(
example_fn: Callable[..., IOAction[np.ndarray]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
cache: bool = True,
):
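    """Generate and save adversarial examples (and their PNG renderings via
    `adversarial_example_image`) for every (class_id, image_id) pair via
    `ray_iter`, printing the traceback of any failed pair.
    """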
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_io = example_fn(
attack_name=attack_name,
class_id=class_id,
image_id=image_id,
cache=cache,
)
example_io.save()
adversarial_example_image(example_io, cache=cache).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
# num_gpus=0,
num_gpus=1,
num_cpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def generate_examples_old(
example_fn: Callable[..., IOAction[np.ndarray]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
attack_fn,
generate_adversarial_fn,
cache: bool = True,
**kwargs,
):
return generate_examples(
example_fn=partial(
example_fn,
attack_fn=attack_fn,
generate_adversarial_fn=partial(generate_adversarial_fn, **kwargs),
),
class_ids=class_ids,
image_ids=image_ids,
attack_name=attack_name,
cache=cache,
)
def generate_example_traces(
example_trace_fn: Callable[..., IOAction[AttrMap]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
):
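    """Generate and save example traces with `example_trace_fn` for every
    (class_id, image_id) pair via `ray_iter`.
    """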
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id
)
example_trace_io.save()
return class_id, image_id
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def intra_class_similarity(
name: str,
threshold: float,
example_trace_fn: Callable[..., IOAction[AttrMap]],
class_id: int,
image_ids: Iterable[int],
attack_name: str,
label: str = None,
key: str = TraceKey.EDGE,
variant: str = None,
layer_name: str = None,
) -> IOAction[np.ndarray]:
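    """Build (and cache) the pairwise IoU similarity matrix between the
    example traces of images of class `class_id`. Missing traces are skipped
    by advancing to the next image id; `layer_name` restricts the comparison
    to a single layer.
    """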
def generate_examples_fn(
image_id: int,
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
image_id = int(image_id)
while True:
example_trace_io = example_trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id
)
example_trace_io.save()
if example_trace_io.load() is not None:
return class_id, image_id
else:
image_id += 1
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
def get_intra_class_similarity(partial_path, image_ids):
print(f"begin {attack_name}")
image_ids = list(image_ids)
results = ray_iter(
generate_examples_fn,
image_ids,
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
actual_image_ids = []
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
actual_image_ids.append(image_id)
assert len(actual_image_ids) == len(image_ids)
if layer_name is None:
similarity_fn = calc_iou_compact
else:
similarity_fn = partial(calc_iou_compact_per_layer, node_name=layer_name)
return self_similarity_matrix_ray(
partial_path,
actual_image_ids,
trace_fn=lambda image_id: example_trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id
).load(),
similarity_fn=similarity_fn,
key=key,
)
threshold_name = "{0:.3f}".format(threshold)
if label is not None:
trace_name = f"{name}_{label}"
else:
trace_name = name
if variant is not None:
trace_name = f"{trace_name}_{variant}"
similarity_type = "intra_class_similarity"
if key == TraceKey.EDGE:
prefix = f"store/analysis/{similarity_type}/{trace_name}/approx_{threshold_name}/{attack_name}/class{class_id}/"
elif key == TraceKey.WEIGHT:
prefix = f"store/analysis/{similarity_type}_weight/{trace_name}/approx_{threshold_name}/{attack_name}/class{class_id}/"
else:
raise RuntimeError(f"key {key} is invalid")
if layer_name is not None:
prefix = prefix + f"{layer_name}/"
path = f"{prefix}/intra_class_similarity.pkl"
partial_path = f"{prefix}/partial/"
return IOAction(
path,
init_fn=lambda: get_intra_class_similarity(partial_path, image_ids),
cache=True,
)
def example_similarity(
name: str,
threshold: float,
example_trace_fn: Callable[..., IOAction[AttrMap]],
class_trace_fn: Callable[..., IOAction[AttrMap]],
class_id: int,
image_ids: Iterable[int],
attack_name: str,
label: str = None,
key: str = TraceKey.EDGE,
variant: str = None,
layer_name: str = None,
cache: bool = True,
) -> IOAction[np.ndarray]:
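    """Compute (and cache) an array of similarities between each example
    trace and the class trace of `class_id`, using
    `calc_trace_side_overlap_both_compact` (optionally restricted to one
    layer); pairs whose traces cannot be loaded score 0.0.
    """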
def generate_examples_fn(
image_id: int,
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id
)
example_trace_io.save()
if example_trace_io.load() is not None:
return class_id, image_id
else:
return class_id, None
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
def get_example_similarity(image_ids):
print(f"begin {attack_name}")
image_ids = list(image_ids)
results = ray_iter(
generate_examples_fn,
image_ids,
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
actual_image_ids = []
for class_id, image_id in results:
if image_id is not None:
actual_image_ids.append(image_id)
if layer_name is None:
similarity_fn = calc_trace_side_overlap_both_compact
else:
similarity_fn = partial(
calc_trace_side_overlap_both_compact, node_name=layer_name
)
def calc_similarity(image_id):
example_trace = example_trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id
).load()
class_trace = class_trace_fn(class_id).load()
if example_trace is None or class_trace is None:
return 0.0
else:
similarity = similarity_fn(class_trace, example_trace, key=key)
return similarity
similarity_list = ray_iter(
lambda i: (i, calc_similarity(actual_image_ids[i])),
range(len(actual_image_ids)),
out_of_order=True,
chunksize=1,
)
array = np.zeros((len(actual_image_ids),), dtype=float)
for i, similarity in similarity_list:
array[i] = similarity
return array
threshold_name = "{0:.3f}".format(threshold)
if label is not None:
trace_name = f"{name}_{label}"
else:
trace_name = name
if variant is not None:
trace_name = f"{trace_name}_{variant}"
similarity_type = "example_similarity"
if key == TraceKey.EDGE:
prefix = f"store/analysis/{similarity_type}/{trace_name}/approx_{threshold_name}/{attack_name}/class{class_id}/"
elif key == TraceKey.WEIGHT:
prefix = f"store/analysis/{similarity_type}_weight/{trace_name}/approx_{threshold_name}/{attack_name}/class{class_id}/"
else:
raise RuntimeError(f"key {key} is invalid")
if layer_name is not None:
prefix = prefix + f"{layer_name}/"
path = f"{prefix}/example_similarity.pkl"
return IOAction(
path, init_fn=lambda: get_example_similarity(image_ids), cache=cache
)
def inter_class_similarity(
name: str,
trace_fn,
class_ids: Iterable[int],
start_index_map: Dict[str, int],
is_compact: bool = True,
per_channel: bool = False,
):
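    """Return a `self_similarity_fn(threshold, ...)` factory that builds (and
    caches) the pairwise IoU matrix between the class traces of `class_ids`,
    where the two sides of each pair are class-trace variants selected via
    `start_index_map`.
    """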
def self_similarity_fn(
threshold: float,
label: str = None,
key: str = TraceKey.EDGE,
compress: bool = True,
variant: str = None,
) -> IOAction[np.ndarray]:
def get_self_similarity(partial_path) -> np.ndarray:
if is_compact:
similarity_fn = calc_iou_compact
trace_label = "compact" if label is None else label + "_compact"
else:
similarity_fn = calc_iou
trace_label = label
return inter_class_similarity_matrix_ray(
partial_path,
class_ids,
trace_fn=lambda class_id, start_index_name: trace_fn(
class_id=class_id,
threshold=threshold,
label=trace_label,
compress=compress,
variant=(
variant + f"[start_index={start_index_map[start_index_name]}]"
)
if variant is not None
else f"[start_index={start_index_map[start_index_name]}]",
).load(),
similarity_fn=similarity_fn,
key=key,
)
threshold_name = "{0:.3f}".format(threshold)
if label is not None:
trace_name = f"{name}_{label}"
else:
trace_name = name
if variant is not None:
trace_name = f"{trace_name}_{variant}"
if per_channel:
similarity_type = "inter_class_channel_similarity"
else:
similarity_type = "inter_class_similarity"
if key == TraceKey.EDGE:
prefix = f"store/analysis/{similarity_type}/{trace_name}/approx_{threshold_name}/"
elif key == TraceKey.WEIGHT:
prefix = f"store/analysis/{similarity_type}_weight/{trace_name}/approx_{threshold_name}/"
else:
raise RuntimeError(f"key {key} is invalid")
path = f"{prefix}/inter_class_similarity.pkl"
partial_path = f"{prefix}/partial/"
return IOAction(
path, init_fn=lambda: get_self_similarity(partial_path), cache=True
)
return self_similarity_fn
def generate_example_stats(
example_trace_fn: Callable[..., IOAction[Dict[str, np.ndarray]]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
attack_fn,
generate_adversarial_fn,
stat_name: str = None,
cache: bool = True,
**kwargs,
):
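    """Generate and save example statistics with `example_trace_fn` for every
    (class_id, image_id) pair via `ray_iter`.
    """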
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
example_trace_io.save()
return class_id, image_id
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def alexnet_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
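    """AlexNet/ImageNet analogue of `resnet_50_imagenet_overlap_ratio`:
    per-example overlap between class traces and the traces of original and
    adversarial inputs, cached as CSV at `path`.
    """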
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
                        if (
                            f"original.{TraceKey.WEIGHT}" in row
                            and row[f"original.{TraceKey.WEIGHT}"] is not None
                        ) or (
                            f"original.{TraceKey.EDGE}" in row
                            and row[f"original.{TraceKey.EDGE}"] is not None
                        ):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def get_predicted_value_contribution(
trace: AttrMap, graph: Graph, class_id: int, create_model, input_fn, model_dir
) -> float:
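    """Return the value predicted for `class_id` when the model's weights are
    masked by `trace` (via `MaskWeightWithTraceHook`), i.e. the contribution
    of the traced subnetwork to that prediction.
    """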
# print(calc_density_compact(trace, TraceKey.EDGE))
return get_predicted_value(
class_id=class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
prediction_hooks=[MaskWeightWithTraceHook(graph, trace)],
)
def alexnet_imagenet_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
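    """Detailed top-k overlap analysis for AlexNet on ImageNet.

    For each of the top-`topk_calc_range` labels of the original and the
    adversarial input, the example trace and the class trace are reduced to
    their class-specific parts (the merged traces of the other top-k classes
    are subtracted) before computing overlaps, predicted-value contributions
    and sizes of several trace regions; the perturbation norm is also
    recorded. The result is cached as CSV at `path`.
    """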
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
with tf.Session() as sess:
original_example = sess.run(
imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
normed=False,
)
.make_one_shot_iterator()
.get_next()[0]
)
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
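                # Partition the example trace by membership in the class trace
                # of `base_class_id` and in the merged class traces of the
                # other top-k labels ("rest"), then measure the overlap,
                # predicted-value contribution and size of each region.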
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
("pvc_in_class_in_rest", example_trace_in_class_in_rest),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
for k, base_class_id in zip(range(1, topk_calc_range + 1), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace, input_fn),
f"original.top{k}",
),
}
for k, base_class_id in zip(
range(1, topk_calc_range + 1), adversarial_label_top5
):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"perturbation": np.linalg.norm(
adversarial_example - original_example
)
/ original_example.size,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_top5_diff_uint8(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
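    """Variant of `alexnet_imagenet_overlap_ratio_top5_diff` that reloads the
    adversarial example from its saved uint8 image (i.e. after 8-bit
    quantization) before tracing it.
    """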
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = adversarial_example_image(
alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
)
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_example = (
np.expand_dims(adversarial_example, axis=0).astype(np.float32) / 255
)
with tf.Session() as sess:
original_example = sess.run(
imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
normed=False,
)
.make_one_shot_iterator()
.get_next()[0]
)
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
("pvc_in_class_in_rest", example_trace_in_class_in_rest),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
for k, base_class_id in zip(range(1, topk_calc_range + 1), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace, input_fn),
f"original.top{k}",
),
}
for k, base_class_id in zip(
range(1, topk_calc_range + 1), adversarial_label_top5
):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.top{k}",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"perturbation": np.linalg.norm(
adversarial_example - original_example
)
/ original_example.size,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_logit_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
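    """Per-example overlap metrics for AlexNet on ImageNet, comparing example
    traces against class traces.

    For every (class_id, image_id) pair the original image and its stored
    adversarial counterpart are traced four ways: the original input seeded
    with its own label ("original.origin") and with the adversarial label
    ("original.target"), and the adversarial input seeded with the adversarial
    label ("adversarial.target") and with the original label
    ("adversarial.origin"). Each trace is compared against the relevant class
    traces via overlap ratios, predicted-value contributions and overlap
    sizes. The rows are collected into a DataFrame and cached as a CSV at
    `path`.
    """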
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
if base_class_id in rest_class_ids:
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
# ("pvc_in_class_in_rest", example_trace_in_class_in_rest),
("pvc_in_class", example_trace_in_class),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
# if (class_id not in adversarial_label_top5) or (adversarial_label not in label_top5):
# return [{}] if per_node else {}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, label_top5, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, label_top5, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label_top5,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_ideal_metrics(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
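    """Per-example "ideal" overlap metrics for AlexNet on ImageNet.

    Like the logit-diff variant, each original/adversarial input is traced with
    both the original and the adversarial label as seed, but every comparison
    uses only the origin/target pair of class traces rather than the full
    top-5. Each row additionally records the rank of the original class in the
    adversarial example and the rank of the target class in the original
    example. Results are cached as a CSV at `path`.
    """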
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
# ("pvc_in_class_in_rest", example_trace_in_class_in_rest),
("pvc_in_class", example_trace_in_class),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"original_class_rank_in_adversarial_example": get_rank(
class_id=class_id,
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
),
"target_class_rank_in_original_example": get_rank(
class_id=adversarial_label,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
),
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_fc_layer_path_ideal_metrics(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
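    """Trace-path counts in the fully connected layers of AlexNet.

    For each original/adversarial example pair, traces seeded with the origin
    and target labels are intersected with the corresponding class trace, and
    the number of trace paths at a fixed layer (`path_layer_name`) is recorded
    (overlap size, example trace path size, class trace path size) for the
    "original.origin", "original.target", "adversarial.target" and
    "adversarial.origin" rows. Results are cached as a CSV at `path`.
    """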
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
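            # layer at which trace-path counts are taken below, selected by a fixed
            # offset from the end of the layer list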
path_layer_name = graph.layers()[-11]
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
trace = compact_trace(trace, graph, per_channel=per_channel)
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
class_trace_paths = {}
def get_class_trace_path(class_id: int) -> AttrMap:
if class_id not in class_trace_paths:
class_trace = get_class_trace(class_id)
class_trace_paths[class_id] = get_trace_path_in_fc_layers(
graph, class_trace, compact=True
)
return class_trace_paths[class_id]
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_path = get_trace_path_in_fc_layers(
graph, trace, compact=True
)
trace_path_intersection = get_trace_path_intersection_in_fc_layers(
trace, class_trace, graph=graph, compact=True
)
return {
"overlap_size": calc_trace_path_num(
trace_path_intersection, path_layer_name
),
"trace_path_size": calc_trace_path_num(
example_trace_path, path_layer_name
),
"class_trace_path_size": calc_trace_path_num(
get_class_trace_path(base_class_id), path_layer_name
),
}
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, adversarial_trace),
f"adversarial.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, trace_target_class),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_trace_original_class),
f"adversarial.origin",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_ideal_metrics_per_layer(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
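    """Per-layer "ideal" overlap metrics for AlexNet on ImageNet.

    For each original/adversarial example pair, traces seeded with the origin
    and target labels are partitioned against the origin/target class traces,
    and the per-layer sizes of each region are recorded. Besides the
    "original.*" and "adversarial.*" rows, "shared.target" and "shared.origin"
    rows measure the intersection of the target-seeded original trace with the
    adversarial trace and of the origin-seeded adversarial trace with the
    original trace, respectively. Results are cached as a CSV at `path`.
    """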
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace, layer_name, compact=True
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
merge_compact_trace_intersect(
trace_target_class, adversarial_trace
),
adversarial_input_fn,
),
f"shared.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
merge_compact_trace_intersect(
adversarial_trace_original_class, trace
),
adversarial_input_fn,
),
f"shared.origin",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
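# Dispatchers: each helper below binds a ModelConfig and selects the per-rank
# variant when `rank` is given, falling back to the v2 variant otherwise.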
def alexnet_imagenet_real_metrics_per_layer(rank: int = None, **kwargs):
return (
imagenet_real_metrics_per_layer_per_rank
if rank
else imagenet_real_metrics_per_layer_v2
)(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
rank=rank,
**kwargs,
)
def resnet_18_cifar100_real_metrics_per_layer(rank: int = None, **kwargs):
return (
cifar100_real_metrics_per_layer_per_rank
if rank
else cifar100_real_metrics_per_layer_v2
)(model_config=RESNET_18_CIFAR100, rank=rank, **kwargs)
def resnet_18_cifar10_real_metrics_per_layer(rank: int = None, **kwargs):
return (
cifar10_real_metrics_per_layer_per_rank
if rank
else cifar10_real_metrics_per_layer_v2
)(model_config=RESNET_18_CIFAR10, rank=rank, **kwargs)
def densenet_cifar10_real_metrics_per_layer(rank: int = None, **kwargs):
return (
cifar10_real_metrics_per_layer_per_rank
if rank
else cifar10_real_metrics_per_layer_v2
)(model_config=DENSENET_CIFAR10, rank=rank, **kwargs)
def imagenet_real_metrics_per_layer(
model_config: ModelConfig,
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
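    """Per-layer overlap sizes between example traces and class traces for an
    ImageNet model described by `model_config`.

    For each correctly classified image and its stored adversarial example,
    the per-layer trace size and its intersection with the predicted class
    trace are recorded (edges by default, weights if `use_weight` is set).
    When `support_diff` is set, additional traces seeded with the
    second-ranked label of each prediction are measured as "original.target"
    and "adversarial.origin". Results are cached as a CSV at `path`.
    """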
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=model_config.class_from_zero,
# preprocessing_fn=model_config.preprocessing_fn)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
# if class_id not in class_traces:
# class_traces[class_id] = class_trace_fn(class_id).load()
# return class_traces[class_id]
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
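                # Per-layer sizes of the example trace and of its intersection with
                # the base class trace, keyed as "<layer>.overlap_size_total" and
                # "<layer>.overlap_size_in_class".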
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
("overlap_size_in_class", example_trace_in_class),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, adversarial_trace),
f"adversarial.target",
),
}
if support_diff:
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([label_top5[1]]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(label_top5[1], trace_target_class),
f"original.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label_top5[1]]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label_top5[1], adversarial_trace_original_class
),
f"adversarial.origin",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
images = (
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
)
images = map(
lambda class_with_image: (
class_with_image[0]
if model_config.class_from_zero
else class_with_image[0] + 1,
class_with_image[1],
),
images,
)
traces = ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def real_metrics_per_layer_v2(
model_config: ModelConfig,
attack_name: str,
generate_input_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
use_weight: bool = False,
support_diff: bool = True,
threshold: float = None,
label: str = None,
**kwargs,
):
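    """Dataset-agnostic variant of the per-layer real metrics.

    Original and adversarial example traces are loaded from the precomputed
    `example_trace` store at the given `threshold` instead of being
    reconstructed here; `generate_input_fn` supplies the original input
    pipeline used when the `support_diff` traces seeded with the second-ranked
    label are reconstructed. Per-layer overlap sizes with the class traces are
    collected into a DataFrame cached as a CSV at `path`.
    """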
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: generate_input_fn(class_id, image_id, model_config)
assert threshold is not None
trace = example_trace(
model_config=model_config,
attack_name="original",
attack_fn=None,
generate_adversarial_fn=None,
trace_fn=None,
generate_input_fn=None,
class_id=class_id,
image_id=image_id,
threshold=threshold,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
).load()
if trace is None:
return [{}] if per_node else {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = example_trace(
model_config=model_config,
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
trace_fn=None,
generate_input_fn=None,
class_id=class_id,
image_id=image_id,
threshold=threshold,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
).load()
if adversarial_trace is None:
return [{}] if per_node else {}
adversarial_example = get_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
generate_input_fn=None,
class_id=class_id,
image_id=image_id,
label=label,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
def adversarial_input_fn():
adversarial_input = model_config.normalize_fn(adversarial_example)
if not isinstance(adversarial_input, tf.data.Dataset):
adversarial_input = tf.data.Dataset.from_tensors(adversarial_input)
return adversarial_input
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
# if class_id not in class_traces:
# class_traces[class_id] = class_trace_fn(class_id).load()
# return class_traces[class_id]
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
("overlap_size_in_class", example_trace_in_class),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, adversarial_trace),
f"adversarial.target",
),
}
if support_diff:
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([label_top5[1]]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(label_top5[1], trace_target_class),
f"original.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label_top5[1]]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label_top5[1], adversarial_trace_original_class
),
f"adversarial.origin",
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
images = (
(class_id, image_id)
for image_id in range(0, model_config.image_num_per_class)
for class_id in model_config.class_list()
)
traces = ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
imagenet_real_metrics_per_layer_v2 = partial(
real_metrics_per_layer_v2,
generate_input_fn=lambda class_id, image_id, model_config: imagenet_raw.test(
IMAGENET_RAW_DIR,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
),
)
cifar100_real_metrics_per_layer_v2 = partial(
real_metrics_per_layer_v2,
generate_input_fn=lambda class_id, image_id, model_config: cifar100_main.test(
CIFAR100_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
cifar10_real_metrics_per_layer_v2 = partial(
real_metrics_per_layer_v2,
generate_input_fn=lambda class_id, image_id, model_config: cifar10_main.test(
CIFAR10_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
def real_metrics_per_layer_per_rank(
model_config: ModelConfig,
attack_name: str,
generate_input_fn,
trace_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
path: str,
rank: int,
use_weight: bool = False,
threshold: float = None,
use_point: bool = False,
per_channel: bool = False,
get_overlap_with_all_class: bool = False,
label: str = None,
**kwargs,
):
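    """Per-layer overlap sizes for traces seeded with the class at a given rank.

    For every image, a trace is reconstructed from the original input (when
    `attack_name` is "normal") or from the stored example for `attack_name`,
    with the seed fixed to the `rank`-th prediction. The per-layer size of the
    trace and of its intersection with the seed class trace are recorded
    (points, weights or edges depending on `use_point`/`use_weight`); with
    `get_overlap_with_all_class`, the overlap is computed against every class
    in `model_config.class_list()` instead. Results are cached as a CSV at
    `path`.
    """
    # The outer `label` argument is aliased here because `get_row` assigns its
    # own local `label` (the trace seed) and could not otherwise read the
    # closure variable.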
trace_label = label
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: generate_input_fn(class_id, image_id, model_config)
assert threshold is not None
if attack_name == "normal":
trace = reconstruct_trace_from_tf_v2(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
trace_fn=partial(
trace_fn,
select_seed_fn=lambda output: arg_sorted_topk(output, rank)[
rank - 1 : rank
],
),
model_dir=model_dir,
rank=rank,
)[0]
else:
adversarial_example = get_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
generate_input_fn=None,
class_id=class_id,
image_id=image_id,
label=trace_label,
).load()
if adversarial_example is None:
return {}
def adversarial_input_fn():
adversarial_input = model_config.normalize_fn(adversarial_example)
if not isinstance(adversarial_input, tf.data.Dataset):
adversarial_input = tf.data.Dataset.from_tensors(
adversarial_input
)
return adversarial_input
trace = reconstruct_trace_from_tf_v2(
model_fn=model_fn,
input_fn=adversarial_input_fn,
trace_fn=partial(
trace_fn,
select_seed_fn=lambda output: arg_sorted_topk(output, rank)[
rank - 1 : rank
],
),
model_dir=model_dir,
rank=rank,
)[0]
if trace is None:
return {}
label = trace.attrs[GraphAttrKey.SEED]
prediction = trace.attrs[GraphAttrKey.PREDICT]
def get_class_trace(class_id: int) -> AttrMap:
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
if use_point:
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
graph.op(graph.id(layer_name))
.output_nodes[0]
.name,
compact=True,
key=TraceKey.POINT,
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class",
example_trace_in_class,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
else:
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class",
example_trace_in_class,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
trace = compact_trace(trace, graph, per_channel=per_channel)
row = {}
if get_overlap_with_all_class:
for other_class_id in model_config.class_list():
row = {
**row,
**map_prefix(
get_overlap(other_class_id, trace), f"class{other_class_id}"
),
}
else:
row = {**row, **get_overlap(label, trace)}
row = {
"class_id": class_id,
"image_id": image_id,
"label": label,
"prediction": prediction,
**row,
}
# print(row)
return row
images = (
(class_id, image_id)
for image_id in range(0, model_config.image_num_per_class)
for class_id in model_config.class_list()
)
traces = list(
ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
)
assert len(traces) == model_config.class_num * model_config.image_num_per_class
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces).sort_values(by=["class_id", "image_id"])
return CsvIOAction(path, init_fn=get_overlap_ratio)
imagenet_real_metrics_per_layer_per_rank = partial(
real_metrics_per_layer_per_rank,
generate_input_fn=lambda class_id, image_id, model_config: imagenet_raw.test(
IMAGENET_RAW_DIR,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
),
)
cifar100_real_metrics_per_layer_per_rank = partial(
real_metrics_per_layer_per_rank,
generate_input_fn=lambda class_id, image_id, model_config: cifar100_main.test(
CIFAR100_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
cifar10_real_metrics_per_layer_per_rank = partial(
real_metrics_per_layer_per_rank,
generate_input_fn=lambda class_id, image_id, model_config: cifar10_main.test(
CIFAR10_TEST.data_dir,
batch_size=1,
transform_fn=lambda dataset: dataset.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1),
),
)
def resnet_50_imagenet_real_metrics_per_layer(rank: int = None, **kwargs):
return (
imagenet_real_metrics_per_layer_per_rank
if rank
else imagenet_real_metrics_per_layer_v2
)(model_config=RESNET_50, rank=rank, **kwargs)
def vgg_16_imagenet_real_metrics_per_layer(rank: int = None, **kwargs):
return (
imagenet_real_metrics_per_layer_per_rank
if rank
else imagenet_real_metrics_per_layer_v2
)(model_config=VGG_16, rank=rank, **kwargs)
def alexnet_imagenet_real_metrics_per_layer_targeted(target_class: int):
def metrics_fn(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
return imagenet_real_metrics_per_layer_targeted(
target_class=target_class,
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_trace_fn=class_trace_fn,
select_fn=select_fn,
path=path,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
per_node=per_node,
per_channel=per_channel,
use_weight=use_weight,
support_diff=support_diff,
**kwargs,
)
return metrics_fn
def resnet_50_imagenet_real_metrics_per_layer_targeted(target_class: int):
def metrics_fn(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
return imagenet_real_metrics_per_layer_targeted(
target_class=target_class,
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_trace_fn=class_trace_fn,
select_fn=select_fn,
path=path,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
per_node=per_node,
per_channel=per_channel,
use_weight=use_weight,
support_diff=support_diff,
**kwargs,
)
return metrics_fn
def imagenet_real_metrics_per_layer_targeted(
target_class: int,
model_config: ModelConfig,
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
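    """Per-layer overlap sizes for a fixed target class.

    Up to 40 correctly classified images of `target_class` and the first
    correctly classified image from every other class are evaluated. For each
    image, the per-layer overlap of its trace with its own class trace is
    recorded ("original.origin"), together with the overlap of a second trace
    seeded with `target_class` ("original.target"). Results are cached as a
    CSV at `path`.
    """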
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
if image_id == -1:
image_id = 0
while True:
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
try:
predicted_label = predict(
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
if predicted_label != class_id:
image_id += 1
else:
break
except IndexError:
return [{}] if per_node else {}
else:
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
trace = compact_trace(trace, graph, per_channel=per_channel)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
# if class_id not in class_traces:
# class_traces[class_id] = class_trace_fn(class_id).load()
# return class_traces[class_id]
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
("overlap_size_in_class", example_trace_in_class),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([target_class]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
                    get_overlap(target_class, trace_target_class), f"original.target"
),
}
if per_node:
                raise RuntimeError("per_node mode is not supported by this metric")
else:
row = {
"image_id": image_id,
"label": class_id,
"label_top5": label_top5,
"label_top5_value": label_top5_value,
"label_value": label_top5_value[0],
**row,
}
print(row)
return row
images = [(target_class, image_id) for image_id in range(0, 40)] + [
(class_id, -1) for class_id in range(0, 1000) if class_id != target_class
]
images = map(
lambda class_with_image: (
class_with_image[0]
if model_config.class_from_zero
else class_with_image[0] + 1,
class_with_image[1],
),
images,
)
traces = ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_negative_example_ideal_metrics_per_layer(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
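    """Per-layer ideal metrics with natural "negative examples" instead of
    adversarial ones.

    The counterpart of each correctly classified image is the next image of
    the same class that the model misclassifies; the "negative_example_top5"
    and "negative_example_out_of_top5" variants additionally require the true
    label to fall inside or outside the top 5 of that misclassified image.
    The same origin/target-seeded traces and per-layer overlap-region sizes as
    in the ideal per-layer metrics are then recorded and cached as a CSV at
    `path`.
    """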
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_image_id = image_id + 1
while True:
adversarial_input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
adversarial_image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
try:
adversarial_predicted_label_rank = get_rank(
class_id=predicted_label,
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
except IndexError:
return [{}] if per_node else {}
if adversarial_predicted_label_rank == 0:
adversarial_image_id += 1
else:
if attack_name == "negative_example":
stop = True
elif attack_name == "negative_example_top5":
if adversarial_predicted_label_rank < 5:
stop = True
else:
stop = False
elif attack_name == "negative_example_out_of_top5":
if adversarial_predicted_label_rank >= 5:
stop = True
else:
stop = False
else:
raise RuntimeError()
if stop:
break
else:
adversarial_image_id += 1
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
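            # Cache class traces so each one is loaded from disk at most once per example.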
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
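            # Partition the example trace against the base-class trace and the rest-class
            # trace, and record the per-layer size of each region of that partition.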
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace, layer_name, compact=True
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
merge_compact_trace_intersect(
trace_target_class, adversarial_trace
),
adversarial_input_fn,
),
f"shared.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
merge_compact_trace_intersect(
adversarial_trace_original_class, trace
),
adversarial_input_fn,
),
f"shared.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_top5_unique(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn=lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
input_fn=lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
# adversarial_example = alexnet_imagenet_example(
# attack_name=attack_name,
# attack_fn=attack_fn,
# generate_adversarial_fn=generate_adversarial_fn,
# class_id=class_id,
# image_id=image_id,
# ).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn=lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
def get_overlap(base_class_id: int, class_ids: List[int], trace: AttrMap):
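                # Overlap of the example trace with a single class trace,
                # computed on weights (TraceKey.WEIGHT) with intersection sizes.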
class_trace = get_class_trace(base_class_id)
return calc_all_overlap(
trace,
class_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
key=TraceKey.WEIGHT,
# key=TraceKey.EDGE,
)
row = {}
for k, base_class_id in zip(range(1, 6), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace),
f"original.top{k}",
),
}
for k, base_class_id in zip(range(1, 6), adversarial_label_top5):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id, adversarial_label_top5, adversarial_trace
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = resnet_50_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
def get_overlap(base_class_id: int, class_ids: List[int], trace: AttrMap):
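                # Subtract everything shared with the other top-k class traces from both
                # the class trace and the example trace before measuring overlap.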
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[class_trace_fn(class_id).load() for class_id in rest_class_ids]
)
class_trace = merge_compact_trace_diff(
class_trace_fn(base_class_id).load(), rest_class_trace
)
trace = merge_compact_trace_diff(trace, rest_class_trace)
return calc_all_overlap(
class_trace,
trace,
overlap_fn,
compact=True,
use_intersect_size=True,
)
row = {}
for k, base_class_id in zip(range(1, 3), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace),
f"original.top{k}",
),
}
for k, base_class_id in zip(range(1, 3), adversarial_label_top5):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id, adversarial_label_top5, adversarial_trace
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def lenet_mnist_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label)).skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
if class_id == adversarial_label:
return [{}] if per_node else {}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
def get_overlap(base_class_id: int, class_ids: List[int], trace: AttrMap):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[class_trace_fn(class_id).load() for class_id in rest_class_ids]
)
class_trace = merge_compact_trace_diff(
class_trace_fn(base_class_id).load(), rest_class_trace
)
trace = merge_compact_trace_diff(trace, rest_class_trace)
return calc_all_overlap(
class_trace,
trace,
overlap_fn,
compact=True,
use_intersect_size=True,
)
row = {}
for k, base_class_id in zip(range(1, 3), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace),
f"original.top{k}",
),
}
for k, base_class_id in zip(range(1, 3), adversarial_label_top5):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id, adversarial_label_top5, adversarial_trace
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 100)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_top5(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
if trace is None:
return {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
if adversarial_label not in label_top5:
# if np.intersect1d(label_top5, adversarial_label_top5).size == 0:
class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in label_top5]
)
adversarial_class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in adversarial_label_top5]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_error(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
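            # Only misclassified inputs are analyzed; correctly predicted images are skipped.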
if class_id == trace.attrs[GraphAttrKey.PREDICT]:
return {}
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
        return pd.DataFrame(traces)
    return CsvIOAction(path, init_fn=get_overlap_ratio)
# -*- coding: utf-8 -*-
import os
import cv2 # OpenCV
import pims
import numpy as np
import pandas as pd
import trackpy as tp
class VideoReader():
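    """Thin wrapper around a video file: reads metadata via pims, and uses
    OpenCV for cropping, exporting frames to PNG, and playback."""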
def __init__(self, filePath):
self.filePath = filePath
video = pims.open(self.filePath)
self.currentFrame_save = 0 # When saving, keeps track of progress
self.currentFrame_play = 0 # When playing, keeps track of progress
        # pims reports frame_shape as (height, width[, channels])
        self.height = video.frame_shape[0]
        self.width = video.frame_shape[1]
self.minHeight = 0
self.maxHeight = self.height
self.minWidth = 0
self.maxWidth = self.width
self.recordingSpeed = video.frame_rate
try:
self.frameCount = video.len()
self.realRecordedTime = video.get_time(video.len()-1)
self.recordingDate = video.frame_time_stamps[0][0]
        except Exception:
# Support for more video formats is needed
self.frameCount = 0
self.realRecordedTime = 0
self.recordingDate = 0
def cropVideo(self, minHeight='none', maxHeight='none',
minWidth='none', maxWidth='none'):
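        """Set the crop window; any bound left as 'none' falls back to the full frame."""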
if minHeight == 'none':
self.minHeight = 0
else:
self.minHeight = minHeight
if maxHeight == 'none':
self.maxHeight = self.height
else:
self.maxHeight = maxHeight
if minWidth == 'none':
self.minWidth = 0
else:
self.minWidth = minWidth
if maxWidth == 'none':
self.maxWidth = self.width
else:
self.maxWidth = maxWidth
def saveAsImages(self, img_folder, verbose=False):
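        """Export every (cropped) frame as a zero-padded PNG into img_folder."""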
video = cv2.VideoCapture(self.filePath)
self.currentFrame_save = 0
while(video.isOpened()):
            # Read the current frame; stop cleanly once the video runs out
            ret, frame = video.read()
            if not ret:
                break
            # Crop the frame to the region of interest (plain numpy slicing)
            frame_crop = frame[self.minHeight:self.maxHeight, self.minWidth:self.maxWidth]
            # Save the cropped frame as an image
path = img_folder + 'img' + "{:06d}".format(self.currentFrame_save) + '.png'
cv2.imwrite(path, frame_crop)
self.currentFrame_save+=1
if verbose == True:
percent = " - " + "{:.2f}".format(100*self.currentFrame_save/self.frameCount) + " %"
print("Frame nº: " + str(self.currentFrame_save)+" / "+str(self.frameCount) + percent)
        # Close the video stream
video.release()
def playVideo(self, fps=1):
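        """Play the (cropped) video at roughly `fps` frames per second until it ends or 'q' is pressed."""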
waitTime = int(1000/fps)
video = cv2.VideoCapture(self.filePath)
self.currentFrame_play = 0
while(video.isOpened()):
            # Read the current frame; stop cleanly once the video runs out
            ret, frame = video.read()
            if not ret:
                break
            # Crop the frame to the region of interest (plain numpy slicing)
            frame_crop = frame[self.minHeight:self.maxHeight, self.minWidth:self.maxWidth]
self.currentFrame_play+=1
            # Show the video on screen, waiting `waitTime` ms between frames,
            # until it reaches the end or the 'q' key is pressed
cv2.imshow('Video', frame_crop)
if cv2.waitKey(waitTime) & 0xFF == ord('q'):
break
# video.release()
# cv2.destroyAllWindows()
        # Close the video stream and any open windows
video.release()
cv2.destroyAllWindows()
def detectCircles(self, initialFrame, lastFrame):
# We initialize an empty (actually just a row with zeros) pandas DataFrame
# with the correct shape and types to store circles detected by OpenCV and
# later pass those to trackpy's linking function.
# TODO: I'm sure there's a simpler way of doing this.
A = np.zeros((1, 8), dtype=np.float64)
B = np.zeros((1, 1), dtype=np.int64)
names = ('x', 'y', 'mass', 'size', 'ecc', 'signal', 'raw_mass', 'ep')
        A = pd.DataFrame(A, index=('-1',), columns=names)
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
##################### Related to adding metadata ##########################
###########################################################################
class TestMetadata(object):
# test add_metadata - one after the other with dupe cols
# yes overwrite
def test_add_metadata_4(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=True)
assert {'3','4'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other with dupe cols
# don'e overwrite
def test_add_metadata_3(self):
sg = swan.SwanGraph()
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
sg.add_metadata(meta, overwrite=False)
assert {'2', '1'} == set(sg.adata.obs.cluster.tolist())
# test add_metadata - one after the other
def test_add_metadata_2(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
gtf = 'files/chr11_and_Tcf3.gtf'
sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
meta = 'files/chr11_and_Tcf3_metadata_2.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562', 'G0'],
['PB65_B017', '2', 'GM12878', 'M'],
['PB65_B018', '2', 'GM12878', 'S']]
cols = ['dataset', 'cluster', 'sample', 'cell_state']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
# test add_metadata - vanilla
def test_add_metadata(self):
sg = swan.SwanGraph()
# just gencode vM21 chr 11 and tcf3
# gtf = 'files/chr11_and_Tcf3.gtf'
# sg.add_annotation(gtf, verbose=True)
db = 'files/chr11_and_Tcf3_no_gname.db'
sg.add_transcriptome(db)
# print(sg.t_df)
ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
sg.add_abundance(ab)
meta = 'files/chr11_and_Tcf3_metadata.tsv'
sg.add_metadata(meta)
test = sg.adata.obs
data = [['D12', '1', 'K562'],
['PB65_B017', '2', 'GM12878'],
['PB65_B018', '2', 'GM12878']]
cols = ['dataset', 'cluster', 'sample']
ctrl = pd.DataFrame(data=data, columns=cols)
ctrl.index = ctrl.dataset
ctrl = ctrl[test.columns]
ctrl.sort_index(inplace=True)
test.sort_index(inplace=True)
print('test')
print(test)
print('control')
print(ctrl)
assert test.equals(ctrl)
###########################################################################
############### Related to high-level dataset addition ####################
###########################################################################
class TestDataset(object):
# TODO
# add_dataset, add_transcriptome, add_annotation
# tests add_transcriptome - added after adding an annotation
def test_add_transcriptome_2(self):
sg = swan.SwanGraph()
sg.add_annotation('files/test_full_annotation.gtf')
sg.add_transcriptome('files/test_full.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
ctrl_t_df = pd.DataFrame(data=data, columns=cols)
ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')
# first order to make them comparable
# sort all values by their IDs
sg.t_df.sort_index(inplace=True)
ctrl_t_df.sort_index(inplace=True)
# and order columns the same way
ctrl_t_df = ctrl_t_df[sg.t_df.columns]
print('test')
print(sg.t_df)
print('control')
print(ctrl_t_df)
assert (sg.t_df == ctrl_t_df).all(axis=0).all()
# loc_df - new location at chr2, 65
print('test')
print(sg.loc_df)
ind = (sg.loc_df.chrom=='chr2')&(sg.loc_df.coord==65)
temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
for i, entry in temp.iterrows():
assert entry.annotation == False
temp = sg.loc_df.loc[~ind]
for i, entry in temp.iterrows():
assert entry.annotation == True
# tests add_transcriptome - vanilla
def test_add_transcriptome_1(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
# tests add_annotation - transcriptome already in SG
def test_add_annotation_2(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.add_annotation('files/test_full_annotation.gtf')
# t_df
sg.t_df = sg.t_df[['tid', 'annotation']]
data = [['test1', True],
['test2', True],
['test3', False],
['test4', True],
['test5', True],
['test6', True]]
cols = ['tid', 'annotation']
        ctrl_t_df = pd.DataFrame(data=data, columns=cols)