prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
import warnings
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy.interpolate import interp1d
from astropy import units as u
from tardis import constants as const
from tardis.montecarlo.montecarlo import formal_integral
from tardis.montecarlo.spectrum import TARDISSpectrum
class IntegrationError(Exception):
pass
class FormalIntegrator(object):
def __init__(self, model, plasma, runner, points=1000):
self.model = model
self.plasma = plasma
self.runner = runner
self.points = points
def check(self, raises=True):
'''
A method that determines if the formal integral can be performed with
the current configuration settings
The function returns False if the configuration conflicts with the
required settings. If raises evaluates to True, an
IntegrationError is raised instead.
'''
def raise_or_return(message):
if raises:
raise IntegrationError(message)
else:
warnings.warn(message)
return False
for obj in (self.model, self.plasma, self.runner):
if obj is None:
return raise_or_return(
'The integrator is missing either model, plasma or '
'runner. Please make sure these are provided to the '
'FormalIntegrator.'
)
if self.runner.line_interaction_type not in ['downbranch', 'macroatom']:
return raise_or_return(
'The FormalIntegrator currently only works for '
'line_interaction_type == "downbranch" '
'and line_interaction_type == "macroatom"'
)
return True
def calculate_spectrum(self, frequency, points=None,
interpolate_shells=-1, raises=True):
# Very crude implementation
# The c extension needs bin centers (or something similar)
# while TARDISSpectrum needs bin edges
self.check(raises)
N = points or self.points
self.interpolate_shells = interpolate_shells
frequency = frequency.to('Hz', u.spectral())
luminosity = u.Quantity(
formal_integral(
self,
frequency,
N),
'erg'
) * (frequency[1] - frequency[0])
# Ugly hack to convert to 'bin edges'
frequency = u.Quantity(
np.concatenate([
frequency.value,
[
frequency.value[-1] + np.diff(frequency.value)[-1]
]]),
frequency.unit)
return TARDISSpectrum(
frequency,
luminosity
)
def make_source_function(self):
"""
Calculates the source function using the line absorption rate estimator `Edotlu_estimator`
Formally it calculates the expression ( 1 - exp(-tau_ul) ) S_ul but this product is what we need later,
so there is no need to factor out the source function explicitly.
Parameters
----------
model : tardis.model.Radial1DModel
Returns
-------
Numpy array containing ( 1 - exp(-tau_ul) ) S_ul ordered by wavelength of the transition u -> l
"""
model = self.model
plasma = self.plasma
runner = self.runner
atomic_data = self.plasma.atomic_data
macro_ref = atomic_data.macro_atom_references
macro_data = atomic_data.macro_atom_data
no_lvls = len(atomic_data.levels)
no_shells = len(model.w)
if runner.line_interaction_type == 'macroatom':
internal_jump_mask = (macro_data.transition_type >= 0).values
ma_int_data = macro_data[internal_jump_mask]
internal = plasma.transition_probabilities[internal_jump_mask]
source_level_idx = ma_int_data.source_level_idx.values
destination_level_idx = ma_int_data.destination_level_idx.values
Edotlu_norm_factor = (1 / (runner.time_of_simulation * model.volume))
exptau = 1 - np.exp(- plasma.tau_sobolevs)
Edotlu = Edotlu_norm_factor * exptau * runner.Edotlu_estimator
# The following may be achieved by calling the appropriate plasma
# functions
Jbluelu_norm_factor = (const.c.cgs * model.time_explosion /
(4 * np.pi * runner.time_of_simulation *
model.volume)).to("1/(cm^2 s)").value
# Jbluelu should already be in the correct order, i.e. by wavelength of
# the transition l->u
Jbluelu = runner.j_blue_estimator * Jbluelu_norm_factor
upper_level_index = atomic_data.lines.index.droplevel('level_number_lower')
e_dot_lu = pd.DataFrame(Edotlu, index=upper_level_index)
e_dot_u = e_dot_lu.groupby(level=[0, 1, 2]).sum()
e_dot_u_src_idx = macro_ref.loc[e_dot_u.index].references_idx.values
if runner.line_interaction_type == 'macroatom':
C_frame = pd.DataFrame(
columns=np.arange(no_shells), index=macro_ref.index
)
q_indices = (source_level_idx, destination_level_idx)
for shell in range(no_shells):
Q = sp.coo_matrix(
(internal[shell], q_indices), shape=(no_lvls, no_lvls)
)
inv_N = sp.identity(no_lvls) - Q
e_dot_u_vec = np.zeros(no_lvls)
e_dot_u_vec[e_dot_u_src_idx] = e_dot_u[shell].values
C_frame[shell] = sp.linalg.spsolve(inv_N.T, e_dot_u_vec)
e_dot_u.index.names = ['atomic_number', 'ion_number', 'source_level_number'] # To make the q_ul e_dot_u product work, could be cleaner
transitions = atomic_data.macro_atom_data[atomic_data.macro_atom_data.transition_type == -1].copy()
transitions_index = transitions.set_index(['atomic_number', 'ion_number', 'source_level_number']).index.copy()
tmp = plasma.transition_probabilities[(atomic_data.macro_atom_data.transition_type == -1).values]
q_ul = tmp.set_index(transitions_index)
t = model.time_explosion.value
lines = atomic_data.lines.set_index('line_id')
wave = lines.wavelength_cm.loc[transitions.transition_line_id].values.reshape(-1,1)
if runner.line_interaction_type == 'macroatom':
e_dot_u = C_frame.loc[e_dot_u.index]
att_S_ul = (wave * (q_ul * e_dot_u) * t / (4 * np.pi))
result = pd.DataFrame(att_S_ul.values, index=transitions.transition_line_id.values)
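# Illustrative sketch of the pd.DataFrame call above: wrap an array of values in a DataFrame
# indexed by the transition line id. `line_ids` and `values` are made-up stand-ins for
# transitions.transition_line_id.values and att_S_ul.values; only numpy and pandas are assumed.
import numpy as np
import pandas as pd
line_ids = np.array([101, 102, 103])            # stand-in for transition line ids
values = np.random.rand(3, 2)                   # stand-in for (1 - exp(-tau_ul)) S_ul per shell
frame = pd.DataFrame(values, index=line_ids)    # one row per transition, indexed by line id
print(frame)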
from scipy.stats import norm
from pandas_datareader import data as wb
from matplotlib.figure import Figure
from matplotlib.backends.backend_svg import FigureCanvasSVG
from matplotlib.backends.backend_agg import FigureCanvasAgg
from pandas_datareader._utils import RemoteDataError
from flask import request, Response
from flask import Flask, render_template, jsonify, url_for
from pandas_datareader import data as pdr
import yfinance as yf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import io
import os
import random
import requests
import matplotlib
matplotlib.use('Agg')
app = Flask(__name__, static_url_path='/static')
@app.route('/', methods=['GET', 'POST'])
def startPage():
if request.method == 'POST':
stockTicker = request.form.get("tradingSymbol")
stockTick = stockTicker
time = request.form.get("timePeriod")
stockTicker = request.args.get("stockTicker", stockTicker)
data = pd.DataFrame()
yf.pdr_override()
data[stockTick] = wb.DataReader(stockTick.strip(
'\n'), data_source="yahoo", start='2011-1-1')['Adj Close']
# percent change of asset price
log_returns = np.log(1 + data.pct_change())
# graph showing growth over time beginning from 2011
data.plot(figsize=(10, 6))
# graph of log returns of input ticker
# returns are normally distributed and have a consistent mean
log_returns.plot(figsize=(10, 6))
# calculations
averageDailyReturn = log_returns.mean()
variance = log_returns.var()
drift = averageDailyReturn-(variance/2)
standardDeviation = log_returns.std()
# Brownian Motion equation
# r = drift + standardDeviation * Z; daily return = e^r
# prediction of future stock price based on simulation below using numpy for storing data into array
np.array(drift)
drift.values
standardDeviation.values
# Brownian motion variable: the distance from the mean, measured in standard deviations (z-score)
norm.ppf(0.95)
# 10 x 2 Matrix
x = np.random.rand(10, 2)
norm.ppf(x)
# stores distances from the mean value, 0, into the 10 x 2 matrix
Z = norm.ppf(np.random.rand(10, 2))
# time interval for the stock price forecast
timeInterval = 365
iterations = 10
# r = drift + standardDeviation * Z; daily return = e^r
# 10 sets of 365 random future stock prices of the ticker symbol
dailyReturns = np.exp(drift.values + standardDeviation.values *
norm.ppf(np.random.rand(timeInterval, iterations)))
# returns into price points
presentPrice = data.iloc[-1]
priceList = np.zeros_like(dailyReturns)
priceList[0] = presentPrice
# iterate over the time interval of 365 days
for t in range(1, timeInterval):
priceList[t] = priceList[t-1] * dailyReturns[t]
for i in range(len(priceList[-1])):
priceList[-1][i] = round(priceList[-1][i], 2)
# inputData = market.getData(tradingSymbol)
# a flask route function returns html (or a html render_template)
# mainFunction(stockTicker)
# add variables and their template equiv after the renderTemplate
return render_template('searchPage.html', stockTick=stockTicker, priceList=priceList)
return render_template('index.html', stockTick="")
@app.route("/matplot-as-image-<stockTick>.png")
def graph(stockTick):
data = pd.DataFrame()
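# Illustrative, self-contained sketch of the geometric-Brownian-motion simulation performed in
# startPage() above. Synthetic log returns replace the Yahoo download, so the numbers are
# illustrative only; numpy and scipy are assumed to be installed.
import numpy as np
from scipy.stats import norm
log_returns = np.random.normal(0.0005, 0.01, size=1000)  # stand-in for np.log(1 + data.pct_change())
drift = log_returns.mean() - log_returns.var() / 2
stdev = log_returns.std()
timeInterval, iterations = 365, 10
dailyReturns = np.exp(drift + stdev * norm.ppf(np.random.rand(timeInterval, iterations)))
priceList = np.zeros_like(dailyReturns)
priceList[0] = 100.0                                      # stand-in for the last observed price
for t in range(1, timeInterval):
    priceList[t] = priceList[t - 1] * dailyReturns[t]
print(np.round(priceList[-1], 2))                         # ten simulated prices one year ahead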
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Union, Sequence, Optional, Tuple
import pandas as pd
from numpy import nan
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.utils.miscellaneous.to_list_conversion import convert_to_list
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.futures.future_contract import FutureContract
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.futures.futures_adjustment_method import FuturesAdjustmentMethod
from qf_lib.containers.series.prices_series import PricesSeries
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.data_providers.helpers import cast_data_array_to_proper_type
class FuturesChain(pd.Series):
"""Class which facilitates the futures contracts management. Its main functionality is provided by the
get_price function, which returns a PricesDataFrame (PricesSeries) of prices for the given FutureTicker,
automatically managing the contracts chaining.
Parameters
------------
future_ticker: FutureTicker
The FutureTicker used to download the futures contracts, further chained and joined in order to obtain the
result of get_price function.
data_provider: DataProvider
Reference to the data provider, necessary to download latest prices, returned by the get_price function.
In case of backtests, the DataHandler wrapper should be used to avoid looking into the future.
method: FuturesAdjustmentMethod
FuturesAdjustmentMethod corresponding to one of two available methods of chaining the futures contracts.
"""
def __init__(self, future_ticker: FutureTicker, data_provider: "DataProvider", method: FuturesAdjustmentMethod =
FuturesAdjustmentMethod.NTH_NEAREST):
"""
The index consists of expiry dates of the future contracts.
"""
super().__init__(data=None, index=None, dtype=object, name=None, copy=False, fastpath=False)
self._future_ticker = future_ticker # type: FutureTicker
self._data_provider = data_provider # type: "DataProvider"
# Used for optimization purposes
self._specific_ticker = None # type: str
self._chain = None # type: PricesDataFrame
self._first_cached_date = None # type: datetime
self._futures_adjustment_method = method
self._cached_fields = set()
def get_price(self, fields: Union[PriceField, Sequence[PriceField]], start_date: datetime, end_date: datetime,
frequency: Frequency = Frequency.DAILY) -> Union[PricesDataFrame, PricesSeries]:
"""Combines consecutive specific FutureContracts data, in order to obtain a chain of prices.
Parameters
----------
fields: PriceField, Sequence[PriceField]
Data fields, corresponding to Open, High, Low, Close prices and Volume, that should be returned by the function.
start_date: datetime
First date for which the chain needs to be created.
end_date: datetime
Last date for which the chain needs to be created.
frequency: Frequency
Frequency of the returned data, by default set to daily frequency.
Returns
---------
PricesDataFrame, PricesSeries
"""
# 1 - Check if the chain was generated at least once, if not - preload the necessary data using the
# self._preload_data_and_generate_chain function, and then generate the chain of prices,
# otherwise - store the last and first available dates from the chain
fields_list, _ = convert_to_list(fields, PriceField)
if self._chain is not None and not self._chain.empty:
last_date_in_chain = self._chain.index[-1]
first_date_in_chain = self._first_cached_date
else:
return self._preload_data_and_generate_chain(fields, start_date, end_date, frequency).squeeze()
# 2 - Check if all the necessary data is available (if start_date >= first_cached_date) and cached fields
# include all fields from fields_list, if not - preload it by initializing the Futures Chain
uncached_fields = set(fields_list) - self._cached_fields
if start_date < first_date_in_chain or uncached_fields:
self._preload_data_and_generate_chain(fields, start_date, end_date, frequency)
# 3 - Download the prices since the last date available in the chain
if last_date_in_chain == end_date:
return self._chain[fields_list].loc[start_date:end_date].squeeze()
prices_df: PricesDataFrame = self._data_provider.get_price(self._future_ticker.get_current_specific_ticker(),
fields_list, last_date_in_chain, end_date)
assert isinstance(prices_df, PricesDataFrame)
# If no changes to the PricesDataFrame should be applied return the existing chain
if prices_df.empty:
return self._chain[fields_list].loc[start_date:end_date].squeeze()
prices_after_last_date_in_chain = prices_df.iloc[1:] if prices_df.index[0] == last_date_in_chain else prices_df
if prices_after_last_date_in_chain.empty:
return self._chain[fields_list].loc[start_date:end_date].squeeze()
# 4 - Check if between last_date_in_chain and end_date an expiration date occurred
def expiration_day_occurred() -> bool:
"""
Returns True if an expiration day occurred since last price was added to the chain, otherwise it returns
False.
If the price for the last_date_in_chain in self._chain differs from the value for the same date in prices_df
it means that the expiration day occurred a few days ago, but no data was shifted yet (e.g. it happened on
saturday and thus there was no new data for the next ticker, which could have been used for data shifting)
"""
different_ticker = self._specific_ticker != self._future_ticker.ticker
if last_date_in_chain in prices_df.index:
different_prices = not self._chain[fields_list].loc[last_date_in_chain].equals(
prices_df[fields_list].loc[last_date_in_chain])
else:
different_prices = True
return different_ticker or different_prices
if expiration_day_occurred():
# After expiration day the FutureChain has to be regenerated in case of both FuturesAdjustmentMethods, also
# in case of the N-th nearest contract method.
# This is caused by the use of the last_date_in_chain variable to indicate the beginning of the prices data
# frame, that need to be appended to the chain. An exemplary problem may occur in the following situation:
# Let C1 and C2 denote two consecutive futures contracts, and let C1 expire on the 16th of July. If no
# prices for C1 will be available since e.g. 13th July (exclusive), then on the 16th July the last_date_in_
# chain will still point to 13th. Therefore, the prices_df will contain prices for C2 within e.g. 14 - 16th
# July. As the expiration of C1 occurred on the 16th, the computed prices_df data frame cannot be appended
# to the chain and the chain should be regenerated.
return self._preload_data_and_generate_chain(fields, start_date, end_date, frequency).squeeze()
else:
# Append the new prices to the existing PricesDataFrame chain
self._chain = self._chain.append(prices_after_last_date_in_chain, sort=False)
self._specific_ticker = self._future_ticker.ticker
return self._chain[fields_list].loc[start_date:end_date].squeeze()
def _preload_data_and_generate_chain(self, fields: Union[PriceField, Sequence[PriceField]], start_date: datetime,
end_date: datetime, frequency: Frequency) -> \
Union[PricesDataFrame, PricesSeries]:
"""
Function, which at first preloads all of the necessary data, by initializing the Futures Chain object with
the self._initialize_futures_chain function. Afterwards, it generates the PricesDataFrame (PricesSeries)
using the self._generate_chain function and updates the self._specific_ticker. It returns the resulting
PricesDataFrame (PricesSeries).
At first, it initializes the FuturesChain with all the necessary data. If the selected futures adjustment
method is the BACK_ADJUST, verify whether the fields contain the PriceField.Open and PriceField.Close
and add them if needed.
"""
fields_list, _ = convert_to_list(fields, PriceField)
necessary_fields = set(fields_list).union({PriceField.Open, PriceField.Close})
necessary_fields = necessary_fields.union(self._cached_fields)
necessary_fields = list(necessary_fields)
self._initialize_futures_chain(necessary_fields, start_date, end_date, frequency)
# Generate the PricesDataFrame (PricesSeries)
self._chain = self._generate_chain(fields, start_date, end_date)
# Update the specific ticker
self._specific_ticker = self._future_ticker.ticker
self._cached_fields = set(fields_list)
return self._chain[fields_list].loc[start_date:end_date].squeeze()
def _generate_chain(self, fields, start_time: datetime, end_time: datetime) -> PricesDataFrame:
""" Returns a chain of futures combined together using a certain method. """
# Verify the parameters values
N = self._future_ticker.get_N()
days_before_exp_date = self._future_ticker.get_days_before_exp_date()
fields, got_single_field = convert_to_list(fields, PriceField)
if N < 1 or days_before_exp_date < 1:
raise ValueError("The number of the contract and the number of days before expiration date should be "
"greater than 0.")
# Shift the index and data according to the start time and end time values. We shift the number of days by 1,
# so that the days_before_exp_date=1 will use the prices on the expiration date from the newer contract.
shifted_index = pd.DatetimeIndex(self.index)
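# Illustrative sketch of the pd.DatetimeIndex call above, plus the kind of one-day shift the
# surrounding comment describes. The expiry dates are made up.
import pandas as pd
expiry_dates = pd.DatetimeIndex(["2021-03-19", "2021-06-18", "2021-09-17"])
shifted = expiry_dates - pd.Timedelta(days=1)   # e.g. days_before_exp_date=1 rolls back one day
print(shifted)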
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
import os
import io
from io import StringIO
def load_model(code_dir):
model_path = 'saved_model.pb'
model = tf.keras.models.load_model('./recommender_model/',
custom_objects = None,
compile = True,
options = None)
print ("model loaded")
return model
def score(data, model, **kwargs):
predictions = model.predict(data)
print (predictions)
s = pd.DataFrame(predictions)
import pandas as pd
from pandas_ta import percent_return
from sklearn.model_selection import train_test_split
import yfinance as yf
from catch22 import catch22_all
from tuneta.tune_ta import TuneTA
if __name__ == "__main__":
# Download data set from yahoo
X = yf.download("SPY", period="10y", interval="1d", auto_adjust=True)
# Add catch22 30 day rolling features for demonstration purposes
c22 = [pd.Series(catch22_all(r)['values']) for r in X.Close.rolling(30) if len(r) == 30]
features = pd.concat(c22, axis=1)
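# Illustrative sketch of the pd.concat(..., axis=1) pattern above, using small synthetic Series
# in place of the rolling catch22 feature vectors.
import pandas as pd
s1 = pd.Series([1.0, 2.0, 3.0])
s2 = pd.Series([4.0, 5.0, 6.0])
features_demo = pd.concat([s1, s2], axis=1)   # each Series becomes one feature column
print(features_demo.shape)                    # (3, 2)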
# -*- coding: utf-8 -*-
"""Cleaning US Census Data.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1HK9UVhluQDGY9TG6OjMq6uRGYD4nlkOz
#Cleaning US Census Data
"""
#Importing datasets
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import glob
"""Using glob, loop through the census files available and load them into DataFrames. Then, concatenate all of those DataFrames together into one DataFrame"""
us_census = glob.glob('states*.csv')
df_list = []
for filename in us_census:
data = pd.read_csv(filename)
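# Illustrative, self-contained sketch of the glob -> read_csv -> concat pattern used above. Two
# tiny CSVs are written to a temporary directory so the snippet runs without the states*.csv files.
import glob
import os
import tempfile
import pandas as pd
tmp = tempfile.mkdtemp()
pd.DataFrame({"State": ["A"], "TotalPop": [10]}).to_csv(os.path.join(tmp, "states0.csv"), index=False)
pd.DataFrame({"State": ["B"], "TotalPop": [20]}).to_csv(os.path.join(tmp, "states1.csv"), index=False)
frames = [pd.read_csv(f) for f in glob.glob(os.path.join(tmp, "states*.csv"))]
combined = pd.concat(frames, ignore_index=True)
print(combined)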
import logging
import os
import re
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Any, Tuple, Optional
import pandas as pd
from python import TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX, TOKEN_IDX_TO, \
TOKEN_IDX_FROM, TOKEN, MENTION_ID, EVENT, MENTION_TYPE, DESCRIPTION, MENTION_TYPES_ACTION
logger = logging.getLogger()
def read_xml(xml_path) -> Tuple[Any, Any, Any, Any, Any]:
tree = ET.parse(xml_path)
# 1: read document info
root = tree.getroot()
assert root.tag == "Document"
doc_filename = root.attrib["doc_name"]
doc_id = root.attrib["doc_id"]
m = re.match(r"(?P<topic_id>\d+)_(?P<document_number>\d+)(?P<subtopic>\w+)\.xml", doc_filename)
topic_id = m.group("topic_id")
subtopic = m.group("subtopic")
document_number = int(m.group("document_number"))
documents_index = pd.MultiIndex.from_tuples([(topic_id, subtopic, doc_id)],
names=[TOPIC_ID, SUBTOPIC, DOCUMENT_ID])
documents = pd.DataFrame({DOCUMENT_ID: pd.Series(doc_id, index=documents_index),
DOCUMENT_NUMBER: pd.Series(document_number, index=documents_index)})
# 2: read document content
contents_rows = []
contents_index = []
for token_elmt in root.iter("token"):
# index content
sentence_idx = int(token_elmt.attrib["sentence"])
token_idx = int(token_elmt.attrib["number"])
contents_index.append((doc_id, sentence_idx, token_idx))
# content
token = token_elmt.text
contents_rows.append({TOKEN: token})
contents_index = pd.MultiIndex.from_tuples(contents_index, names=[DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX])
contents = pd.DataFrame(contents_rows, index=contents_index)
# 3: read markables / mentions and entity/event descriptions
mentions_rows = []
mentions_index = []
entities_events = []
for markable in root.find("Markables").getchildren():
# Don't know what this is, skip it
if markable.tag == "UNKNOWN_INSTANCE_TAG":
continue
mention_id = int(markable.attrib["m_id"])
# there are markables without spans, these are descriptions of entities / events which we want to keep
if "TAG_DESCRIPTOR" in markable.attrib.keys():
if "instance_id" in markable.attrib.keys():
entities_events.append({
EVENT: markable.attrib["instance_id"],
DESCRIPTION: markable.attrib["TAG_DESCRIPTOR"]
})
continue
token_ids = [int(anchor.attrib["t_id"]) for anchor in markable.iter("token_anchor")]
token_ids_from, token_ids_to = min(token_ids), max(token_ids)
# the token_ids are cumulative token indexes, remove their cumulative nature
token_indexes = contents.index.get_level_values(TOKEN_IDX).values
token_idx_from = token_indexes[
token_ids_from - 1] # -1 because token_ids start at 1, so we need to access index 0 in the dataframe to find t_id 1
token_idx_to = token_indexes[
token_ids_to - 1] + 1 # additionally +1 here because we want mention spans represented as intervals [from, to[
sentence_idx = contents.index.get_level_values(SENTENCE_IDX).values[token_ids_from - 1]
# resolve non-contiguous mentions
is_non_contiguous_mention = len(token_ids) < token_idx_to - token_idx_from
if is_non_contiguous_mention:
logger.info("Converted non-contiguous mention to contiguous mention.")
mentions_index.append((doc_id, mention_id))
mentions_rows.append({SENTENCE_IDX: sentence_idx,
TOKEN_IDX_FROM: token_idx_from,
TOKEN_IDX_TO: token_idx_to,
MENTION_TYPE: markable.tag})
mentions_index = pd.MultiIndex.from_tuples(mentions_index, names=[DOCUMENT_ID, MENTION_ID])
mentions = pd.DataFrame(mentions_rows, index=mentions_index)
entities_events = pd.DataFrame(entities_events).set_index(EVENT)
# 4. read relations (clusters)
clusters_rows = []
for relation in root.find("Relations").getchildren():
tags_of_interest = ["CROSS_DOC_COREF", "INTRA_DOC_COREF"]
if relation.tag not in tags_of_interest:
logger.info("Unexpected tag " + relation.tag)
raise NotImplementedError
# There are relations with tags INTRA_DOC_COREF and CROSS_DOC_COREF. The cross-doc ones have a "note" attribute.
if "note" in relation.attrib:
# this is the case for CROSS_DOC_COREF tags
relation_id = relation.attrib["note"]
else:
# this is the case for INTRA_DOC_COREF tags
relation_id = doc_id + "_" + relation.attrib["r_id"]
for mention in relation.iter("source"):
mention_id = int(mention.attrib["m_id"])
clusters_rows.append({EVENT: relation_id, DOCUMENT_ID: doc_id, MENTION_ID: mention_id})
clusters = pd.DataFrame(clusters_rows)
# 5. create relations for singletons
# In ECB plus, there are ACTION_OCCURRENCE markables which are not assigned to a relation. These are singletons. We
# add one entry for each singleton to `clusters` to ensure consistency. Note that the opposite also exists:
# singleton mentions which are marked as participating in a cross-doc coref relation, but there is no second
# mention for this relation.
if clusters.empty:
singletons = mentions.index.to_frame().reset_index(drop=True)
else:
# This can most likely be done in a nicer way using some index difference...
outer = pd.merge(mentions, clusters, left_index=True, right_on=[DOCUMENT_ID, MENTION_ID], how="outer")
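# Illustrative sketch of the outer-merge trick the pd.merge call above sets up: find mentions
# that appear in no coreference cluster. For simplicity the keys are plain columns here rather
# than the mentions index, and the data is made up.
import pandas as pd
mentions_demo = pd.DataFrame({"doc_id": ["d1", "d1", "d2"], "m_id": [1, 2, 1], "sentence_idx": [0, 3, 5]})
clusters_demo = pd.DataFrame({"event": ["ACT1"], "doc_id": ["d1"], "m_id": [1]})
outer_demo = pd.merge(mentions_demo, clusters_demo, on=["doc_id", "m_id"], how="outer", indicator=True)
singletons_demo = outer_demo.loc[outer_demo["_merge"] == "left_only", ["doc_id", "m_id"]]
print(singletons_demo)   # mentions without any relation are treated as singletons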
from datetime import datetime
import pandas as pd
import os
import re
from .transformers_map import transformers_map
def build_data_frame(backtest: dict, csv_path: str):
"""Creates a Pandas DataFame with the provided backtest. Used when providing a CSV as the datafile
Parameters
----------
backtest: dict, provides instructions on how to build the dataframe
csv_path: string, absolute path of where to find the data file
Returns
-------
object, A Pandas DataFrame indexed by date
"""
df = load_basic_df_from_csv(csv_path)
if df.empty:
raise Exception("Dataframe is empty. Check the start and end dates")
df = prepare_df(df, backtest)
return df
def load_basic_df_from_csv(csv_path: str):
"""Loads a dataframe from a csv
Parameters
----------
csv_path: string, path to the csv so it can be read
Returns
df, A basic dataframe with the data from the csv
"""
if not os.path.isfile(csv_path):
raise Exception(f"File not found: {csv_path}")
df = pd.read_csv(csv_path, header=0)
df = standardize_df(df)
return df
def prepare_df(df: pd.DataFrame, backtest: dict):
"""Prepares the provided dataframe for a backtest by applying the datapoints and splicing based on the given backtest.
Useful when loading an existing dataframe (ex. from a cache).
Parameters
----------
df: DataFrame, should have all the open, high, low, close, volume data set as headers and indexed by date
backtest: dict, provides instructions on how to build the dataframe
Returns
------
df: DataFrame, with all the datapoints as column headers and trimmed to the provided time frames
"""
datapoints = backtest.get("datapoints", [])
df = apply_transformers_to_dataframe(df, datapoints)
trailing_stop_loss = backtest.get("trailing_stop_loss", 0)
if trailing_stop_loss:
df["trailing_stop_loss"] = df["close"].cummax() * (
1 - float(trailing_stop_loss)
)
chart_period = backtest.get("chart_period", "1Min")
start_time = backtest.get("start")
stop_time = backtest.get("stop")
df = apply_charting_to_df(df, chart_period, start_time, stop_time)
return df
def apply_charting_to_df(
df: pd.DataFrame, chart_period: str, start_time: str, stop_time: str
):
"""Modifies the dataframe based on the chart_period, start dates and end dates
Parameters
----------
df: dataframe with data loaded
chart_period: string, describes how often to sample data, default is '1Min' (1 minute)
see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
start_time: datestring in YYYY-MM-DD HH:MM (ex. 2020-08-31 04:00) of when to begin the backtest
stop_time: datestring of YYYY-MM-DD HH:MM when to stop the backtest
Returns
DataFrame, a sorted dataframe ready for consumption by run_backtest
"""
if df.index.dtype != "datetime64[ns]":
headers = df.columns.values.tolist()
headers.extend([df.index.name])
if "date" not in headers:
raise Exception(
"Data does not have a date column. Headers must include date, open, high, low, close, volume."
)
time_unit = detect_time_unit(df.date[1])
df.date = pd.to_datetime(df.date, unit=time_unit)
df.set_index("date", inplace=True)
if start_time:
if isinstance(start_time, datetime) or type(start_time) is int:
time_unit = detect_time_unit(start_time)
start_time = pd.to_datetime(start_time, unit=time_unit)
start_time = start_time.strftime("%Y-%m-%d %H:%M:%S")
if stop_time:
if isinstance(stop_time, datetime) or type(stop_time) is int:
time_unit = detect_time_unit(stop_time)
stop_time = pd.to_datetime(stop_time, unit=time_unit)
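# Illustrative sketch of pd.to_datetime with an explicit epoch unit, the call completed above.
# detect_time_unit is assumed to map epoch magnitudes to "s"/"ms"/"ns".
import pandas as pd
epoch_seconds = 1598846400                    # 2020-08-31 04:00:00 UTC
ts = pd.to_datetime(epoch_seconds, unit="s")  # the unit must match how the epoch was recorded
print(ts)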
# flake8: noqa: F841
import tempfile
from typing import Any, Dict, List, Union
from pandas.io.parsers import TextFileReader
import numpy as np
import pandas as pd
from . import check_series_result, check_dataframe_result
def test_types_to_datetime() -> None:
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
r1: pd.Series = pd.to_datetime(df)
r2: pd.Series = pd.to_datetime(df, unit="s", origin="unix", infer_datetime_format=True)
r3: pd.Series = pd.to_datetime(df, unit="ns", dayfirst=True, utc=None, format="%M:%D", exact=False)
r4: pd.DatetimeIndex = pd.to_datetime([1, 2], unit="D", origin=pd.Timestamp("01/01/2000"))
r5: pd.DatetimeIndex = pd.to_datetime([1, 2], unit="D", origin=3)
r6: pd.DatetimeIndex = pd.to_datetime(["2022-01-03", "2022-02-22"])
r7: pd.DatetimeIndex = pd.to_datetime(pd.Index(["2022-01-03", "2022-02-22"]))
r8: pd.Series = pd.to_datetime({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
def test_types_concat() -> None:
s = pd.Series([0, 1, -10])
s2 = pd.Series([7, -5, 10])
check_series_result(pd.concat([s, s2]))
check_dataframe_result(pd.concat([s, s2], axis=1))
check_series_result(pd.concat([s, s2], keys=["first", "second"], sort=True))
check_series_result(pd.concat([s, s2], keys=["first", "second"], names=["source", "row"]))
# Depends on the axis
rs1: Union[pd.Series, pd.DataFrame] = pd.concat({"a": s, "b": s2})
rs1a: Union[pd.Series, pd.DataFrame] = pd.concat({"a": s, "b": s2}, axis=1)
rs2: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, 2: s2})
rs2a: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, 2: s2}, axis=1)
rs3: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, None: s2})
rs3a: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, None: s2}, axis=1)
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
df2 = pd.DataFrame(data={"col1": [10, 20], "col2": [30, 40]})
check_dataframe_result(pd.concat([df, df2]))
check_dataframe_result(pd.concat([df, df2], axis=1))
check_dataframe_result(pd.concat([df, df2], keys=["first", "second"], sort=True))
check_dataframe_result(pd.concat([df, df2], keys=["first", "second"], names=["source", "row"]))
result: pd.DataFrame = pd.concat({"a": pd.DataFrame([1, 2, 3]), "b": pd.DataFrame([4, 5, 6])}, axis=1)
result2: Union[pd.DataFrame, pd.Series] = pd.concat({"a": pd.Series([1, 2, 3]), "b": pd.Series([4, 5, 6])}, axis=1)
rdf1: pd.DataFrame = pd.concat({"a": df, "b": df2})
rdf2: pd.DataFrame = pd.concat({1: df, 2: df2})
rdf3: pd.DataFrame = pd.concat({1: df, None: df2})
rdf4: pd.DataFrame = pd.concat(list(map(lambda x: s2, ["some_value", 3])), axis=1)
adict = {"a": df, 2: df2}
rdict: pd.DataFrame = pd.concat(adict)
def test_types_json_normalize() -> None:
data1: List[Dict[str, Any]] = [
{"id": 1, "name": {"first": "Coleen", "last": "Volk"}},
{"name": {"given": "Mose", "family": "Regner"}},
{"id": 2, "name": "<NAME>"},
]
df1: pd.DataFrame = pd.json_normalize(data=data1)
df2: pd.DataFrame = pd.json_normalize(data=data1, max_level=0, sep=";")
df3: pd.DataFrame = pd.json_normalize(data=data1, meta_prefix="id", record_prefix="name", errors="raise")
df4: pd.DataFrame = pd.json_normalize(data=data1, record_path=None, meta="id")
data2: Dict[str, Any] = {"name": {"given": "Mose", "family": "Regner"}}
df5: pd.DataFrame = pd.json_normalize(data=data2)
def test_types_read_csv() -> None:
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
csv_df: str = df.to_csv()
with tempfile.NamedTemporaryFile(delete=False) as file:
df.to_csv(file.name)
file.close()
df2: pd.DataFrame = pd.read_csv(file.name)
df3: pd.DataFrame = pd.read_csv(file.name, sep="a", squeeze=False)
df4: pd.DataFrame = pd.read_csv(file.name, header=None, prefix="b", mangle_dupe_cols=True, keep_default_na=False)
df5: pd.DataFrame = pd.read_csv(file.name, engine="python", true_values=[0, 1, 3], na_filter=False)
df6: pd.DataFrame = pd.read_csv(file.name, skiprows=lambda x: x in [0, 2], skip_blank_lines=True, dayfirst=False)
df7: pd.DataFrame = pd.read_csv(file.name, nrows=2)
df8: pd.DataFrame = pd.read_csv(file.name, dtype={"a": float, "b": int})
tfr1: TextFileReader = pd.read_csv(file.name, nrows=2, iterator=True, chunksize=3)
tfr2: TextFileReader = pd.read_csv(file.name, nrows=2, chunksize=1)
tfr3: TextFileReader = pd.read_csv(file.name, nrows=2, iterator=False, chunksize=1)
tfr4: TextFileReader = pd.read_csv(file.name, nrows=2, iterator=True)
def test_isna() -> None:
s = pd.Series([1, np.nan, 3.2])
check_series_result(pd.isna(s))
import pandas as pd
from sqlalchemy import create_engine
from library import cf
import talib.abstract as ta
import pymysql.cursors
import numpy as np
from library.logging_pack import *
logger.debug("subindex์์!!!!")
pymysql.install_as_MySQLdb()
daily_craw_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_craw",
encoding='utf-8')
daily_buy_list_engine = create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_buy_list" ,
encoding='utf-8')
simul_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/simulator11",
encoding='utf-8')
min_craw_engine = create_engine("mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/min_craw",
encoding='utf-8')
stand_date = '20070903'
# data transformation
class subindex:
def __init__(self):
logger.debug("subindex ํจ์๋ก ๋ค์ด์๋ค!!")
def collecting(self):
co_sql = f"select TABLE_NAME FROM information_schema.tables WHERE table_schema = 'daily_craw'"
target_code = daily_craw_engine.execute(co_sql).fetchall()
num = len(target_code)
for i in range(num):
self.db_name = target_code[i][0]
self.db_name = self.db_name.replace("%", "%%")
self.collect_db()
print(self.db_name, "fetched!")
def collect_db(self):
# load data
sql = "select date,code,vol10,code_name,open,close,low,high,volume from daily_craw.`%s` where Date >= %s order by Date "
rows = daily_craw_engine.execute(sql%(self.db_name,stand_date)).fetchall()
three_s = pd.DataFrame(rows, columns=['date', 'code','vol10' ,'code_name','open' ,'close', 'low', 'high', 'volume'])
three_s = three_s.fillna(0)
# transform data
th_date = list(np.asarray(three_s['date'].tolist()))
th_date_np = np.array(th_date, dtype='f8')
th_close = list(np.asarray(three_s['close'].tolist()))
th_close_np = np.array(th_close, dtype='f8')
th_high = list(np.asarray(three_s['high'].tolist()))
th_high_np = np.array(th_high, dtype='f8')
th_low = list(np.asarray(three_s['low'].tolist()))
th_low_np = np.array(th_low, dtype='f8')
th_volume = list(np.asarray(three_s['volume'].tolist()))
th_volume_np = np.array(th_volume, dtype='f8')
th_open = list(np.asarray(three_s['open'].tolist()))
th_open_np = np.array(th_open, dtype='f8')
th_vol10 = list(np.asarray(three_s['vol10'].tolist()))
th_vol10_np = np.array(th_vol10, dtype='f8')
# high-low price range (%) of each day
th_diff = ((three_s['high']-three_s['low'])/three_s['high'])*100
# 30-day rolling maximum of the price range
th_diff30 = th_diff.rolling(30).max()
# compute technical indicators
th_cci = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 9)
th_cci60 = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 60)
##rsi
th_rsi = ta._ta_lib.RSI(th_close_np, 14)
th_rsi5 = ta._ta_lib.RSI(th_close_np, 5)
th_OBV = ta._ta_lib.OBV(th_close_np, th_volume_np)
th_macd, th_macd_signal, th_macd_hist = ta._ta_lib.MACD(th_close_np, fastperiod=12, slowperiod=26,
signalperiod=9)
th_stoch_slowk, th_stoch_slowd = ta._ta_lib.STOCH(th_high_np, th_low_np, th_close_np,
fastk_period=10, slowk_period=2, slowk_matype=0,
slowd_period=2, slowd_matype=0)
## changed to a 12-day basis, following the book
th_BBAND_U, th_BBAND_M, th_BBAND_L = ta._ta_lib.BBANDS(th_close_np, timeperiod=20, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_U14, th_BBAND_M14, th_BBAND_L14 = ta._ta_lib.BBANDS(th_close_np, timeperiod=14, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_WIDE = (th_BBAND_U-th_BBAND_L)/th_BBAND_M
th_BBAND_WIDE14 = (th_BBAND_U14 - th_BBAND_L14) / th_BBAND_M14
th_pb=(th_close_np-th_BBAND_L) / (th_BBAND_U-th_BBAND_L)
th_pb14 = (th_close_np - th_BBAND_L14) / (th_BBAND_U14 - th_BBAND_L14)
th_sar = ta._ta_lib.SAR(th_high_np, th_low_np,0.04,0.4)
th_ibs = (th_close_np -th_low_np)/(th_high_np-th_low_np)
th_dema5 = ta._ta_lib.DEMA(th_close_np, 5)
th_dema20 = ta._ta_lib.DEMA(th_close_np,20)
th_dema60 = ta._ta_lib.DEMA(th_close_np, 60)
th_tema5 = ta._ta_lib.TEMA(th_close_np,5)
th_tema20 = ta._ta_lib.TEMA(th_close_np, 20)
th_tema60 = ta._ta_lib.TEMA(th_close_np, 60)
# ema = exponential moving average
th_ema5 = ta._ta_lib.EMA(th_close_np, 5)
th_ema20 = ta._ta_lib.EMA(th_close_np, 20)
th_ema60 = ta._ta_lib.EMA(th_close_np, 60)
th_ema112 = ta._ta_lib.EMA(th_close_np, 112)
th_ema224 = ta._ta_lib.EMA(th_close_np, 224)
th_ema448 = ta._ta_lib.EMA(th_close_np, 448)
th_ema448diff = ((th_close_np-th_ema448)/th_close_np * 100)
th_ema224diff = ((th_close_np-th_ema224)/th_close_np*100)
th_ema112diff = ((th_close_np-th_ema112)/th_close_np*100)
# ma = moving average
th_ma112 = ta._ta_lib.MA(th_close_np, 112)
th_ma224 = ta._ta_lib.MA(th_close_np, 224)
th_ma448 = ta._ta_lib.MA(th_close_np, 448)
th_clo5diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 5)) / th_close_np * 100)
th_clo20diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 20)) / th_close_np * 100)
# dmi values, period changed from 14 to 11
th_pdi = ta._ta_lib.PLUS_DI(th_high_np,th_low_np,th_close_np, 11)
th_mdi = ta._ta_lib.MINUS_DI(th_high_np, th_low_np, th_close_np, 11)
th_dm = ta._ta_lib.PLUS_DM(th_high_np,th_low_np, 11)
th_adx = ta._ta_lib.ADX(th_high_np,th_low_np,th_close_np, 14)
th_adxr = ta._ta_lib.ADXR(th_high_np, th_low_np, th_close_np, 14)
th_obvsig9 =ta._ta_lib.MA(ta._ta_lib.OBV(th_close_np, th_volume_np),9)
# Williams %R
th_williumr = ta._ta_lib.WILLR(th_high_np,th_low_np,th_close_np, 14)
th_mfi = ta._ta_lib.MFI(th_high_np,th_low_np,th_close_np,th_volume_np, 14)
# volume oscillator formula, 10-day
th_ad = ((th_close_np-th_open_np)/(th_high_np-th_low_np) * th_volume_np / th_vol10_np*10)
# intraday intensity
th_ll = (2*th_close_np-th_high_np-th_low_np)/(th_high_np-th_low_np) * th_volume_np
# convert all NaN to 0
np.nan_to_num(th_cci, copy=False)
np.nan_to_num(th_cci60, copy=False)
np.nan_to_num(th_rsi, copy=False)
np.nan_to_num(th_macd, copy=False)
np.nan_to_num(th_macd_signal, copy=False)
np.nan_to_num(th_macd_hist, copy=False)
np.nan_to_num(th_stoch_slowk, copy=False)
np.nan_to_num(th_stoch_slowd, copy=False)
np.nan_to_num(th_BBAND_L, copy=False)
np.nan_to_num(th_BBAND_M, copy=False)
np.nan_to_num(th_BBAND_U, copy=False)
np.nan_to_num(th_BBAND_L14, copy=False)
np.nan_to_num(th_BBAND_M14, copy=False)
np.nan_to_num(th_BBAND_U14, copy=False)
np.nan_to_num(th_OBV, copy=False)
np.nan_to_num(th_sar, copy=False)
np.nan_to_num(th_dema5, copy=False)
np.nan_to_num(th_dema20, copy=False)
np.nan_to_num(th_dema60, copy=False)
np.nan_to_num(th_tema5, copy=False)
np.nan_to_num(th_tema20, copy=False)
np.nan_to_num(th_tema60, copy=False)
np.nan_to_num(th_ema5, copy=False)
np.nan_to_num(th_ema112diff, copy=False)
np.nan_to_num(th_ema224diff, copy=False)
np.nan_to_num(th_ema448diff, copy=False)
np.nan_to_num(th_ema20, copy=False)
np.nan_to_num(th_ema60, copy=False)
np.nan_to_num(th_ema112, copy=False)
np.nan_to_num(th_ema224, copy=False)
np.nan_to_num(th_ema448, copy=False)
np.nan_to_num(th_ma112, copy=False)
np.nan_to_num(th_ma224, copy=False)
np.nan_to_num(th_ma448, copy=False)
np.nan_to_num(th_pdi, copy=False)
np.nan_to_num(th_mdi, copy=False)
np.nan_to_num(th_dm, copy=False)
np.nan_to_num(th_adx, copy=False)
np.nan_to_num(th_adxr, copy=False)
np.nan_to_num(th_williumr, copy=False)
np.nan_to_num(th_pb, copy=False)
np.nan_to_num(th_pb14, copy=False)
np.nan_to_num(th_BBAND_WIDE, copy=False)
np.nan_to_num(th_BBAND_WIDE14, copy=False)
np.nan_to_num(th_mfi, copy=False)
np.nan_to_num(th_ll, copy=False)
np.nan_to_num(th_ad, copy=False)
np.nan_to_num(th_rsi5, copy=False)
np.nan_to_num(th_ibs, copy=False)
np.nan_to_num(th_diff, copy=False)
np.nan_to_num(th_diff30, copy=False)
np.nan_to_num(th_obvsig9, copy=False)
# build DataFrames
df_ad = pd.DataFrame(th_ad, columns=['ad'])
df_cci = pd.DataFrame(th_cci, columns=['cci'])
df_cci60 = pd.DataFrame(th_cci, columns=['cci60'])
df_rsi5 = pd.DataFrame(th_rsi5, columns=['rsi5'])
df_rsi = pd.DataFrame(th_rsi, columns=['rsi'])
df_macd = pd.DataFrame(th_macd, columns=['macd'])
df_macd_signal = pd.DataFrame(th_macd_signal, columns=['macd_signal'])
df_macd_hist = pd.DataFrame(th_macd_hist, columns=['macd_hist'])
df_stoch_slowk = pd.DataFrame(th_stoch_slowk, columns=['stoch_slowk'])
df_stoch_slowd = pd.DataFrame(th_stoch_slowd, columns=['stoch_slowd'])
# Bollinger Bands
df_BBand_U = pd.DataFrame(th_BBAND_U, columns=['BBand_U'])
df_BBand_M = pd.DataFrame(th_BBAND_M, columns=['BBand_M'])
df_BBand_L = pd.DataFrame(th_BBAND_L, columns=['BBand_L'])
df_BBand_U14 = pd.DataFrame(th_BBAND_U, columns=['BBand_U14'])
df_BBand_M14 = pd.DataFrame(th_BBAND_M, columns=['BBand_M14'])
df_BBand_L14 = pd.DataFrame(th_BBAND_L, columns=['BBand_L14'])
df_ibs = pd.DataFrame(th_ibs, columns=['ibs'])
df_pb14 = pd.DataFrame(th_pb, columns=['pb14'])
df_obvsig9 = pd.DataFrame(th_obvsig9, columns=['obvsig9'])
df_OBV = pd.DataFrame(th_OBV, columns=['OBV'])
df_sar = pd.DataFrame(th_sar, columns=['sar'])
# DEMA (double exponential moving average) DataFrames
df_dema5 = pd.DataFrame(th_dema5, columns=['dema5'])
df_dema20 = pd.DataFrame(th_dema20, columns=['dema20'])
df_dema60 = pd.DataFrame(th_dema60, columns=['dema60'])
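# Illustrative pandas/numpy-only sketch of the pattern used throughout this file: compute an
# indicator, convert its NaN warm-up values to 0, and wrap it in a one-column DataFrame.
# rolling().mean() stands in for the TA-Lib calls, which need the compiled talib library.
import numpy as np
import pandas as pd
close = pd.Series([100.0, 101.5, 99.8, 102.3, 103.1])
ma3 = close.rolling(3).mean().to_numpy()        # first two values are NaN
np.nan_to_num(ma3, copy=False)                  # convert all NaN to 0, as done above
df_ma3 = pd.DataFrame(ma3, columns=['ma3'])
print(df_ma3)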
import pandas as pd
import os
from configparser import ConfigParser
from datetime import datetime
import statistics
import numpy as np
import glob
from drop_bp_cords import define_movement_cols
#def analyze_process_movement(configini):
configini = r"Z:\DeepLabCut\DLC_extract\Troubleshooting\Trouble_040720\project_folder\project_config.ini"
dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
config = ConfigParser()
configFile = str(configini)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
csv_dir_in = os.path.join(projectPath, 'csv', 'machine_results')
vidLogFilePath = os.path.join(projectPath, 'logs', 'video_info.csv')
vidinfDf = pd.read_csv(vidLogFilePath)
noAnimals = config.getint('process movements', 'no_of_animals')
Animal_1_Bp = config.get('process movements', 'animal_1_bp')
Animal_2_Bp = config.get('process movements', 'animal_2_bp')
pose_estimation_body_parts = config.get('create ensemble settings', 'pose_estimation_body_parts')
VideoNo_list, columnNames1, fileCounter = [], [], 0
########### logfile path ###########
log_fn = os.path.join(projectPath, 'logs', 'Movement_log_' + dateTime + '.csv')
if not os.path.exists(log_fn):
os.makedirs(log_fn)
columnNames = define_movement_cols(pose_estimation_body_parts=pose_estimation_body_parts,columnNames=columnNames1)
log_df = pd.DataFrame(columns=columnNames)
########### FIND CSV FILES ###########
filesFound = glob.glob(csv_dir_in + '/*.csv')
print('Processing movement data for ' + str(len(filesFound)) + ' files...')
frames_processed_list, meanVeloM1, medianVeloM1, totMoveM1 = ([], [], [], [])
if noAnimals == 2:
meanVeloM2, medianVeloM2, totMoveM2, mean_distance, median_distance = [], [], [], [], []
### CREATE SHIFTED COLUMNS
boutsDf['Shifted start'] = boutsDf['Start Time'].shift(-1)
########### SET MOVEMENT COLUMN IF USER DEFINED CONFIG ###########
if pose_estimation_body_parts == 'user_defined':
animal_1_movement_column = str('movement_' + Animal_1_Bp)
if noAnimals == 2:
animal_2_movement_column = str('movement_' + Animal_2_Bp)
distance_column = 'distance_' + str(Animal_1_Bp) + '_to_' + str(Animal_2_Bp)
bodyPart_distance_list, mean_bodypart_distance_list, median_bodypart_distance_list = ([], [], [])
for i in filesFound:
list_nose_movement_M1 = []
if noAnimals == 2:
centroid_distance_cm_list, nose_2_nose_dist_cm_list, list_nose_movement_M2 = ([], [], [])
frameCounter = 0
currentFile = i
currVidName = os.path.basename(currentFile)
videoSettings = vidinfDf.loc[vidinfDf['Video'] == str(currVidName.replace('.csv', ''))]
try:
fps = int(videoSettings['fps'])
except TypeError:
print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
csv_df = pd.read_csv(currentFile)
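# Illustrative sketch of the per-video fps lookup performed above, with a tiny stand-in for the
# project_folder/logs/video_info.csv table.
import pandas as pd
vidinf_demo = pd.DataFrame({"Video": ["Video1", "Video2"], "fps": [30, 25]})
currVidName_demo = "Video1.csv"
videoSettings_demo = vidinf_demo.loc[vidinf_demo["Video"] == currVidName_demo.replace(".csv", "")]
try:
    fps_demo = int(videoSettings_demo["fps"])
except TypeError:
    print("Error: the video is missing from video_info.csv")
print(fps_demo)   # 30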
import warnings
warnings.simplefilter("ignore")
import json
import requests
import os
import geopandas as gpd
from osgeo import gdal
import pandas as pd
import rasterio
from rasterio import merge as riomerge
from pyproj import Transformer
import urllib
from shapely.geometry import Point
downloadfolder = f"{os.getcwd()}/"
outputfolder = os.path.join(downloadfolder, "output/")
downloadfolder_DSM = os.path.join(downloadfolder, "DSM_tif/")
downloadfolder_DTM = os.path.join(downloadfolder, "DTM_tif/")
be_shp_path = os.path.join(downloadfolder, "Kaartbladversnijdingen_NGI_numerieke_reeks_Shapefile/Shapefile/Kbl.shp")
BpnCapa_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnCapa.shp")
BpnCapa_1_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnCapa_1.shp")
BpnRebu_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnRebu.shp")
BpnRebu_1_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnRebu_1.shp")
BpnCabu_path = os.path.join(downloadfolder, "CadGIS_fiscaal_20210101_GewVLA_Shapefile/Shapefile/BpnCabu.shp")
basefiles_missing = False
if os.path.exists(be_shp_path) == False:
basefiles_missing = True
if os.path.exists(BpnCapa_path) == False:
basefiles_missing = True
if os.path.exists(BpnCapa_1_path) == False:
basefiles_missing = True
if os.path.exists(BpnRebu_path) == False:
basefiles_missing = True
if os.path.exists(BpnRebu_1_path) == False:
basefiles_missing = True
if os.path.exists(BpnCabu_path) == False:
basefiles_missing = True
if basefiles_missing:
print("Cannot run the program, download all needed files first.")
print("Readme has info on what files to download from government.")
quit()
cant_continue = True
while cant_continue:
my_adress = input("Enter an address: ")
try:
expandbox = int(input("Enter number of meters to be added (100m-1000m, default=400m): "))
except ValueError:
expandbox = 400
if expandbox > 1000:
expandbox = 1000
if expandbox < 100:
expandbox = 100
url = "https://loc.geopunt.be/v4/Location?q=" + my_adress
r = requests.get(url)
try:
r_json = json.loads(r.text)["LocationResult"][0]
except IndexError:
print("that adress is not recognized...")
continue
bbox = r_json.get('BoundingBox', {})
lowerleft_x = bbox["LowerLeft"]["X_Lambert72"]
lowerleft_y = bbox["LowerLeft"]["Y_Lambert72"]
upperright_x = bbox["UpperRight"]["X_Lambert72"]
upperright_y = bbox["UpperRight"]["Y_Lambert72"]
print(f"Total size is {upperright_x - lowerleft_x + 2*expandbox}m, by {upperright_y - lowerleft_y + 2*expandbox}m")
if ((upperright_x - lowerleft_x + expandbox) < 1501) or ((upperright_y - lowerleft_y + expandbox) < 1501):
cant_continue = False
else:
print("That area is too large... Try again")
x_offset = 0
y_offset = 0
if len(json.loads(r.text)["LocationResult"]) == 1:
r_json = json.loads(r.text)["LocationResult"][0]
bbox = r_json.get('BoundingBox', {})
lowerleft_x = bbox["LowerLeft"]["X_Lambert72"] + x_offset
lowerleft_y = bbox["LowerLeft"]["Y_Lambert72"] + y_offset
upperright_x = bbox["UpperRight"]["X_Lambert72"] + x_offset
upperright_y = bbox["UpperRight"]["Y_Lambert72"] + y_offset
else:
print("Addres not found, please check for typos etc...")
# Check in what NGI map the adress coordinates are located
be_shp = gpd.read_file(be_shp_path)
lowerleft = Point(lowerleft_x - expandbox, lowerleft_y - expandbox)
upperleft = Point(lowerleft_x - expandbox, upperright_y + expandbox)
lowerright = Point(upperright_x + expandbox, lowerleft_y - expandbox)
upperright = Point(upperright_x + expandbox, upperright_y + expandbox)
lowerleft_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: lowerleft.within(x)) == True]["CODE"].tolist()
upperleft_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: upperleft.within(x)) == True]["CODE"].tolist()
lowerright_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: lowerright.within(x)) == True]["CODE"].tolist()
upperright_lst = be_shp.loc[be_shp["geometry"].apply(lambda x: upperright.within(x)) == True]["CODE"].tolist()
if len(lowerleft_lst) == 1 and len(upperleft_lst) == 1 and len(lowerright_lst) == 1 and len(upperright_lst) == 1:
print("Geometry points all within unique NGI maps --> OK")
else:
print("Geometry points NGI map error, cannot process this location (flemish gov NGI map seems incorrect)")
print("Trying to continue anyway...")
mapnumbers = list(dict.fromkeys((upperleft_lst[0], upperright_lst[0], lowerleft_lst[0], lowerright_lst[0])))
if len(mapnumbers) == 1:
print(f"All bounding box points are in the same Ngi map with Nr: {lowerleft_lst[0]}")
else:
print("The property is ovelapping multiple Ngi maps:")
print("maps top: ", upperleft_lst[0], upperright_lst[0])
print("maps bottom: ", lowerleft_lst[0], lowerright_lst[0])
print("creating Tiff coutouts...")
def get_dsmdtm_path(dsmdtm, thismap) -> str:
dsmdtm = dsmdtm.upper()
myfile = f"DHMVII{dsmdtm}RAS1m_k{thismap.zfill(2)}/GeoTIFF/DHMVII{dsmdtm}RAS1m_k{thismap.zfill(2)}.tif"
myfilefullpath = f"{downloadfolder}{dsmdtm}_tif/{myfile}"
if os.path.exists(myfilefullpath) == False:
print("Cannot find the tif file you requested, missing file is:")
print(myfilefullpath)
quit()
else:
return myfile
def create_tif_cutouts(thismap):
geotif_DSM_file = os.path.join(downloadfolder_DSM, get_dsmdtm_path("DSM", thismap))
resized_DSM_geotif = os.path.join(outputfolder, f"output_DSM{thismap}.tif")
geotif_DTM_file = os.path.join(downloadfolder_DTM, get_dsmdtm_path("DTM", thismap))
resized_DTM_geotif = os.path.join(outputfolder, f"output_DTM{thismap}.tif")
gdal.Translate(resized_DSM_geotif, geotif_DSM_file, projWin=[lowerleft_x - expandbox, upperright_y + expandbox, upperright_x + expandbox, lowerleft_y - expandbox])
crop_white_border(resized_DSM_geotif)
gdal.Translate(resized_DTM_geotif, geotif_DTM_file, projWin=[lowerleft_x - expandbox, upperright_y + expandbox, upperright_x + expandbox, lowerleft_y - expandbox])
crop_white_border(resized_DTM_geotif)
# crop the image borders if they have white values
def crop_white_border(my_geotif_file):
with rasterio.open(my_geotif_file) as src:
window = rasterio.windows.get_data_window(src.read(1, masked=True))
# window = Window(col_off=13, row_off=3, width=757, height=711)
kwargs = src.meta.copy()
kwargs.update({
'height': window.height,
'width': window.width,
'transform': rasterio.windows.transform(window, src.transform)})
with rasterio.open(my_geotif_file, 'w', **kwargs) as dst:
dst.write(src.read(window=window))
def createfinal(dsmdtm, mylist):
with rasterio.open(mylist[0]) as src:
meta = src.meta.copy()
# The merge function returns a single array and the affine transform info
arr, out_trans = riomerge.merge(mylist)
meta.update({
"driver": "GTiff",
"height": arr.shape[1],
"width": arr.shape[2],
"transform": out_trans
})
# Write the mosaic raster to disk
with rasterio.open(os.path.join(outputfolder, f"output_{dsmdtm}.tif"), "w", **meta) as dest:
dest.write(arr)
dsm_list = []
dtm_list = []
for thismap in mapnumbers:
create_tif_cutouts(thismap)
dsm_list.append(os.path.join(outputfolder, f"output_DSM{thismap}.tif"))
dtm_list.append(os.path.join(outputfolder, f"output_DTM{thismap}.tif"))
createfinal("DSM", dsm_list)
createfinal("DTM", dtm_list)
print("creating xyz data of the surroundings for blender...")
# create xyz dataframes
resized_DSM_geotif = os.path.join(outputfolder, "output_DSM.tif")
xyz_DSM_file = os.path.join(outputfolder, "output_DSM.xyz")
resized_DTM_geotif = os.path.join(outputfolder, "output_DTM.tif")
xyz_DTM_file = os.path.join(outputfolder, "output_DTM.xyz")
geo_DSM_resized = gdal.Open(resized_DSM_geotif)
gdal.Translate(xyz_DSM_file, geo_DSM_resized)
df_dsm = pd.read_csv(xyz_DSM_file, sep=" ", header=None)
df_dsm.columns = ["x", "y", "z"]
geo_DTM_resized = gdal.Open(resized_DTM_geotif)
gdal.Translate(xyz_DTM_file, geo_DTM_resized)
df_dtm = pd.read_csv(xyz_DTM_file, sep=" ", header=None)
df_dtm.columns = ["x", "y", "z"]
df_final = pd.concat([df_dsm, df_dtm])
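# Illustrative sketch of the Point.within() test used above to decide which NGI map sheet
# contains each bounding-box corner. A toy square polygon stands in for the Kbl.shp geometries;
# only shapely is assumed.
from shapely.geometry import Point, Polygon
sheet_demo = Polygon([(0, 0), (1000, 0), (1000, 1000), (0, 1000)])  # stand-in for one map sheet
corner_demo = Point(250, 760)
print(corner_demo.within(sheet_demo))   # True: this corner falls inside that sheet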
import sys
import numpy as np
import pandas as pd
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
from pandas.util.testing import assert_series_equal
import pytest
from test_pvsystem import sam_data, pvsyst_module_params
from conftest import fail_on_pvlib_version, requires_scipy, requires_tables
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module = 'Canadian_Solar_CS5P_220M'
module_parameters = modules[module].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_native_snl_ac_system(sam_data):
module = 'Canadian_Solar_CS5P_220M'
module_parameters = sam_data['cecmod'][module].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvsyst_dc_snl_ac_system(sam_data, pvsyst_module_params):
module = 'PVsyst test module'
module_parameters = pvsyst_module_params
module_parameters['b'] = 0.05
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module = 'Canadian_Solar_CS5P_220M'
module_parameters = modules[module].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def location():
return Location(32.2, -111, altitude=700)
@pytest.fixture
def weather():
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'ghi': [500, 0], 'dni': [800, 0], 'dhi': [100, 0]},
index=times)
return weather
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
with pytest.warns(pvlibDeprecationWarning):
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_weather(system, location, weather, mocker):
mc = ModelChain(system, location)
m = mocker.spy(system, 'sapm_celltemp')
weather['wind_speed'] = 5
weather['temp_air'] = 10
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
# assert_called_once_with cannot be used with series, so need to use
# assert_series_equal on call_args
assert_series_equal(m.call_args[0][1], weather['wind_speed']) # wind
assert_series_equal(m.call_args[0][2], weather['temp_air']) # temp
assert not mc.ac.empty
def test_run_model_tracker(system, location, weather, mocker):
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mocker.spy(system, 'singleaxis')
mc = ModelChain(system, location)
mc.run_model(weather.index, weather=weather)
assert system.singleaxis.call_count == 1
assert (mc.tracking.columns == ['tracker_theta', 'aoi', 'surface_azimuth',
'surface_tilt']).all()
assert mc.ac[0] > 0
assert np.isnan(mc.ac[1])
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
@pytest.mark.parametrize('dc_model', [
'sapm',
pytest.param('cec', marks=requires_scipy),
pytest.param('desoto', marks=requires_scipy),
pytest.param('pvsyst', marks=requires_scipy),
pytest.param('singlediode', marks=requires_scipy),
'pvwatts_dc'])
def test_infer_dc_model(system, cec_dc_snl_ac_system, pvsyst_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location, dc_model,
weather, mocker):
dc_systems = {'sapm': system,
'cec': cec_dc_snl_ac_system,
'desoto': cec_dc_snl_ac_system,
'pvsyst': pvsyst_dc_snl_ac_system,
'singlediode': cec_dc_snl_ac_system,
'pvwatts_dc': pvwatts_dc_pvwatts_ac_system}
dc_model_function = {'sapm': 'sapm',
'cec': 'calcparams_cec',
'desoto': 'calcparams_desoto',
'pvsyst': 'calcparams_pvsyst',
'singlediode': 'calcparams_desoto',
'pvwatts_dc': 'pvwatts_dc'}
system = dc_systems[dc_model]
# remove Adjust from model parameters for desoto, singlediode
if dc_model in ['desoto', 'singlediode']:
system.module_parameters.pop('Adjust')
m = mocker.spy(system, dc_model_function[dc_model])
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.dc, (pd.Series, pd.DataFrame))
@pytest.mark.parametrize('dc_model', [
'sapm',
pytest.param('cec', marks=requires_scipy),
pytest.param('cec_native', marks=requires_scipy)])
def test_infer_spectral_model(location, system, cec_dc_snl_ac_system,
cec_dc_native_snl_ac_system, dc_model):
dc_systems = {'sapm': system,
'cec': cec_dc_snl_ac_system,
'cec_native': cec_dc_native_snl_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location,
orientation_strategy='None', aoi_model='physical')
assert isinstance(mc, ModelChain)
def test_dc_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'poadc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model=poadc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert not mc.ac.empty
def acdc(mc):
mc.ac = mc.dc
@pytest.mark.parametrize('ac_model', [
'snlinverter', pytest.param('adrinverter', marks=requires_scipy),
'pvwatts'])
def test_ac_models(system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, weather, mocker):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_adr_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
if ac_model == 'pvwatts':
ac_model += '_ac'
m = mocker.spy(system, ac_model)
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, pd.Series)
assert not mc.ac.empty
assert mc.ac[1] < 1
def test_ac_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'acdc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, ac_model=acdc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert_series_equal(mc.ac, mc.dc)
assert not mc.ac.empty
def constant_aoi_loss(mc):
mc.aoi_modifier = 0.9
@pytest.mark.parametrize('aoi_model, method', [
('sapm', 'sapm_aoi_loss'), ('ashrae', 'ashraeiam'),
('physical', 'physicaliam')])
def test_aoi_models(system, location, aoi_model, method, weather, mocker):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
m = mocker.spy(system, method)
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, pd.Series)
assert not mc.ac.empty
assert mc.ac[0] > 150 and mc.ac[0] < 200
assert mc.ac[1] < 1
def test_aoi_model_no_loss(system, location, weather):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert mc.aoi_modifier == 1.0
assert not mc.ac.empty
assert mc.ac[0] > 150 and mc.ac[0] < 200
assert mc.ac[1] < 1
def test_aoi_model_user_func(system, location, weather, mocker):
m = mocker.spy(sys.modules[__name__], 'constant_aoi_loss')
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=constant_aoi_loss, spectral_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert mc.aoi_modifier == 0.9
assert not mc.ac.empty
assert mc.ac[0] > 140 and mc.ac[0] < 200
assert mc.ac[1] < 1
def constant_spectral_loss(mc):
mc.spectral_modifier = 0.9
@requires_scipy
@pytest.mark.parametrize('spectral_model', [
'sapm', 'first_solar', 'no_loss', constant_spectral_loss
])
def test_spectral_models(system, location, spectral_model, weather):
# add pw to weather dataframe
weather['precipitable_water'] = [0.3, 0.5]
mc = ModelChain(system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model=spectral_model)
spectral_modifier = mc.run_model(times=weather.index,
weather=weather).spectral_modifier
assert isinstance(spectral_modifier, (pd.Series, float, int))
def constant_losses(mc):
mc.losses = 0.9
mc.dc *= mc.losses
def test_losses_models_pvwatts(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
age = 1
pvwatts_dc_pvwatts_ac_system.losses_parameters = dict(age=age)
m = mocker.spy(pvsystem, 'pvwatts_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='pvwatts')
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
m.assert_called_with(age=age)
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert not mc.ac.empty
# check that we're applying correction to dc
# GH 696
dc_with_loss = mc.dc
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
mc.run_model(weather.index, weather=weather)
assert not np.allclose(mc.dc, dc_with_loss, equal_nan=True)
def test_losses_models_ext_def(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'constant_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model=constant_losses)
mc.run_model(weather.index, weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert mc.losses == 0.9
assert not mc.ac.empty
def test_losses_models_no_loss(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(pvsystem, 'pvwatts_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
assert mc.losses_model == mc.no_extra_losses
mc.run_model(weather.index, weather=weather)
assert m.call_count == 0
assert mc.losses == 1
def test_invalid_dc_model_params(system, cec_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location):
kwargs = {'dc_model': 'sapm', 'ac_model': 'snlinverter',
'aoi_model': 'no_loss', 'spectral_model': 'no_loss',
'temp_model': 'sapm', 'losses_model': 'no_loss'}
system.module_parameters.pop('A0') # remove a parameter
with pytest.raises(ValueError):
ModelChain(system, location, **kwargs)
kwargs['dc_model'] = 'singlediode'
cec_dc_snl_ac_system.module_parameters.pop('a_ref') # remove a parameter
with pytest.raises(ValueError):
ModelChain(cec_dc_snl_ac_system, location, **kwargs)
kwargs['dc_model'] = 'pvwatts'
kwargs['ac_model'] = 'pvwatts'
pvwatts_dc_pvwatts_ac_system.module_parameters.pop('pdc0')
with pytest.raises(ValueError):
ModelChain(pvwatts_dc_pvwatts_ac_system, location, **kwargs)
@pytest.mark.parametrize('model', [
    'dc_model', 'ac_model', 'aoi_model', 'spectral_model', 'losses_model',
    'temp_model'
])
def test_invalid_models(model, system, location):
kwargs = {'dc_model': 'pvwatts', 'ac_model': 'pvwatts',
'aoi_model': 'no_loss', 'spectral_model': 'no_loss',
'temp_model': 'sapm', 'losses_model': 'no_loss'}
kwargs[model] = 'invalid'
with pytest.raises(ValueError):
ModelChain(system, location, **kwargs)
def test_bad_get_orientation():
with pytest.raises(ValueError):
modelchain.get_orientation('bad value')
@fail_on_pvlib_version('0.7')
def test_deprecated_07():
# explicit system creation call because fail_on_pvlib_version
    # does not support fixtures.
# does not matter what the parameters are, just fake it until we make it
module_parameters = {'R_sh_ref': 1, 'a_ref': 1, 'I_o_ref': 1,
'alpha_sc': 1, 'I_L_ref': 1, 'R_s': 1}
    system = PVSystem(module_parameters=module_parameters)
    location = Location(32.2, -110.9)
    with pytest.warns(pvlibDeprecationWarning):
        ModelChain(system, location,
dc_model='singlediode', # this should fail after 0.7
aoi_model='no_loss', spectral_model='no_loss',
ac_model='snlinverter')
@requires_tables
@fail_on_pvlib_version('0.7')
def test_deprecated_clearsky_07():
# explicit system creation call because fail_on_pvlib_version
    # does not support fixtures.
system = PVSystem(module_parameters={'pdc0': 1, 'gamma_pdc': -0.003})
location = Location(32.2, -110.9)
mc = ModelChain(system, location, dc_model='pvwatts', ac_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
with pytest.warns(pvlibDeprecationWarning):
mc.prepare_inputs(times=times)
@requires_scipy
def test_basic_chain_required(sam_data):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters[
'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
with pytest.raises(ValueError):
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
altitude=altitude)
@requires_scipy
def test_basic_chain_alt_az(sam_data):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters[
'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth)
expected = pd.Series(np.array([ 115.40352679, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
@requires_scipy
def test_basic_chain_strategy(sam_data):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters[
'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(
times, latitude, longitude, module_parameters, inverter_parameters,
orientation_strategy='south_at_latitude_tilt', altitude=altitude)
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
@requires_scipy
def test_basic_chain_altitude_pressure(sam_data):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
inverters = sam_data['cecinverter']
inverter_parameters = inverters[
'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
pressure=93194)
expected = pd.Series(np.array([ 116.595664887, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
altitude=altitude)
expected = pd.Series(np.array([ 116.595664887, -2.00000000e-02]),
index=times)
| assert_series_equal(ac, expected, check_less_precise=1) | pandas.util.testing.assert_series_equal |
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
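    '''Build an optional SQL filter on [TimeStampLocal] for the given dates.
    Returns an empty string when neither date is provided. Illustrative call
    (the date is hypothetical):
    return_between_date_query_string('2019-01-01', None)
    returns "AND [TimeStampLocal] >= '2019-01-01' "
    '''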
    if start_date is not None and end_date is not None:
        start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
    elif start_date is not None and end_date is None:
        start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
    elif start_date is None and end_date is not None:
        start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
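    '''Build the comma-separated column-list string expected by the M2D2
    data-export stored procedure, e.g. '(123_1),(456_1)' for mvs_ids
    [123, 456] (the ids shown are hypothetical).'''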
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
def rename_mvs_id_column(col, names, types):
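    '''Rename a raw '<mvs_id>_<type id>' column label to '<sensor name>_<data type>'
    using the provided name and type lookup dictionaries.'''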
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = pd.read_sql(sql_query_coordinates, self.conn)
masts = masts.merge(coordinates, left_on='wmm_id', right_on='wmm_id')
masts.set_index(['Project', 'wmm_id', 'WMM_Latitude', 'WMM_Longitude', 'Type'], inplace=True)
masts.sort_index(inplace=True)
return masts
def mvs_ids(self):
masts = self.masts()
mvs_ids = masts.mvs_id.values.tolist()
return mvs_ids
def valid_signal_labels(self):
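        '''Return a Series of signal type ids (MDVT_ID) indexed by signal
        name (MDVT_Name), e.g. 'AVG', 'SD', 'MIN', 'MAX'.'''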
signal_type_query = '''
SELECT [MDVT_ID]
,[MDVT_Name]
FROM [M2D2_DB_BE].[dbo].[MDataValueType]'''
signal_types = pd.read_sql(signal_type_query, self.conn, index_col='MDVT_Name').MDVT_ID
return signal_types
def column_labels_for_masts(self):
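        '''Return a DataFrame of sensor column labels, indexed by column_id,
        for every mvs_id known to M2D2.'''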
masts = self.masts()
mvs_ids = masts.mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def column_labels_for_data_from_mvs_ids(self, data):
masts = self.masts()
names_map = pd.Series(index=masts.mvs_id.values, data=masts.Name.values).to_dict()
types = self.valid_signal_labels()
types.loc['FLAG'] = 'Flag'
types_map = pd.Series(index=types.values.astype(str), data=types.index.values).to_dict()
data = data.rename(lambda x: rename_mvs_id_column(x, names=names_map, types=types_map), axis=1)
return data
def column_labels_for_wmm_id(self, wmm_id):
masts = self.masts()
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def data_from_sensors_mvs_ids(self, mvs_ids, signal_type='AVG'):
'''Download sensor data from M2D2
:Parameters:
mvs_ids: int or list
Virtual sensor IDs (mvs_ids) in M2D2, can be singular
signal_type: str, default 'AVG' - NOT SUPPORTED AT THIS TIME
Signal type for download
For example: 'AVG', 'SD', 'MIN', 'MAX', 'GUST'
:Returns:
out: DataFrame with signal data from virtual sensor
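        Example (the mvs_ids below are hypothetical)::
            import anemoi as an
            m2d2 = an.io.database.M2D2()
            data = m2d2.data_from_sensors_mvs_ids([1001, 1002])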
'''
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
valid_mvs_ids = self.mvs_ids()
assert all([mvs_id in valid_mvs_ids for mvs_id in mvs_ids]), f'One of the following is not a valid mvs_id: {mvs_ids}'
mvs_ids_list = sql_list_from_mvs_ids(mvs_ids)
sql_query= f"""
SET NOCOUNT ON
DECLARE @ColumnListID NVARCHAR(4000)
,@startDate DATETIME2
,@endDate DATETIME2
SET @ColumnListID= '{mvs_ids_list}'
SET @startDate = NULL
SET @endDate = NULL
EXECUTE [dbo].[proc_DataExport_GetDataByColumnList]
@ColumnListID
,@startDate
,@endDate
"""
data = pd.read_sql(sql_query, self.conn, index_col='CorrectedTimestamp')
data.index.name = 'stamp'
data.columns.name = 'sensor'
data = self.column_labels_for_data_from_mvs_ids(data)
return data
def data_from_mast_wmm_id(self, wmm_id):
'''Download data from all sensors on a mast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with signal data from each virtual sensor on the mast
'''
masts = self.masts()
wmm_ids = masts.index.get_level_values('wmm_id').sort_values().unique().tolist()
assert wmm_id in wmm_ids, f'the following is not a valid wmm_id: {wmm_id}'
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.values.tolist()
data = self.data_from_sensors_mvs_ids(mvs_ids)
return data
def metadata_from_mast_wmm_id(self, wmm_id):
'''Download mast metadata from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with mast metadata
'''
sql_query= '''
SELECT [WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]
WHERE wmm_id = {}
'''.format(wmm_id)
mast_metadata = pd.read_sql(sql_query, self.conn)
return mast_metadata
def mast_from_wmm_id(self, wmm_id):
'''Download an.MetMast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: an.MetMast with data and metadata from M2D2
'''
print(f'Downloading Mast {wmm_id} from M2D2')
data = self.data_from_mast_wmm_id(wmm_id=wmm_id)
metadata = self.metadata_from_mast_wmm_id(wmm_id=wmm_id)
mast = an.MetMast(data=data,
name=wmm_id,
lat=metadata.WMM_Latitude[0],
lon=metadata.WMM_Longitude[0],
elev=metadata.WMM_Elevation[0])
return mast
def masts_from_project(self, project):
'''Download an.MetMasts from M2D2 for a given project
:Parameters:
project_name: str
Project name in M2D2
:Returns:
out: List of an.MetMasts with data and metadata from M2D2 for a given project
'''
masts = self.masts()
projects = masts.index.get_level_values('Project').unique().tolist()
        assert project in projects, f'Project {project} not found in M2D2'
wmm_ids = masts.loc[project,:].index.get_level_values('wmm_id').sort_values().unique().tolist()
masts = [self.mast_from_wmm_id(wmm_id) for wmm_id in wmm_ids]
return masts
# Define Turbine class
class Turbine(object):
'''Class to connect to EDF Wind Turbine database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is:
import anemoi as an
turb_db = an.io.database.Turbine()
:Parameters:
:Returns:
out: an.Turbine object connected to Turbine database
'''
self.database = 'Turbine'
server = '10.1.15.53'
db = 'Turbine_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def metadata(self):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_turbines = '''
SELECT [TUR_Manufacturer]
,[TUR_RatedOutputkW]
,[TPC_MaxOutput]
,[TUR_RotorDia]
,[TUR_Model]
,[AllHubHeights]
,[TPC_DocumentDate]
,[TUR_ID]
,[IECClass]
,[TPG_ID]
,[TPG_Name]
,[TPC_ID]
,[TVR_VersionName]
,[TPC_dbalevel]
,[TPC_TIScenario]
,[TPC_BinType]
,[TTC_ID]
,[TRPMC_ID]
,[P_ID]
,[P_Name]
FROM [Turbine_DB_BE].[NodeEstimate].[AllPowerCurves]
WHERE TPC_Type = 'Manufacturer General Spec'
'''
turbines = pd.read_sql(sql_query_turbines, self.conn)
return turbines
    def power_curve_from_tpc_id(self, tpc_id):
        '''Get the power curve for a given power curve id (TPC_ID)'''
        assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
        sql_query_power_curve = '''
        SELECT TPCD_AirDensity,
        TPCD_WindSpeedBin,
        TPCD_OutputKW
        FROM TPCDETAILS
        WHERE TPC_id = {} AND TPCD_IsDeleted = 0;
        '''.format(tpc_id)
        power_curve = pd.read_sql(sql_query_power_curve, self.conn)
        return power_curve
    def thrust_curve_from_ttc_id(self, ttc_id):
        '''Get the thrust curve for a given thrust curve id (TTC_ID)'''
        assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
        sql_query_thrust_curve = '''
        SELECT TTCD_AirDensity,
        TTCD_WindSpeedBin,
        TTCD_ThrustValue
        FROM TTCDETAILS
        WHERE TTC_id = {} AND TTCD_IsDeleted = 0;
        '''.format(ttc_id)
        thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
        return thrust_curve
# Define Padre class
class Padre(object):
'''Class to connect to PRE Padre database
'''
def __init__(self, database='PADREScada', conn_str=None, conn=None, domino=False):
'''Data structure with both database name and connection string.
:Parameters:
database: string, default None
Name of the padre database to connect to
conn_str: string, default None
SQL connection string needed to connect to the database
conn: object, default None
SQL connection object to database
'''
self.database = database
if self.database == 'PADREScada':
server = '10.1.106.44'
db = 'PADREScada'
elif self.database == 'PadrePI':
server = '10.1.106.44'
db = 'PADREScada'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str
try:
self.conn = pyodbc.connect(self.conn_str)
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def assets(self, project=None, turbines_only=False):
'''Returns:
DataFrame of all turbines within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
sql_query_assets = '''
SELECT [AssetKey]
,Projects.[ProjectName]
,[AssetType]
,[AssetName]
,Turbines.[Latitude]
,Turbines.[Longitude]
,[elevation_mt]
FROM [PADREScada].[dbo].[Asset] as Turbines
WITH (NOLOCK)
INNER JOIN [PADREScada].[dbo].[Project] as Projects on Turbines.ProjectKey = Projects.ProjectKey
'''
assets = pd.read_sql(sql_query_assets, self.conn)
assets.set_index(['ProjectName', 'AssetName'], inplace=True)
assets.sort_index(axis=0, inplace=True)
if turbines_only:
assets = assets.loc[assets.AssetType == 'Turbine', :]
assets.drop('AssetType', axis=1, inplace=True)
if project is not None:
assets = assets.loc[project, :]
return assets
def operational_projects(self):
'''Returns:
List of all projects within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
padre_project_query = """
SELECT [ProjectKey]
,[ProjectName]
,[State]
,[NamePlateCapacity]
,[NumGenerators]
,[latitude]
,[longitude]
,[DateCOD]
FROM [PADREScada].[dbo].[Project]
WHERE technology = 'Wind'"""
projects = pd.read_sql(padre_project_query, self.conn)
projects.set_index('ProjectName', inplace=True)
return projects
def turbine_categorizations(self, category_type='EDF'):
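        '''Returns:
            DataFrame of category labels (StringName) indexed by CategoryKey
            for the given category_type (e.g. 'EDF' or 'IEC')
        '''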
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
        padre_category_query = """
SELECT [CategoryKey]
,[StringName]
FROM [PADREScada].[dbo].[Categories]
WHERE CategoryType = '%s'""" %category_type
        categories = pd.read_sql(padre_category_query, self.conn)
categories.set_index('CategoryKey', inplace=True)
return categories
def QCd_turbine_data(self, asset_key):
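        '''Returns:
            DataFrame of quality-controlled 10-minute data (wind speed, power,
            temperature, categories, expected energy), indexed by
            TimeStampLocal, for the given asset_key
        '''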
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT [TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Ambient_Temperature]
,[IEC Category]
,[EDF Category]
,[Expected Power (kW)]
,[Expected Energy (kWh)]
,[EnergyDelta (kWh)]
,[EnergyDelta (MWh)]
FROM [PADREScada].[dbo].[vw_10mDataBI]
WITH (NOLOCK)
WHERE [assetkey] = %i''' %asset_key
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_data(self, asset_key, start_date=None, end_date=None):
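        '''Returns:
            DataFrame of raw 10-minute turbine data for the given asset_key,
            optionally limited to [start_date, end_date)
        '''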
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Nacelle_Direction]
,[Average_Blade_Pitch]
,[Minimum_Blade_Pitch]
,[Maximum_Blade_Pitch]
,[Average_Rotor_Speed]
,[Minimum_Rotor_Speed]
,[Maximum_Rotor_Speed]
,[Average_Ambient_Temperature]
,coalesce([IECStringKey_Manual]
,[IECStringKey_FF]
,[IECStringKey_Default]) IECKey
,coalesce([EDFStringKey_Manual]
,[EDFStringKey_FF]
,[EDFStringKey_Default]) EDFKey
,coalesce([State_and_Fault_Manual]
,[State_and_Fault_FF]
,[State_and_Fault]) State_and_Fault
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {} {}'''.format(asset_key, return_between_date_query_string(start_date, end_date))
turbine_data = | pd.read_sql(turbine_data_query, self.conn) | pandas.read_sql |
# encoding: utf-8
import itertools
import random
from datetime import date
from typing import List, Tuple
import pandas as pd
class DataFrameMock:
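    """Collection of static factory methods that build synthetic
    ``pandas.DataFrame`` objects for tests, e.g. (illustrative)::
        df = DataFrameMock.df_generic(sample_size=100)
    """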
@staticmethod
def df_generic(sample_size):
"""
        Create a generic DataFrame with ``sample_size`` samples and 5 columns.
        The returned DataFrame contains two metadata columns and three "exam"
        columns, holding numerical and string values.
Parameters
----------
sample_size:
Number of samples in the returned DataFrame.
Returns
-------
pd.DataFrame
            Pandas DataFrame instance with ``sample_size`` samples and 5 columns:
            three with numerical values and two with string values.
"""
return pd.DataFrame(
{
"metadata_num_col": list(range(sample_size)),
"metadata_str_col": [f"value_{i}" for i in range(sample_size)],
"exam_num_col_0": list(range(sample_size)),
"exam_num_col_1": list(range(sample_size)),
"exam_str_col_0": [f"value_{i}" for i in range(sample_size)],
}
)
@staticmethod
def df_many_nans(nan_ratio: float, n_columns: int) -> pd.DataFrame:
"""
Create pandas DataFrame with ``n_columns`` containing ``nan_ratio`` ratio of
NaNs.
DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns
contain less than ``nan_ratio`` ratio of NaNs.
Parameters
----------
nan_ratio : float
Ratio of NaNs that will be present in ``n_columns`` of the DataFrame.
n_columns : int
Number of columns that will contain ``nan_ratio`` ratio of NaNs.
Returns
-------
pd.DataFrame
Pandas DataFrame with ``n_columns`` containing ``nan_ratio`` ratio of NaNs
and 5 columns with a lower ratio of NaNs.
"""
many_nan_dict = {}
sample_count = 100
# Create n_columns columns with NaN
nan_sample_count = int(sample_count * nan_ratio)
for i in range(n_columns):
many_nan_dict[f"nan_{i}"] = [pd.NA] * nan_sample_count + [1] * (
sample_count - nan_sample_count
)
# Create not_nan_columns with less than nan_ratio ratio of NaNs
not_nan_columns = 5
for j in range(not_nan_columns):
nan_ratio_per_column = nan_ratio - 0.01 * (j + 1)
# If nan_ratio_per_column < 0, set 0 samples to NaN (to avoid negative
# sample counts)
if nan_ratio_per_column < 0:
nan_sample_count = 0
else:
nan_sample_count = int(sample_count * nan_ratio_per_column)
many_nan_dict[f"not_nan_{j}"] = [pd.NA] * nan_sample_count + [1] * (
sample_count - nan_sample_count
)
return pd.DataFrame(many_nan_dict)
@staticmethod
def df_nans_filled(columns: List[str]) -> pd.DataFrame:
"""Starting from the df returned by ``.df_many_nans``, set ``columns`` to 1s.
Parameters
----------
columns : List[str]
Name of the columns to set to 1s
Returns
-------
pd.DataFrame
DataFrame with the ``columns`` set to 1s
"""
df = DataFrameMock.df_many_nans(nan_ratio=0.5, n_columns=3)
for column in columns:
            df[column] = pd.Series([1] * 100)
return df
@staticmethod
def df_same_value(n_columns: int) -> pd.DataFrame:
"""
Create pandas DataFrame with ``n_columns`` containing the same repeated value.
DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns
contain different valid values (and a variable count of a repeated value).
Parameters
----------
n_columns : int
Number of columns that will contain the same repeated value.
Returns
-------
pd.DataFrame
Pandas DataFrame with ``n_columns`` containing the same repeated value
and 5 columns with some different values.
"""
random.seed(42)
constant_value_dict = {}
sample_count = 100
# Create n_columns columns with same repeated value
for i in range(n_columns):
constant_value_dict[f"same_{i}"] = [4] * sample_count
# Create not_constant_columns with repeated values and random values
not_constant_columns = 5
for j in range(not_constant_columns):
constant_value_sample_count = int(sample_count * (1 - 0.1 * (j + 1)))
constant_value_dict[f"not_same_{j}"] = [4] * constant_value_sample_count + [
random.random()
for _ in range(sample_count - constant_value_sample_count)
]
return pd.DataFrame(constant_value_dict)
@staticmethod
def df_trivial(n_columns: int) -> pd.DataFrame:
"""
Create pandas DataFrame with ``n_columns`` containing trivial values.
        Half of the trivial columns contain only NaNs, and the other half contain
        a single repeated value.
DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns
contain random values and a variable count of a repeated value and NaNs.
Parameters
----------
n_columns : int
            Number of trivial columns (half NaN-only, half constant-valued).
Returns
-------
pd.DataFrame
            Pandas DataFrame with ``n_columns`` trivial columns (NaN-only or
            constant-valued) and 5 columns with some different values.
"""
random.seed(42)
trivial_dict = {}
sample_count = 100
nan_columns = n_columns // 2
constant_value_columns = n_columns - nan_columns
# Create half of n_columns columns with NaN
for i in range(nan_columns):
trivial_dict[f"nan_{i}"] = [pd.NA] * sample_count
# Create half of n_columns columns with repeated value
for j in range(constant_value_columns):
trivial_dict[f"same_{j}"] = [4] * sample_count
# Create 5 more columns with valid values (with NaN, repeated and random values)
valid_values_columns = 5
for k in range(valid_values_columns):
constant_value_sample_count = int(sample_count * (1 - 0.05 * (k + 1)) / 2)
nan_sample_count = int(sample_count * (1 - 0.05 * (k + 1)) / 2)
random_samples = [
random.random() * 100
for _ in range(
sample_count - constant_value_sample_count - nan_sample_count
)
]
trivial_dict[f"not_nan_not_same_{k}"] = (
[4] * constant_value_sample_count
+ [pd.NA] * nan_sample_count
+ random_samples
)
return pd.DataFrame(trivial_dict)
@staticmethod
def df_multi_type(sample_size: int) -> pd.DataFrame:
"""
Create pandas DataFrame with columns containing values of different types.
The returned DataFrame has a number of rows equal to the biggest
value V such that:
        a) V <= ``sample_size``
b) V is divisible by 10.
The DataFrame has columns as follows:
1. One column containing boolean values
2. One column containing string values
3. One column containing string repeated values ("category" dtype)
        4. One column containing numerical values (it is meant to simulate metadata)
5. One column containing numerical values
6. One column containing numerical repeated values ("category" dtype)
7. One column containing datetime values
8. One column containing 'interval' typed values
9. One column containing values of mixed types
10. One column containing repeated values
11. One column containing NaN values (+ 1 numerical value)
Parameters
----------
sample_size: int
Number of samples that the returned DataFrame will contain.
Returns
-------
pd.DataFrame
            Pandas DataFrame with roughly ``sample_size`` samples and the
            columns listed above, containing values of different types.
"""
random.seed(42)
# Get only the part that is divisible by 2 and 5
sample_size = sample_size // 10 * 10
bool_col = [True, False, True, True, False] * (sample_size // 5)
random.shuffle(bool_col)
df_multi_type_dict = {
"metadata_num_col": list(range(sample_size)),
"bool_col": bool_col,
"string_col": [f"value_{i}" for i in range(sample_size)],
"str_forced_categorical_col": pd.Series(
["category_0", "category_1", "category_2", "category_3", "category_4"]
* (sample_size // 5),
dtype="category",
),
"str_categorical_col": pd.Series(
["category_0", "category_1", "category_2", "category_3", "category_4"]
* (sample_size // 5)
),
"int_forced_categorical_col": pd.Series(
[0, 1, 2, 3, 4] * (sample_size // 5), dtype="category"
),
"int_categorical_col": pd.Series([0, 1, 2, 3, 4] * (sample_size // 5)),
"float_col": [0.05 * i for i in range(sample_size)],
"int_col": list(range(sample_size)),
"datetime_col": [date(2000 + i, 8, 1) for i in range(sample_size)],
"interval_col": pd.arrays.IntervalArray(
[pd.Interval(0, i) for i in range(sample_size)],
),
"mixed_type_col": list(range(sample_size // 2))
+ [f"value_{i}" for i in range(sample_size // 2)],
"same_col": [2] * sample_size,
"nan_col": [pd.NA] * (sample_size - 1) + [3],
}
return pd.DataFrame(df_multi_type_dict)
@staticmethod
def df_column_names_by_type() -> pd.DataFrame:
"""
Create DataFrame sample that contains column name and types of a generic
DataFrame.
DataFrame has 11 rows and 2 columns. One column called "col_name" contains
some strings (that represent the column names of another DataFrame sample
"df2"). Another column called "col_type" contains some possible outputs from
``trousse.dataset._find_single_column_type`` function
that describe the type of values contained in the column of "df2".
Returns
-------
pd.DataFrame
Pandas DataFrame with 2 columns containing strings and types respectively.
"""
return pd.DataFrame(
[
{"col_name": "bool_col_0", "col_type": "bool_col"},
{"col_name": "bool_col_1", "col_type": "bool_col"},
{"col_name": "string_col_0", "col_type": "string_col"},
{"col_name": "string_col_1", "col_type": "string_col"},
{"col_name": "string_col_2", "col_type": "string_col"},
{"col_name": "numerical_col_0", "col_type": "numerical_col"},
{"col_name": "other_col_0", "col_type": "other_col"},
{"col_name": "mixed_type_col_0", "col_type": "mixed_type_col"},
{"col_name": "mixed_type_col_1", "col_type": "mixed_type_col"},
{"col_name": "mixed_type_col_2", "col_type": "mixed_type_col"},
{"col_name": "mixed_type_col_3", "col_type": "mixed_type_col"},
]
)
@staticmethod
def df_categorical_cols(sample_size: int) -> pd.DataFrame:
"""
Create pandas DataFrame with columns containing categorical values
The returned DataFrame will contain ``sample_size`` samples and 12 columns.
The columns will be distinguished based on value types (sample_type) and
number of unique values (unique_value_count).
Parameters
----------
sample_size: int
Number of samples in the returned DataFrame
Returns
-------
pd.DataFrame
Pandas DataFrame containing ``sample_size`` samples and 12 columns with
various sample types and number of unique values
"""
random.seed(42)
unique_value_counts = (3, 5, 8, 40)
categ_cols_dict = {}
mixed_list = [f"string_{i}" for i in range(20)] + [i * 20 for i in range(21)]
random.shuffle(mixed_list)
value_per_sample_type = {
"numerical": [i * 20 for i in range(41)],
"string": [f"string_{i}" for i in range(41)],
"mixed": mixed_list,
}
for unique_value_count, sample_type in itertools.product(
unique_value_counts, value_per_sample_type.keys()
):
if sample_size < unique_value_count:
# Cannot have more unique values than samples
unique_value_count = sample_size
# how many times every value will be repeated to fill up the column
repetitions_per_value = sample_size // unique_value_count
# This is to always have the same sample_size
last_value_repetitions = sample_size - repetitions_per_value * (
unique_value_count - 1
)
# Create list of lists containing the same repeated values (category)
repeated_categories_list = []
for i in range(unique_value_count - 1):
repeated_categories_list.append(
[value_per_sample_type[sample_type][i]] * repetitions_per_value
)
repeated_categories_list.append(
[value_per_sample_type[sample_type][unique_value_count]]
* last_value_repetitions
)
# Combine lists into one column of the DataFrame
categ_cols_dict[f"{sample_type}_{unique_value_count}"] = list(
itertools.chain.from_iterable(repeated_categories_list)
)
return | pd.DataFrame(categ_cols_dict) | pandas.DataFrame |
"""
The pymaf submodule is designed for working with MAF files. It implements
``pymaf.MafFrame`` which stores MAF data as ``pandas.DataFrame`` to allow
fast computation and easy manipulation. The ``pymaf.MafFrame`` class also
contains many useful plotting methods such as ``MafFrame.plot_oncoplot`` and
``MafFrame.plot_summary``. The submodule strictly adheres to the
standard `MAF specification
<https://docs.gdc.cancer.gov/Data/File_Formats/MAF_Format/>`_.
A typical MAF file contains many columns ranging from gene symbol to
protein change. However, most of the analysis in pymaf uses the
following columns:
+-----+------------------------+----------------------+-------------------------------+
| No. | Name | Description | Examples |
+=====+========================+======================+===============================+
| 1 | Hugo_Symbol | HUGO gene symbol | 'TP53', 'Unknown' |
+-----+------------------------+----------------------+-------------------------------+
| 2 | Chromosome | Chromosome name | 'chr1', '1', 'X' |
+-----+------------------------+----------------------+-------------------------------+
| 3 | Start_Position | Start coordinate | 119031351 |
+-----+------------------------+----------------------+-------------------------------+
| 4 | End_Position | End coordinate | 44079555 |
+-----+------------------------+----------------------+-------------------------------+
| 5 | Variant_Classification | Translational effect | 'Missense_Mutation', 'Silent' |
+-----+------------------------+----------------------+-------------------------------+
| 6 | Variant_Type | Mutation type | 'SNP', 'INS', 'DEL' |
+-----+------------------------+----------------------+-------------------------------+
| 7 | Reference_Allele | Reference allele | 'T', '-', 'ACAA' |
+-----+------------------------+----------------------+-------------------------------+
| 8 | Tumor_Seq_Allele1 | First tumor allele | 'A', '-', 'TCA' |
+-----+------------------------+----------------------+-------------------------------+
| 9 | Tumor_Seq_Allele2 | Second tumor allele | 'A', '-', 'TCA' |
+-----+------------------------+----------------------+-------------------------------+
| 10 | Tumor_Sample_Barcode | Sample ID | 'TCGA-AB-3002' |
+-----+------------------------+----------------------+-------------------------------+
| 11 | Protein_Change | Protein change | 'p.L558Q' |
+-----+------------------------+----------------------+-------------------------------+
It is also recommended to include additional custom columns such as variant
allele frequency (VAF) and transcript name.
If sample annotation data are available for a given MAF file, use
the :class:`common.AnnFrame` class to import the data.
There are nine nonsynonymous variant classifications that pymaf primarily
uses: Missense_Mutation, Frame_Shift_Del, Frame_Shift_Ins, In_Frame_Del,
In_Frame_Ins, Nonsense_Mutation, Nonstop_Mutation, Splice_Site, and
Translation_Start_Site.
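A minimal usage sketch (the MAF file path below is hypothetical)::
    >>> from fuc import pymaf
    >>> mf = pymaf.MafFrame.from_file('sample.maf')
    >>> mf.genes[:5]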
"""
import re
import copy
import warnings
import itertools
from . import pyvcf, common
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from matplotlib_venn import venn2, venn3
from scipy.stats import fisher_exact
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.gridspec as gridspec
CHROM_LENGTHS = {
'hg18': [
247249719, 242951149, 199501827, 191273063, 180857866, 170899992,
158821424, 146274826, 140273252, 135374737, 134452384, 132349534,
114142980, 106368585, 100338915, 88827254, 78774742, 76117153,
63811651, 62435964, 46944323, 49691432, 154913754, 57772954
],
'hg19': [
249250621, 243199373, 198022430, 191154276, 180915260, 171115067,
159138663, 146364022, 141213431, 135534747, 135006516, 133851895,
115169878, 107349540, 102531392, 90354753, 81195210, 78077248,
59128983, 63025520, 48129895, 51304566, 155270560, 59373566
],
'hg38': [
248956422, 242193529, 198295559, 190214555, 181538259, 170805979,
159345973, 145138636, 138394717, 133797422, 135086622, 133275309,
114364328, 107043718, 101991189, 90338345, 83257441, 80373285,
58617616, 64444167, 46709983, 50818468, 156040895, 57227415
],
}
COMMON_COLUMNS = [
'Hugo_Symbol', 'Entrez_Gene_Id', 'Center', 'NCBI_Build', 'Chromosome',
'Start_Position', 'End_Position', 'Strand', 'Variant_Classification',
'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele1',
'Tumor_Seq_Allele2', 'Tumor_Sample_Barcode', 'Protein_Change'
]
# Below is the list of calculated variant consequences from Ensembl VEP:
# https://m.ensembl.org/info/genome/variation/prediction/predicted_data.html
# (accessed on 2021-05-31)
#
# Note that both frameshift_variant and protein_altering_variant require
# additional information to find their correct Variant_Classification.
VEP_CONSEQUENCES = {
'transcript_ablation': 'Splice_Site',
'splice_acceptor_variant': 'Splice_Site',
'splice_donor_variant': 'Splice_Site',
'stop_gained': 'Nonsense_Mutation',
'frameshift_variant': 'AMBIGUOUS',
'stop_lost': 'Nonstop_Mutation',
'start_lost': 'Translation_Start_Site',
'transcript_amplification': 'Intron',
'inframe_insertion': 'In_Frame_Ins',
'inframe_deletion': 'In_Frame_Del',
'missense_variant': 'Missense_Mutation',
'protein_altering_variant': 'AMBIGUOUS',
'splice_region_variant': 'Splice_Region',
'incomplete_terminal_codon_variant': 'Silent',
'start_retained_variant': 'Silent',
'stop_retained_variant': 'Silent',
'synonymous_variant': 'Silent',
'coding_sequence_variant': 'Missense_Mutation',
'mature_miRNA_variant': 'RNA',
'5_prime_UTR_variant': "5'UTR",
'3_prime_UTR_variant': "3'UTR",
'non_coding_transcript_exon_variant': 'RNA',
'intron_variant': 'Intron',
'NMD_transcript_variant': 'Silent',
'non_coding_transcript_variant': 'RNA',
'upstream_gene_variant': "5'Flank",
'downstream_gene_variant': "3'Flank",
'TFBS_ablation': 'Targeted_Region',
'TFBS_amplification': 'Targeted_Region',
'TF_binding_site_variant': 'IGR',
'regulatory_region_ablation': 'Targeted_Region',
'regulatory_region_amplification': 'Targeted_Region',
'feature_elongation': 'Targeted_Region',
'regulatory_region_variant': 'IGR',
'feature_truncation': 'Targeted_Region',
'intergenic_variant': 'IGR',
}
VARCLS_LIST = [
"3'Flank",
"3'UTR",
"5'Flank",
"5'UTR",
'De_novo_Start_InFrame',
'De_novo_Start_OutOfFrame',
'Frame_Shift_Del',
'Frame_Shift_Ins',
'IGR',
'In_Frame_Del',
'In_Frame_Ins',
'Intron',
'Missense_Mutation',
'Nonsense_Mutation',
'Nonstop_Mutation',
'RNA',
'Silent',
'Splice_Region',
'Splice_Site',
'Start_Codon_Ins',
'Start_Codon_SNP',
'Stop_Codon_Del',
'Targeted_Region',
'Translation_Start_Site',
'lincRNA',
]
NONSYN_NAMES = [
'Missense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins',
'In_Frame_Del', 'In_Frame_Ins', 'Nonsense_Mutation',
'Nonstop_Mutation', 'Splice_Site', 'Translation_Start_Site'
]
NONSYN_COLORS = [
'tab:green', 'tab:blue', 'tab:purple', 'tab:olive', 'tab:red',
'tab:cyan', 'tab:pink', 'tab:orange', 'tab:brown'
]
SNV_CLASSES = {
'A>C': {'class': 'T>G', 'type': 'Tv'},
'A>G': {'class': 'T>C', 'type': 'Ti'},
'A>T': {'class': 'T>A', 'type': 'Tv'},
'C>A': {'class': 'C>A', 'type': 'Tv'},
'C>G': {'class': 'C>G', 'type': 'Tv'},
'C>T': {'class': 'C>T', 'type': 'Ti'},
'G>A': {'class': 'C>T', 'type': 'Ti'},
'G>C': {'class': 'C>G', 'type': 'Tv'},
'G>T': {'class': 'C>A', 'type': 'Tv'},
'T>A': {'class': 'T>A', 'type': 'Tv'},
'T>C': {'class': 'T>C', 'type': 'Ti'},
'T>G': {'class': 'T>G', 'type': 'Tv'},
}
SNV_CLASS_ORDER = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
class MafFrame:
"""Class for storing MAF data.
Parameters
----------
df : pandas.DataFrame
DataFrame containing MAF data.
See Also
--------
MafFrame.from_file
Construct MafFrame from a MAF file.
"""
def __init__(self, df):
self.df = df.reset_index(drop=True)
@property
def shape(self):
"""tuple : Dimensionality of MafFrame (variants, samples)."""
return (self.df.shape[0], len(self.samples))
@property
def samples(self):
"""list : List of the sample names."""
return list(self.df.Tumor_Sample_Barcode.unique())
@property
def genes(self):
"""list : List of the genes."""
return list(self.df.Hugo_Symbol.unique())
def copy(self):
"""Return a copy of the MafFrame."""
return self.__class__(self.df.copy())
def compute_clonality(self, vaf_col, threshold=0.25):
"""
Compute the clonality of variants based on
:ref:`VAF <glossary:Variant allele frequency (VAF)>`.
A mutation will be defined as "Subclonal" if the VAF is less than the
threshold percentage (e.g. 25%) of the highest VAF in the sample and
is defined as "Clonal" if it is equal to or above this threshold.
Parameters
----------
vaf_col : str
MafFrame column containing VAF data.
threshold : float
Minimum VAF to be considered as "Clonal".
Returns
-------
panda.Series
Clonality for each variant.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.df['Clonality'] = mf.compute_clonality('i_TumorVAF_WU')
>>> mf.df['Clonality'][:10]
0 Clonal
1 Clonal
2 Clonal
3 Clonal
4 Clonal
5 Clonal
6 Clonal
7 Clonal
8 Clonal
9 Clonal
Name: Clonality, dtype: object
"""
d = self.df.groupby('Tumor_Sample_Barcode')[vaf_col].max().to_dict()
def one_row(r):
m = d[r.Tumor_Sample_Barcode]
if r[vaf_col] < m * threshold:
result = 'Subclonal'
else:
result = 'Clonal'
return result
s = self.df.copy().apply(one_row, axis=1)
return s
@classmethod
def from_file(cls, fn):
"""
Construct MafFrame from a MAF file.
Parameters
----------
fn : str
MAF file (compressed or uncompressed).
Returns
-------
MafFrame
MafFrame object.
See Also
--------
MafFrame
MafFrame object creation using constructor.
Examples
--------
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
"""
# Read the input MAF file.
df = pd.read_table(fn)
# Check the letter case of column names. This will help distinguish
# missing columns from columns with incorrect letter case (e.g.
# 'End_Position' vs. 'End_position').
lower_names = [x.lower() for x in COMMON_COLUMNS]
for col in df.columns:
if col.lower() in lower_names:
i = lower_names.index(col.lower())
if col != COMMON_COLUMNS[i]:
message = (
f"Input column '{col}' will be renamed "
f"as '{COMMON_COLUMNS[i]}'."
)
warnings.warn(message)
df = df.rename(columns={col: COMMON_COLUMNS[i]})
# Set the data type of chromosomes as string (e.g. 'chr1' vs. '1').
if 'Chromosome' in df.columns:
df.Chromosome = df.Chromosome.astype(str)
return cls(df)
@classmethod
def from_vcf(cls, vcf, keys=None, names=None):
"""
Construct MafFrame from a VCF file or VcfFrame.
It is recommended that the input VCF data be functionally annotated
by an annotation tool such as Ensembl VEP, SnpEff, and ANNOVAR;
however, the method can handle unannotated VCF data as well.
The preferred tool for functional annotation is Ensembl VEP with
"RefSeq transcripts" as the transcript database and the filtering
option "Show one selected consequence per variant".
Parameters
----------
vcf : str or VcfFrame
VCF file or VcfFrame.
keys : str or list
Genotype key (e.g. 'AD', 'AF') or list of genotype keys to be
added to the MafFrame.
names : str or list
Column name or list of column names for ``keys`` (must be the
same length). By default, the genotype keys will be used as
column names.
Examples
--------
Below is a simple example:
>>> from fuc import pyvcf, pymaf
>>> data = {
... 'CHROM': ['chr1', 'chr2'],
... 'POS': [100, 101],
... 'ID': ['.', '.'],
... 'REF': ['G', 'T'],
... 'ALT': ['A', 'C'],
... 'QUAL': ['.', '.'],
... 'FILTER': ['.', '.'],
... 'INFO': ['CSQ=T|missense_variant|MODERATE|MTOR|2475|Transcript|NM_001386500.1|protein_coding|47/58||||6792|6644|2215|S/Y|tCt/tAt|rs587777894&COSV63868278&COSV63868313||-1||EntrezGene||||||||G|G||deleterious(0)|possibly_damaging(0.876)||||||||||||||||||likely_pathogenic&pathogenic|0&1&1|1&1&1|26619011&27159400&24631838&26018084&27830187|||||', 'CSQ=C|splice_donor_variant|HIGH|MTOR|2475|Transcript|NM_001386500.1|protein_coding||46/57||||||||||-1||EntrezGene||||||||A|A|||||||||||||||||||||||||||||'],
... 'FORMAT': ['GT:AD:DP:AF', 'GT:AD:DP:AF'],
... 'A': ['0/1:176,37:213:0.174', '0/1:966,98:1064:0.092']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . CSQ=T|missense_variant|MODERATE|MTOR|2475|Tran... GT:AD:DP:AF 0/1:176,37:213:0.174
1 chr2 101 . T C . . CSQ=C|splice_donor_variant|HIGH|MTOR|2475|Tran... GT:AD:DP:AF 0/1:966,98:1064:0.092
>>> mf = pymaf.MafFrame.from_vcf(vf)
>>> mf.df
Hugo_Symbol Entrez_Gene_Id Center NCBI_Build Chromosome Start_Position End_Position Strand Variant_Classification Variant_Type Reference_Allele Tumor_Seq_Allele1 Tumor_Seq_Allele2 Protein_Change Tumor_Sample_Barcode
0 MTOR 2475 . . chr1 100 100 - Missense_Mutation SNP G A A p.S2215Y A
1 MTOR 2475 . . chr2 101 101 - Splice_Site SNP T C C . A
We can add genotype keys such as AD and AF:
>>> mf = pymaf.MafFrame.from_vcf(vf, keys=['AD', 'AF'])
>>> mf.df
Hugo_Symbol Entrez_Gene_Id Center NCBI_Build Chromosome Start_Position End_Position Strand Variant_Classification Variant_Type Reference_Allele Tumor_Seq_Allele1 Tumor_Seq_Allele2 Protein_Change Tumor_Sample_Barcode AD AF
0 MTOR 2475 . . chr1 100 100 - Missense_Mutation SNP G A A p.S2215Y A 176,37 0.174
1 MTOR 2475 . . chr2 101 101 - Splice_Site SNP T C C . A 966,98 0.092
The method can accept a VCF file as input instead of VcfFrame:
>>> mf = pymaf.MafFrame.from_vcf('annotated.vcf')
The method can also handle unannotated VCF data:
>>> data = {
... 'CHROM': ['chr1', 'chr1', 'chr1'],
... 'POS': [100, 200, 300],
... 'ID': ['.', '.', '.'],
... 'REF': ['G', 'C', 'TTC'],
... 'ALT': ['A', 'CAG', 'T'],
... 'QUAL': ['.', '.', '.'],
... 'FILTER': ['.', '.', '.'],
... 'INFO': ['.', '.', '.'],
... 'FORMAT': ['GT', 'GT', 'GT'],
... 'A': ['0/1', '0/1', '0/1']
... }
>>> vf = pyvcf.VcfFrame.from_dict([], data)
>>> vf.df
CHROM POS ID REF ALT QUAL FILTER INFO FORMAT A
0 chr1 100 . G A . . . GT 0/1
1 chr1 200 . C CAG . . . GT 0/1
2 chr1 300 . TTC T . . . GT 0/1
>>> mf = pymaf.MafFrame.from_vcf(vf)
>>> mf.df
Hugo_Symbol Entrez_Gene_Id Center NCBI_Build Chromosome Start_Position End_Position Strand Variant_Classification Variant_Type Reference_Allele Tumor_Seq_Allele1 Tumor_Seq_Allele2 Protein_Change Tumor_Sample_Barcode
0 . . . . chr1 100 100 . . SNP G A A . A
1 . . . . chr1 200 201 . . INS - AG AG . A
2 . . . . chr1 301 302 . . DEL TC - - . A
"""
# Parse the input VCF.
if isinstance(vcf, str):
vf = pyvcf.VcfFrame.from_file(vcf)
else:
vf = vcf
# Set some default values in case the VCF is not annotated.
ncbi_build = '.'
# Get the NCBI_Build data.
for line in vf.meta:
if line.startswith('##VEP'):
ncbi_build = re.search(r'assembly="(.*?)"', line).group(1)
break
# Define the conversion algorithm.
def one_row(r):
has_annot = 'CSQ=' in r.INFO
# Set some default values in case the VCF is not annotated.
strand = '.'
variant_classification = '.'
protein_change = '.'
hugo_symbol = '.'
entrez_gene_id = '.'
# Get the sequence data.
            # In-frame events have a REF/ALT length difference divisible by three.
            inframe = abs(len(r.REF) - len(r.ALT)) % 3 == 0
if len(r.REF) == len(r.ALT) == 1:
variant_type = 'SNP'
start_position = r.POS
end_position = r.POS
reference_allele = r.REF
tumor_seq_allele1 = r.ALT
tumor_seq_allele2 = r.ALT
elif len(r.REF) > len(r.ALT):
variant_type = 'DEL'
start_position = r.POS + 1
end_position = r.POS + len(r.REF) - len(r.ALT)
reference_allele = r.REF[1:]
tumor_seq_allele1 = '-'
tumor_seq_allele2 = '-'
else:
variant_type = 'INS'
start_position = r.POS
end_position = r.POS + 1
reference_allele = '-'
tumor_seq_allele1 = r.ALT[1:]
tumor_seq_allele2 = r.ALT[1:]
fields = r.INFO.replace('CSQ=', '').split(',')[0].split('|')
# Get the Strand data.
if has_annot:
strand = '+' if fields[19] == '1' else '-'
# Get the Variant_Classification data.
if has_annot:
consequence = fields[1].split('&')[0]
if consequence == 'frameshift_variant':
if variant_type == 'DEL':
variant_classification = 'Frame_Shift_Del'
else:
variant_classification = 'Frame_Shift_Ins'
elif consequence == 'protein_altering_variant':
if inframe:
if variant_type == 'DEL':
variant_classification = 'In_Frame_Del'
else:
variant_classification = 'In_Frame_Ins'
else:
if variant_type == 'DEL':
variant_classification = 'Frame_Shift_Del'
else:
variant_classification = 'Frame_Shift_Ins'
elif consequence in VEP_CONSEQUENCES:
variant_classification = VEP_CONSEQUENCES[consequence]
else:
m = f'Found unknown Ensembl VEP consequence: {consequence}'
raise ValueError(m)
# Get the Tumor_Sample_Barcode data.
s = r[9:].apply(pyvcf.gt_hasvar)
tumor_sample_barcode = ','.join(s[s].index.to_list())
# Get the Protein_Change data.
if has_annot:
pos = fields[14]
aa = fields[15].split('/')
if len(aa) > 1:
protein_change = f'p.{aa[0]}{pos}{aa[1]}'
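# e.g. with pos = '2215' and aa = ['S', 'Y'] (as in the annotated
# example above), protein_change becomes 'p.S2215Y'.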
# Get other data.
if has_annot:
hugo_symbol = fields[3]
entrez_gene_id = fields[4]
d = dict(
Hugo_Symbol = hugo_symbol,
Entrez_Gene_Id = entrez_gene_id,
Center = '.',
NCBI_Build = ncbi_build,
Chromosome = r.CHROM,
Start_Position = start_position,
End_Position = end_position,
Strand = strand,
Variant_Classification = variant_classification,
Variant_Type = variant_type,
Reference_Allele = reference_allele,
Tumor_Seq_Allele1 = tumor_seq_allele1,
Tumor_Seq_Allele2 = tumor_seq_allele2,
Tumor_Sample_Barcode = tumor_sample_barcode,
Protein_Change = protein_change,
CHROM = r.CHROM, # will be dropped
POS = r.POS, # will be dropped
REF = r.REF, # will be dropped
ALT = r.ALT, # will be dropped
)
return pd.Series(d)
# Apply the conversion algorithm.
df = vf.df.apply(one_row, axis=1)
# Expand the Tumor_Sample_Barcode column to multiple rows.
s = df['Tumor_Sample_Barcode'].str.split(',').apply(
pd.Series, 1).stack()
s.index = s.index.droplevel(-1)
s.name = 'Tumor_Sample_Barcode'
del df['Tumor_Sample_Barcode']
df = df.join(s)
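# The stack/join above turns a comma-separated barcode string into one
# MAF row per sample; e.g. a hypothetical value 'A,B' yields two rows
# that differ only in Tumor_Sample_Barcode.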
# Append extra genotype keys, if necessary.
if keys is not None:
if names is None:
names = keys
if isinstance(keys, str):
keys = [keys]
if isinstance(names, str):
names = [names]
for i, key in enumerate(keys):
temp_df = vf.extract_format(key)
temp_df = pd.concat([vf.df.iloc[:, :9], temp_df], axis=1)
temp_df = temp_df.drop(
columns=['ID', 'QUAL', 'FILTER', 'INFO', 'FORMAT'])
temp_df = pd.melt(
temp_df,
id_vars=['CHROM', 'POS', 'REF', 'ALT'],
var_name='Tumor_Sample_Barcode',
)
temp_df = temp_df[temp_df.value != '.']
df = df.merge(temp_df,
on=['CHROM', 'POS', 'REF', 'ALT', 'Tumor_Sample_Barcode'])
df = df.rename(columns={'value': names[i]})
# Drop the extra columns.
df = df.drop(columns=['CHROM', 'POS', 'REF', 'ALT'])
return cls(df)
def matrix_prevalence(self):
"""
Compute a matrix of variant counts with a shape of (genes, samples).
Returns
-------
pandas.DataFrame
The said matrix.
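Examples
--------
A minimal sketch using the TCGA-LAML dataset referenced throughout this
module (assuming it has already been downloaded):
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> df = mf.matrix_prevalence()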
"""
s = self.df.groupby(
'Hugo_Symbol')['Tumor_Sample_Barcode'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index='Hugo_Symbol',
columns='Tumor_Sample_Barcode', values='Count')
df.columns.name = ''
df = df.fillna(0)
return df
def matrix_genes(self, mode='variants', count=10):
"""
Compute a matrix of counts with a shape of (genes, variant
classifications).
This method only considers the nine nonsynonymous variant
classifications.
Parameters
----------
mode : {'variants', 'samples'}, default: 'variants'
Determines how to identify top mutated genes:
* 'variants': Count the number of observed variants.
* 'samples': Count the number of affected samples. Using this
option will create an additional variant classification called
'Multi_Hit'.
count : int, default: 10
Number of top mutated genes to include.
Returns
-------
pandas.DataFrame
The said matrix.
"""
if mode == 'variants':
df = self.df[self.df.Variant_Classification.isin(NONSYN_NAMES)]
df = df.groupby('Hugo_Symbol')[
'Variant_Classification'].value_counts().to_frame()
df.columns = ['Count']
df = df.reset_index()
df = df.pivot(index='Hugo_Symbol', columns='Variant_Classification',
values='Count')
df = df.fillna(0)
for varcls in NONSYN_NAMES:
if varcls not in df.columns:
df[varcls] = 0
i = df.sum(axis=1).sort_values(ascending=False).index
df = df.reindex(index=i)
df = df[NONSYN_NAMES]
df = df[:count]
df = df.rename_axis(None, axis=1)
elif mode == 'samples':
df = self.matrix_waterfall(count)
df = df.apply(lambda r: r.value_counts(), axis=1)
for varcls in NONSYN_NAMES + ['Multi_Hit']:
if varcls not in df.columns:
df[varcls] = np.nan
df = df[NONSYN_NAMES + ['Multi_Hit']]
df = df.fillna(0)
else:
raise ValueError(f'Found incorrect mode: {mode}')
return df
def matrix_tmb(self):
"""
Compute a matrix of variant counts with a shape of (samples, variant
classifications).
Returns
-------
pandas.DataFrame
The said matrix.
"""
df = self.df[self.df.Variant_Classification.isin(NONSYN_NAMES)]
df = df.groupby('Tumor_Sample_Barcode')[
'Variant_Classification'].value_counts().to_frame()
df.columns = ['Count']
df = df.reset_index()
df = df.pivot(index='Tumor_Sample_Barcode',
columns='Variant_Classification', values='Count')
df = df.fillna(0)
for varcls in NONSYN_NAMES:
if varcls not in df.columns:
df[varcls] = 0
i = df.sum(axis=1).sort_values(ascending=False).index
df = df.reindex(index=i)
df = df[NONSYN_NAMES]
df = df.rename_axis(None, axis=1)
return df
def matrix_waterfall(self, count=10, keep_empty=False):
"""
Compute a matrix of variant classifications with a shape of
(genes, samples).
If there are multiple variant classifications available for a given
cell, they will be replaced with 'Multi_Hit'.
Parameters
----------
count : int, default: 10
Number of top mutated genes to include.
keep_empty : bool, default: False
If True, keep samples with all ``NaN``'s.
Returns
-------
pandas.DataFrame
The said matrix.
"""
df = self.df[self.df.Variant_Classification.isin(NONSYN_NAMES)]
f = lambda x: ''.join(x) if len(x) == 1 else 'Multi_Hit'
df = df.groupby(['Hugo_Symbol', 'Tumor_Sample_Barcode'])[
'Variant_Classification'].apply(f).to_frame()
df = df.reset_index()
df = df.pivot(index='Hugo_Symbol', columns='Tumor_Sample_Barcode',
values='Variant_Classification')
# Sort the rows (genes).
i = df.isnull().sum(axis=1).sort_values(ascending=True).index
df = df.reindex(index=i)
# Select the top mutated genes.
df = df[:count]
# Drop samples with all NaN's.
if not keep_empty:
df = df.dropna(axis=1, how='all')
# Sort the columns (samples).
c = df.applymap(lambda x: 0 if pd.isnull(x) else 1).sort_values(
df.index.to_list(), axis=1, ascending=False).columns
df = df[c]
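# Samples are now ordered by their mutation pattern across the selected
# genes, which produces the characteristic 'staircase' layout used by
# the waterfall (oncoplot) view.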
df = df.fillna('None')
df = df.rename_axis(None, axis=1)
return df
def plot_genes(
self, mode='variants', count=10, flip=False, ax=None, figsize=None,
**kwargs
):
"""
Create a bar plot showing the variant distribution for top mutated genes.
Parameters
----------
mode : {'variants', 'samples'}, default: 'variants'
Determines how to identify top mutated genes:
* 'variants': Count the number of observed variants.
* 'samples': Count the number of affected samples. Using this
option will create an additional variant classification called
'Multi_Hit'.
count : int, default: 10
Number of top mutated genes to display.
flip : bool, default: False
If True, flip the x and y axes.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`pandas.DataFrame.plot.bar` or
:meth:`pandas.DataFrame.plot.barh`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
By default (``mode='variants'``), the method identifies top mutated
genes by counting the number of observed variants:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_genes()
>>> plt.tight_layout()
We can also identify top mutated genes by counting the number of
affected samples:
.. plot::
:context: close-figs
>>> mf.plot_genes(mode='samples')
>>> plt.tight_layout()
"""
if mode == 'variants':
colors = NONSYN_COLORS
elif mode == 'samples':
colors = NONSYN_COLORS + ['k']
else:
raise ValueError(f'Found incorrect mode: {mode}')
df = self.matrix_genes(count=count, mode=mode)
df = df.iloc[::-1]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if flip:
df = df.iloc[::-1]
kind = 'bar'
xlabel, ylabel = '', 'Count'
else:
kind = 'barh'
xlabel, ylabel = 'Count', ''
df.plot(
kind=kind, ax=ax, stacked=True, legend=False,
color=colors, **kwargs
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_oncoplot(
self, count=10, keep_empty=False, figsize=(15, 10), label_fontsize=15,
ticklabels_fontsize=15, legend_fontsize=15
):
"""
Create an oncoplot.
See this :ref:`tutorial <tutorials:Create customized oncoplots>` to
learn how to create customized oncoplots.
Parameters
----------
count : int, default: 10
Number of top mutated genes to display.
keep_empty : bool, default: False
If True, display samples that do not have any mutations.
figsize : tuple, default: (15, 10)
Width, height in inches. Format: (float, float).
label_fontsize : float, default: 15
Font size of labels.
ticklabels_fontsize : float, default: 15
Font size of tick labels.
legend_fontsize : float, default: 15
Font size of legend texts.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_oncoplot()
"""
g = {'height_ratios': [1, 10, 1], 'width_ratios': [10, 1]}
fig, axes = plt.subplots(3, 2, figsize=figsize, gridspec_kw=g)
[[ax1, ax2], [ax3, ax4], [ax5, ax6]] = axes
# Create the TMB plot.
samples = list(self.matrix_waterfall(count=count,
keep_empty=keep_empty).columns)
self.plot_tmb(ax=ax1, samples=samples, width=0.95)
ax1.set_xlabel('')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_xlim(-0.5, len(samples)-0.5)
ax1.set_ylabel('TMB', fontsize=label_fontsize)
ax1.set_yticks([0, self.matrix_tmb().sum(axis=1).max()])
ax1.tick_params(axis='y', which='major',
labelsize=ticklabels_fontsize)
# Remove the top right plot.
ax2.remove()
# Create the waterfall plot.
self.plot_waterfall(count=count, ax=ax3, linewidths=1, keep_empty=keep_empty)
ax3.set_xlabel('')
ax3.tick_params(axis='y', which='major', labelrotation=0,
labelsize=ticklabels_fontsize)
# Create the genes plot.
self.plot_genes(count=count, ax=ax4, mode='samples', width=0.95)
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.set_yticks([])
ax4.set_xlabel('Samples', fontsize=label_fontsize)
ax4.set_xticks([0, self.matrix_genes(
count=count, mode='samples').sum(axis=1).max()])
ax4.set_ylim(-0.5, count-0.5)
ax4.tick_params(axis='x', which='major',
labelsize=ticklabels_fontsize)
# Create the legend.
handles = common.legend_handles(NONSYN_NAMES+['Multi_Hit'],
colors=NONSYN_COLORS+['k'])
ax5.legend(
handles=handles,
title='Variant_Classification',
loc='upper center',
ncol=4,
fontsize=legend_fontsize,
title_fontsize=legend_fontsize
)
ax5.axis('off')
# Remove the bottom right plot.
ax6.remove()
plt.tight_layout()
plt.subplots_adjust(wspace=0.01, hspace=0.01)
def plot_oncoplot_matched(
self, af, patient_col, group_col, group_order, colors='Set2',
figsize=(15, 10), label_fontsize=12, ticklabels_fontsize=12,
legend_fontsize=12
):
"""
Create an oncoplot for matched samples.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
patient_col : str
AnnFrame column containing patient information.
group_col : str
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
colors : str
Colormap name for the sample groups.
figsize : tuple, default: (15, 10)
Width, height in inches. Format: (float, float).
label_fontsize : float, default: 12
Font size of labels.
ticklabels_fontsize : float, default: 12
Font size of tick labels.
legend_fontsize : float, default: 12
Font size of legend texts.
"""
fig, axes = plt.subplots(3, 2, figsize=figsize,
gridspec_kw={'height_ratios': [1, 10, 1.5], 'width_ratios': [10, 1]}
)
[[ax1, ax2], [ax3, ax4], [ax5, ax6]] = axes
patients = self.matrix_waterfall_matched(af, patient_col, group_col, group_order).columns
self.plot_tmb_matched(
af, patient_col, group_col, group_order=group_order, ax=ax1,
legend=False, patients=patients, width=0.90,
color=sns.color_palette(colors)[:3]
)
ax1.set_xticks([])
ax1.set_xlim(-0.5, len(patients)-0.5)
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_ylabel('TMB', fontsize=label_fontsize)
ax1.tick_params(axis='y', which='major',
labelsize=ticklabels_fontsize)
ax2.remove()
self.plot_waterfall_matched(af, patient_col, group_col, group_order=group_order, ax=ax3)
ax3.set_xticks([])
ax3.tick_params(axis='y', which='major', labelrotation=0,
labelsize=ticklabels_fontsize)
self.plot_mutated_matched(
af, patient_col, group_col, group_order=group_order, ax=ax4, palette=colors
)
ax4.set_yticks([])
ax4.legend().remove()
ax4.spines['right'].set_visible(False)
ax4.spines['left'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.tick_params(axis='x', which='major',
labelsize=ticklabels_fontsize)
ax4.set_xlabel('Patients', fontsize=label_fontsize)
# Create the legends.
handles1 = common.legend_handles(NONSYN_NAMES+['Multi_Hit'],
colors=NONSYN_COLORS+['k'])
handles2 = common.legend_handles(group_order, colors=colors)
leg1 = ax5.legend(handles=handles1, loc=(0, 0), title='Variant_Classification', ncol=4, fontsize=legend_fontsize, title_fontsize=legend_fontsize)
leg2 = ax5.legend(handles=handles2, loc=(0.8, 0), title=group_col, fontsize=legend_fontsize, title_fontsize=legend_fontsize)
ax5.add_artist(leg1)
ax5.add_artist(leg2)
ax5.axis('off')
# Remove the bottom right plot.
ax6.remove()
plt.tight_layout()
plt.subplots_adjust(wspace=0.01, hspace=0.01)
def plot_clonality(
self, vaf_col, af=None, group_col=None, group_order=None, count=10,
threshold=0.25, subclonal=False, ax=None, figsize=None
):
"""
Create a bar plot summarizing the clonality of variants in top
mutated genes.
Clonality will be calculated based on VAF using
:meth:`MafFrame.compute_clonality`.
Parameters
----------
vaf_col : str
MafFrame column containing VAF data.
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
count : int, default: 10
Number of top mutated genes to display.
threshold : float, default: 0.25
VAF threshold percentage.
subclonal : bool, default: False
If True, display subclonality (1 - clonality).
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
MafFrame.compute_clonality
Compute the clonality of variants based on VAF.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_clonality('i_TumorVAF_WU')
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_clonality('i_TumorVAF_WU',
... af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
df = self.df.copy()
df['Clonality'] = self.compute_clonality(vaf_col, threshold=threshold)
if group_col is None:
s = df.groupby('Hugo_Symbol')['Clonality'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index='Hugo_Symbol', columns='Clonality', values='Count')
else:
df = df.merge(af.df[group_col], left_on='Tumor_Sample_Barcode', right_index=True)
s = df.groupby(['Hugo_Symbol', group_col])['Clonality'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index=['Hugo_Symbol', group_col], columns='Clonality', values='Count')
df = df.reset_index()
df = df.fillna(0)
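# Convert the Clonal/Subclonal counts of each row into fractions so that
# Clonal + Subclonal sums to 1 for every gene (or gene/group pair).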
l = ['Clonal', 'Subclonal']
df[l] = df[l].div(df[l].sum(axis=1), axis=0)
genes = self.matrix_genes(count=count).index
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if subclonal:
y = 'Subclonal'
else:
y = 'Clonal'
sns.barplot(
x='Hugo_Symbol', y=y, data=df, order=genes, hue=group_col,
hue_order=group_order, ax=ax
)
ax.set_xlabel('')
return ax
def plot_evolution(
self, samples, vaf_col, anchor=None, normalize=True, count=5,
ax=None, figsize=None, **kwargs
):
"""
Create a line plot visualizing changes in VAF between specified
samples.
Parameters
----------
samples : list
List of samples to display.
vaf_col : str
MafFrame column containing VAF data.
anchor : str, optional
Sample to use as the anchor. If absent, use the first sample in
the list.
normalize : bool, default: True
If False, do not normalize VAF by the maximum value.
count : int, default: 5
Number of top variants to display.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.lineplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
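Examples
--------
A hypothetical sketch; the sample names below are placeholders and the
VAF column follows the TCGA-LAML examples used elsewhere in this module:
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> ax = mf.plot_evolution(['Sample_T1', 'Sample_T2'], 'i_TumorVAF_WU')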
"""
df = self.df[self.df.Tumor_Sample_Barcode.isin(samples)]
if df.empty:
message = f'No variants to display for the samples: {samples}.'
raise ValueError(message)
df = df[df.Variant_Classification.isin(NONSYN_NAMES)]
def one_row(r):
if r.Protein_Change == '.':
variant_name = f'{r.Hugo_Symbol} ({r.Variant_Classification})'
else:
variant_name = f'{r.Hugo_Symbol} ({r.Protein_Change})'
return variant_name
df['Variant_Name'] = df.apply(one_row, axis=1)
df = df.pivot(index=['Variant_Name'],
columns=['Tumor_Sample_Barcode'], values=[vaf_col])
df.columns = df.columns.get_level_values(1)
df.columns.name = ''
df = df.fillna(0)
for sample in samples:
if sample not in df.columns:
df[sample] = 0
df = df[samples]
if anchor is None:
anchor = samples[0]
df = df.sort_values(by=anchor, ascending=False)
if normalize:
df = df / df.max()
df = df.fillna(0)
df = df.iloc[:count, :].T
df = df.loc[samples]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.lineplot(data=df, ax=ax, **kwargs)
ax.set_ylabel('Fraction')
return ax
def plot_genepair(
self, x, y, vaf_col, af=None, group_col=None, group_order=None,
ax=None, figsize=None, **kwargs
):
"""
Create a scatter plot of VAF between Gene X and Gene Y.
Parameters
----------
x, y : str
Gene names.
vaf_col : str
MafFrame column containing VAF data.
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.scatterplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_genepair('DNMT3A', 'FLT3', 'i_TumorVAF_WU')
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_genepair('DNMT3A', 'FLT3', 'i_TumorVAF_WU',
... af=af,
... group_col='FAB_classification')
>>> plt.tight_layout()
"""
df = self.df[self.df.Hugo_Symbol.isin([x, y])]
df = df[['Tumor_Sample_Barcode', 'Hugo_Symbol', vaf_col]]
df = df.sort_values(vaf_col, ascending=False)
df = df.drop_duplicates(subset=['Tumor_Sample_Barcode', 'Hugo_Symbol'])
df = df.pivot(index='Tumor_Sample_Barcode',
columns='Hugo_Symbol', values=vaf_col)
df = df.fillna(0)
if group_col is not None:
df = df.merge(af.df[group_col], left_index=True, right_index=True)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.scatterplot(
x=x, y=y, data=df, ax=ax, hue=group_col, hue_order=group_order,
**kwargs
)
# Print summary statistics including R-squared and p-value.
results = smf.ols(f'{y} ~ {x}', data=df).fit()
print(f'Results for {y} ~ {x}:')
print(f'R^2 = {results.rsquared:.2f}')
print(f' P = {results.f_pvalue:.2e}')
return ax
def plot_regplot(
self, af, group_col, a, b, a_size=None, b_size=None, genes=None,
count=10, to_csv=None, ax=None, figsize=None, **kwargs
):
"""
Create a scatter plot with a linear regression model fit visualizing
correlation between gene mutation frequencies in two sample groups
A and B.
Each point in the plot represents a gene.
The method will automatically calculate and print summary statistics
including R-squared and p-value.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
group_col : str
AnnFrame column containing sample group information.
a, b : str
Sample group names.
a_size, b_size : int, optional
Sample group sizes to use as denominator. By default, these are
inferred from the MafFrame and AnnFrame objects.
genes : list, optional
Genes to display. When absent, top mutated genes (``count``) will
be used.
count : int, default: 10
Number of top mutated genes to display. Ignored if ``genes`` is
specified.
to_csv : str, optional
Write the plot's data to a CSV file.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.regplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_regplot(af, 'FAB_classification', 'M1', 'M2')
Results for M2 ~ M1:
R^2 = 0.43
P = 3.96e-02
>>> plt.tight_layout()
"""
df1 = self.matrix_prevalence()
df2 = af.df[af.df.index.isin(df1.columns)]
i_a = df2[df2[group_col] == a].index
i_b = df2[df2[group_col] == b].index
# Determine which genes to display.
if genes is None:
genes = self.matrix_genes(count=count).index.to_list()
# Determine each group's sample size.
if a_size is None:
a_size = len(i_a)
if b_size is None:
b_size = len(i_b)
f = lambda x: 0 if x == 0 else 1
s_a = df1.T.loc[i_a].applymap(f).sum().loc[genes] / a_size
s_b = df1.T.loc[i_b].applymap(f).sum().loc[genes] / b_size
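# s_a and s_b now hold, for each selected gene, the fraction of mutated
# samples within group A and group B respectively.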
df3 = pd.concat([s_a, s_b], axis=1)
df3.columns = [a, b]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
# Draw the main plot.
sns.regplot(x=a, y=b, data=df3, ax=ax, **kwargs)
# Write the DataFrame to a CSV file.
if to_csv is not None:
df3.to_csv(to_csv)
# Print summary statistics including R-squared and p-value.
results = smf.ols(f'{b} ~ {a}', data=df3).fit()
print(f'Results for {b} ~ {a}:')
print(f'R^2 = {results.rsquared:.2f}')
print(f' P = {results.f_pvalue:.2e}')
return ax
def plot_interactions(
self, count=10, cmap=None, ax=None, figsize=None, **kwargs
):
"""
Create a heatmap representing mutually exclusive or co-occurring set
of genes.
This method performs pair-wise Fisher's exact tests to detect
significant pairs of genes.
Parameters
----------
count : int, default: 10
Number of top mutated genes to display.
cmap : str, optional
Color map.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.heatmap`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_interactions(count=25, cmap='BrBG')
>>> plt.tight_layout()
"""
df = self.matrix_prevalence()
genes = self.matrix_genes(count=count, mode='samples').index.to_list()
df = df.loc[genes]
df = df.applymap(lambda x: True if x else False)
df = df.T
pairs = list(itertools.combinations(genes, 2))
data = []
def one_pair(a, b):
s_a = df[a].to_list()
s_b = df[b].to_list()
ab = 0
AB = 0
aB = 0
Ab = 0
for i in range(len(s_a)):
if s_a[i] and s_b[i]:
AB += 1
elif s_a[i] and not s_b[i]:
Ab += 1
elif not s_a[i] and s_b[i]:
aB += 1
else:
ab += 1
return (ab, AB, aB, Ab)
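# For each gene pair: AB = samples mutated in both genes, Ab/aB = mutated
# in exactly one, ab = mutated in neither; these counts form the 2x2
# contingency table passed to fisher_exact below.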
for pair in pairs:
a = pair[0]
b = pair[1]
ab, AB, aB, Ab = one_pair(a, b)
event = 'Co_Occurence' if AB else 'Mutually_Exclusive'
data.append([a, b, ab, AB, aB, Ab, event])
df = pd.DataFrame(data,
columns=['A', 'B', 'ab', 'AB', 'aB', 'Ab', 'Event'])
def one_row(r):
oddsr, p = fisher_exact([[r.AB, r.aB], [r.Ab, r.ab]],
alternative='two-sided')
return pd.Series([oddsr, p], index=['Odds_Ratio', 'P_Value'])
df = pd.concat([df.apply(one_row, axis=1), df], axis=1)
df = df.sort_values('P_Value')
def one_row(r):
r['Log_P_Value'] = -np.log10(r.P_Value)
if r.P_Value < 0.05:
r['Label'] = '*'
elif r.P_Value < 0.1:
r['Label'] = '.'
else:
r['Label'] = ''
if r.Event == 'Mutually_Exclusive':
r.Log_P_Value *= -1
return r
df = df.apply(one_row, axis=1)
annot = df.pivot(index='A', columns='B', values='Label')
annot = annot.fillna('')
df = df.pivot(index='A', columns='B', values='Log_P_Value')
df = df.fillna(0)
for gene in genes:
if gene not in df.columns:
df[gene] = 0
if gene not in annot.columns:
annot[gene] = ''
df = df.T
annot = annot.T
for gene in genes:
if gene not in df.columns:
df[gene] = 0
if gene not in annot.columns:
annot[gene] = ''
annot = annot[genes]
annot = annot.loc[genes]
df = df[genes]
df = df.loc[genes]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
# Create a mask for the heatmap.
corr = np.corrcoef(np.random.randn(count, 200))
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
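# The random matrix is only used to obtain a square array of the right
# shape; its values are never plotted. The upper triangle is simply
# masked so each gene pair appears once in the heatmap.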
sns.heatmap(
df, annot=annot, fmt='', cmap=cmap, mask=mask, vmax=3, vmin=-3,
center=0, ax=ax, **kwargs
)
ax.set_xlabel('')
ax.set_ylabel('')
return ax
def plot_lollipop(
self, gene, alpha=0.7, ax=None, figsize=None, legend=True
):
"""
Create a lollipop or stem plot showing amino acid changes of a gene.
Parameters
----------
gene : str
Name of the gene.
alpha : float, default: 0.7
Set the color transparency. Must be within the 0-1 range,
inclusive.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_lollipop('DNMT3A')
>>> plt.tight_layout()
"""
# Only select variants from the gene.
df1 = self.df[self.df.Hugo_Symbol == gene]
# Raise an error if there are no SNVs to plot.
if df1.empty:
raise ValueError(f"No variants to plot for the gene: '{gene}'.")
# Count each amino acid change.
df2 = df1.Protein_Change.value_counts().to_frame().reset_index()
df2.columns = ['Protein_Change', 'Count']
# Identify variant classification for each amino acid change.
df3 = df1[['Protein_Change', 'Variant_Classification']
].drop_duplicates(subset=['Protein_Change'])
df4 = pd.merge(df2, df3, on='Protein_Change')
# Extract amino acid positions. Sort the counts by position.
def one_row(r):
digits = [x for x in r.Protein_Change if x.isdigit()]
if not digits:
return np.nan
return int(''.join(digits))
df4['Protein_Position'] = df4.apply(one_row, axis=1)
df4 = df4.dropna(subset=['Protein_Position'])
df4 = df4.sort_values(['Protein_Position'])
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
for i, nonsyn_name in enumerate(NONSYN_NAMES):
temp = df4[df4.Variant_Classification == nonsyn_name]
color = NONSYN_COLORS[i]
ax.vlines(temp.Protein_Position, ymin=0, ymax=temp.Count,
alpha=alpha, color=color)
ax.plot(temp.Protein_Position, temp.Count, 'o', alpha=alpha,
color=color, label=nonsyn_name)
ax.set_xlabel('Position')
ax.set_ylabel('Count')
if legend:
ax.legend()
return ax
def plot_mutated(
self, af=None, group_col=None, group_order=None, genes=None,
count=10, ax=None, figsize=None
):
"""
Create a bar plot visualizing the mutation prevalence of top
mutated genes.
Parameters
----------
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
genes : list, optional
Genes to display. When absent, top mutated genes (``count``) will
be used.
count : int, default: 10
Number of top mutated genes to display. Ignored if ``genes`` is
specified.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_mutated()
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_mutated(af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
df = self.matrix_prevalence()
# Determine which genes to display.
if genes is None:
genes = self.matrix_genes(count=count).index.to_list()
df = df.loc[genes]
df = df.applymap(lambda x: True if x else False)
if group_col is None:
df = (df.sum(axis=1) / df.shape[1]).to_frame().reset_index()
df.columns.values[1] = 'Prevalence'
else:
df = df.T
df = pd.merge(df, af.df[group_col], left_index=True, right_index=True)
df = df.groupby([group_col]).mean().reset_index()
df = df.melt(id_vars=[group_col])
df.columns = [group_col, 'Hugo_Symbol', 'Prevalence']
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.barplot(
x='Hugo_Symbol', y='Prevalence', data=df, hue=group_col,
hue_order=group_order, ax=ax
)
ax.set_xlabel('')
return ax
def plot_mutated_matched(
self, af, patient_col, group_col, group_order, ax=None, figsize=None,
**kwargs
):
"""
Create a bar plot visualizing the mutation prevalence of top
mutated genes.
Parameters
----------
af : AnnFrame
AnnFrame containing sample annotation data.
patient_col : str
AnnFrame column containing patient information.
group_col : str
AnnFrame column containing sample group information.
group_order : list
List of sample group names.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
"""
df = self.matrix_waterfall_matched(af, patient_col, group_col, group_order)
df = df.applymap(lambda x: 0 if x == 'None' else 1)
s = df.sum(axis=1) / len(df.columns) * 100
s.name = 'Count'
df = s.to_frame().reset_index()
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
sns.barplot(
x='Count', y='Gene', hue='Group', data=df, hue_order=group_order,
orient='h', ax=ax, **kwargs
)
ax.set_xlabel('Patients (%)')
ax.set_ylabel('')
return ax
def plot_rainfall(
self, sample, palette=None, legend='auto', ax=None, figsize=None,
**kwargs
):
"""
Create a rainfall plot visualizing inter-variant distance on a linear
genomic scale for a single sample.
Parameters
----------
sample : str
Name of the sample.
palette : str, optional
Name of the seaborn palette. See the :ref:`tutorials:Control plot
colors` tutorial for details.
legend : {'auto', 'brief', 'full', False}, default: 'auto'
Display setting of the legend according to
:meth:`seaborn.scatterplot`.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.scatterplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('brca')
>>> maf_file = '~/fuc-data/brca/brca.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_rainfall('TCGA-A8-A08B',
... figsize=(14, 7),
... palette=sns.color_palette('Set2')[:6])
>>> plt.tight_layout()
"""
# Select variants from the sample.
df = self.df[self.df.Tumor_Sample_Barcode == sample]
# Remove indels.
df = df[df.Variant_Type == 'SNP']
# Raise an error if there are no SNVs to plot.
if df.empty:
message = (
'There are no SNVs to be drawn '
f"for the sample: '{sample}'."
)
raise ValueError(message)
# Get SNV class for each variant.
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['class']
df['SNV_Class'] = df.apply(one_row, axis=1)
# Convert string chromosomes to integers for ordering.
def one_row(r):
r.Chromosome = int(r.Chromosome.replace(
'chr', '').replace('X', '23').replace('Y', '24'))
return r
df = df.apply(one_row, axis=1)
df = df[['Chromosome', 'Start_Position', 'SNV_Class']]
df = df.sort_values(['Chromosome', 'Start_Position'])
# Update positions as if all chromosomes are one long molecule.
def one_row(r):
if r.Chromosome == 1:
return r
r.Start_Position += sum(CHROM_LENGTHS['hg19'][:r.Chromosome-1])
return r
df = df.apply(one_row, axis=1)
s = np.diff(df.Start_Position)
s = np.insert(s, 0, 0)
s = np.log10(s + 1)
df['Interevent_Distance'] = s
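# Inter-event distance is the log10 of the gap (in bp) to the previous
# variant; 1 is added before taking the log so the first variant and
# zero-distance pairs do not produce log(0).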
df = df.reset_index(drop=True)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
bounds = [0] + df.drop_duplicates(subset=['Chromosome'],
keep='last').index.to_list()
xticks = []
for i, bound in enumerate(bounds):
if i == 0:
continue
elif i == 1:
xticks.append(bound / 2)
else:
xticks.append(bounds[i-1] + (bound - bounds[i-1]) / 2)
for bound in bounds:
ax.axvline(x=bound, color='lightgray', zorder=1)
sns.scatterplot(
x=df.index, y='Interevent_Distance', data=df, hue='SNV_Class',
hue_order=SNV_CLASS_ORDER, palette=palette, ax=ax, legend=legend,
zorder=2, **kwargs
)
ax.set_xlabel('Chromosomes')
ax.set_ylabel('Interevent distance')
ax.set_xticks(xticks)
ax.set_xticklabels(['X' if x == 23 else 'Y' if x == 24 else x
for x in df.Chromosome.unique()])
return ax
def plot_snvclsc(
self, af=None, group_col=None, group_order=None, palette=None,
flip=False, ax=None, figsize=None, **kwargs
):
"""
Create a bar plot summarizing the count distributions of the six
:ref:`glossary:SNV classes` for all samples.
A grouped bar plot can be created with ``group_col`` (requires an AnnFrame).
Parameters
----------
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
palette : str, optional
Name of the seaborn palette. See the :ref:`tutorials:Control plot
colors` tutorial for details.
flip : bool, default: False
If True, flip the x and y axes.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
MafFrame.plot_snvclsp
Create a box plot summarizing the proportion distributions of
the six :ref:`glossary:SNV classes` for all samples.
MafFrame.plot_snvclss
Create a bar plot showing the proportions of the six
:ref:`glossary:SNV classes` for individual samples.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_snvclsc(palette=sns.color_palette('Dark2'))
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_snvclsc(af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
# Add the SNV_Class column.
df = self.df[self.df.Variant_Type == 'SNP']
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['class']
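# SNV_CLASSES is expected to collapse the 12 possible base substitutions
# into the six canonical classes (e.g. G>A and C>T are assumed to map to
# the same 'C>T' class).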
s = df.apply(one_row, axis=1)
s.name = 'SNV_Class'
df = pd.concat([df, s], axis=1)
# Count the occurrence of each SNV class.
if group_col is not None:
df = pd.merge(df, af.df[group_col], left_on='Tumor_Sample_Barcode',
right_index=True)
s = df.groupby([group_col]).SNV_Class.value_counts()
df = s.to_frame().rename(columns={'SNV_Class': 'Count'}
).reset_index()
else:
s = df.SNV_Class.value_counts()
df = s.to_frame().reset_index()
df.columns = ['SNV_Class', 'Count']
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if flip:
x, y = 'Count', 'SNV_Class'
xlabel, ylabel = 'Count', ''
else:
x, y = 'SNV_Class', 'Count'
xlabel, ylabel = '', 'Count'
sns.barplot(
x=x, y=y, data=df, ax=ax, hue=group_col, hue_order=group_order,
palette=palette, order=SNV_CLASS_ORDER, **kwargs
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_snvclsp(
self, af=None, group_col=None, group_order=None, palette=None, flip=False,
ax=None, figsize=None, **kwargs
):
"""
Create a box plot summarizing the proportion distributions of the six
:ref:`glossary:SNV classes` for all samples.
Parameters
----------
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
palette : str, optional
Name of the seaborn palette. See the :ref:`tutorials:Control plot
colors` tutorial for details.
flip : bool, default: False
If True, flip the x and y axes.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.boxplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
MafFrame.plot_snvclsc
Create a bar plot summarizing the count distributions of the six
:ref:`glossary:SNV classes` for all samples.
MafFrame.plot_snvclss
Create a bar plot showing the proportions of the six
:ref:`glossary:SNV classes` for individual samples.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_snvclsp(palette=sns.color_palette('Set2'))
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_snvclsp(af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
# Add the SNV_Class column.
df = self.df[self.df.Variant_Type == 'SNP']
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['class']
s = df.apply(one_row, axis=1)
s.name = 'SNV_Class'
df = pd.concat([df, s], axis=1)
# Compute the proportions of SNV classes in each sample.
s = df.groupby('Tumor_Sample_Barcode')['SNV_Class'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index='Tumor_Sample_Barcode', columns='SNV_Class')
df = df.fillna(0)
df = df.apply(lambda r: r/r.sum(), axis=1)
df.columns = df.columns.get_level_values(1)
df.columns.name = ''
if group_col is None:
df = pd.melt(df, var_name='SNV_Class', value_name='Proportion')
else:
df = pd.merge(df, af.df[group_col], left_index=True, right_index=True)
df = pd.melt(df, id_vars=[group_col], var_name='SNV_Class',
value_name='Proportion')
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if flip:
x, y = 'Proportion', 'SNV_Class'
xlabel, ylabel = 'Proportion', ''
else:
x, y = 'SNV_Class', 'Proportion'
xlabel, ylabel = '', 'Proportion'
sns.boxplot(
x=x, y=y, data=df, hue=group_col, hue_order=group_order,
palette=palette, ax=ax, **kwargs
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_snvclss(
self, samples=None, color=None, colormap=None, width=0.8,
legend=True, flip=False, to_csv=None, ax=None, figsize=None, **kwargs
):
"""
Create a bar plot showing the proportions of the six
:ref:`glossary:SNV classes` for individual samples.
Parameters
----------
samples : list, optional
List of samples to display (in that order too). If samples that
are absent in the MafFrame are provided, the method will give a
warning but still draw an empty bar for those samples.
color : list, optional
List of color tuples. See the :ref:`tutorials:Control plot
colors` tutorial for details.
colormap : str or matplotlib colormap object, optional
Colormap to select colors from. See the :ref:`tutorials:Control
plot colors` tutorial for details.
width : float, default: 0.8
The width of the bars.
legend : bool, default: True
Place legend on axis subplots.
flip : bool, default: False
If True, flip the x and y axes.
to_csv : str, optional
Write the plot's data to a CSV file.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, create a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`pandas.DataFrame.plot.bar` or
:meth:`pandas.DataFrame.plot.barh`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
MafFrame.plot_snvclsc
Create a bar plot summarizing the count distributions of the six
:ref:`glossary:SNV classes` for all samples.
MafFrame.plot_snvclsp
Create a box plot summarizing the proportion distributions of
the six :ref:`glossary:SNV classes` for all samples.
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> ax = mf.plot_snvclss(width=1, color=plt.get_cmap('Set2').colors)
>>> ax.legend(loc='upper right')
>>> plt.tight_layout()
"""
# Add the SNV_Class column.
df = self.df[self.df.Variant_Type == 'SNP']
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['class']
s = df.apply(one_row, axis=1)
s.name = 'SNV_Class'
df = pd.concat([df, s], axis=1)
# Compute the proportions of SNV classes in each sample.
s = df.groupby('Tumor_Sample_Barcode')['SNV_Class'].value_counts()
s.name = 'Count'
df = s.to_frame().reset_index()
df = df.pivot(index='Tumor_Sample_Barcode', columns='SNV_Class')
df = df.fillna(0)
df = df.apply(lambda r: r/r.sum(), axis=1)
df.columns = df.columns.get_level_values(1)
df.columns.name = ''
for x in SNV_CLASS_ORDER:
if x not in df.columns:
df[x] = 0
df = df[SNV_CLASS_ORDER]
# Determine which samples should be displayed.
if samples is not None:
missing_samples = []
missing_data = []
for sample in samples:
if sample not in df.index:
missing_samples.append(sample)
missing_data.append([0] * 6)
if missing_samples:
message = (
'Although the following samples are absent in the '
'MafFrame, they will still be displayed as empty bars: '
f'{missing_samples}.'
)
warnings.warn(message)
temp = pd.DataFrame(missing_data)
temp.index = missing_samples
temp.columns = SNV_CLASS_ORDER
df = pd.concat([df, temp]).loc[samples]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if flip:
kind = 'barh'
xlabel, ylabel = 'Proportion', 'Samples'
else:
kind = 'bar'
xlabel, ylabel = 'Samples', 'Proportion'
df.plot(
kind=kind, ax=ax, stacked=True, legend=legend, width=width,
color=color, colormap=colormap, **kwargs
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if flip:
ax.set_yticks([])
else:
ax.set_xticks([])
# Write the DataFrame to a CSV file.
if to_csv is not None:
df.to_csv(to_csv)
return ax
def plot_titv(
self, af=None, group_col=None, group_order=None, flip=False, ax=None,
figsize=None, **kwargs
):
"""
Create a box plot showing the :ref:`Ti/Tv <glossary:Transitions (Ti)
and transversions (Tv)>` proportions of samples.
Parameters
----------
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
flip : bool, default: False
If True, flip the x and y axes.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, crete a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.boxplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
fuc.api.pyvcf.VcfFrame.plot_titv
Similar method for the :class:`fuc.api.pyvcf.VcfFrame` class.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_titv()
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_titv(af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
df = self.df[self.df.Variant_Type == 'SNP']
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['type']
s = df.apply(one_row, axis=1)
s.name = 'SNV_Type'
df = pd.concat([df, s], axis=1)
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Default font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of the Decision Tree Classifier using the happiness dataset
# the methods in this class are
# __init__ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
# update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and all the elements to create a dashboard with
# all the necessary elements to present the results from the algorithm.
# The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
        '''
        Random Forest Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed and passed to the scikit-learn Random Forest algorithm,
        and the results are then presented as graphics and reports on the canvas.
        :return: None
        '''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
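        # Note: the 25 nearly identical blocks below could be collapsed into a loop.
        # Sketch only (assumes the checkboxes are also kept in a list, e.g. self.feature_checkboxes,
        # which is not defined in the original code):
        #   selected = [features_list[i] for i, cb in enumerate(self.feature_checkboxes) if cb.isChecked()]
        #   self.list_corr_features = df[selected]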
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
# -----------------------------------------------------------------------
filename = 'dt_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test= X[features_list]
        # prediction on the test set using entropy
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for RandomForest
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
        ## Graph 1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
        # Compute the class probabilities once; they are reused for the ROC curves below
        y_pred_score = self.clf_entropy.predict_proba(X_test)
        for i in range(len(class_names)):
            for j in range(len(class_names)):
                self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
        # From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Random Forest')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
        # build a Series of the importances indexed by the corresponding df column names
        f_importances = pd.Series(importances, self.list_corr_features.columns)
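        # Sketch of the remaining plotting step (the original file continues beyond this excerpt;
        # the exact calls below are assumptions, not the author's code):
        # f_importances.sort_values(ascending=False, inplace=True)
        # self.ax3.barh(f_importances.index[:10], f_importances.values[:10])
        # self.fig3.tight_layout()
        # self.fig3.canvas.draw_idle()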
import pandas as pd
import time
from datetime import datetime
from datetime import timedelta
import numpy as np
import os
import mysql.connector
import pyodbc
from mktcalendar import *
def get_uni(start, end, lookback, uni_size=1400):
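    """Build the tradable US/USD equity universe as of `start - lookback` trading days.

    Screens on price range, median 21-day dollar volume, availability of GICS
    sector data (the biotech industry group 3520 is dropped) and market-cap
    rank (top `uni_size`), caches the result to ./<end>/uni_df.csv and returns
    the symbol, sector and trading-item id columns indexed by gvkey.
    """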
unidate = start - TDay * lookback
t_low_price = 2.0
t_high_price = 500.0
t_min_advp = 1000000.0
sql = ("SELECT g.gvkey, t.tradingItemId 'tid', t.tickerSymbol symbol,"
" t.tradingItemStatusId status, ctr.country,"
" curr.currencyName currency, m.marketCap mkt_cap, p.priceClose 'close'"
" FROM ciqTradingItem t"
" INNER JOIN ciqSecurity s ON t.securityId =s.securityId"
" INNER JOIN ciqCompany co ON s.companyId =co.companyId"
" INNER JOIN ciqCountryGeo ctr ON ctr.countryId =co.countryId"
" INNER JOIN ciqCurrency curr ON t.currencyId =curr.currencyId"
" INNER JOIN ciqMarketCap m ON co.companyId=m.companyId"
" INNER JOIN ciqGvKeyIID g ON g.objectId=t.tradingItemId"
" INNER JOIN ciqPriceEquity2 p ON p.tradingItemId=t.tradingItemId"
" AND p.pricingDate = m.pricingDate"
" WHERE ctr.country= 'United States'"
" AND curr.currencyName = 'US Dollar'"
" AND s.securitySubTypeId = 1"
" AND m.pricingDate = '%s'"
% unidate)
cnxn_s = 'Trusted_Connection=yes;Driver={ODBC Driver 17 for SQL Server};Server=dbDevCapIq;Database=xpressfeed'
cnxn = pyodbc.connect(cnxn_s)
uni_df = pd.read_sql(sql, cnxn, index_col=['gvkey', 'tid'])
cnxn.close()
print("Universe size (US/USD): %d" % len(uni_df))
trailingSt = unidate - TDay * 21
trailingEd = unidate - TDay
sql = ("SELECT g.gvkey, p.tradingItemId 'tid', p.pricingDate, p.volume"
" FROM ciqPriceEquity2 p"
" INNER JOIN ciqGvKeyIID g ON g.objectId = p.tradingItemId"
" WHERE p.pricingDate BETWEEN '%s' AND '%s'"
" AND g.gvkey IN %s"
" AND p.tradingItemId In %s"
% (trailingSt, trailingEd, tuple(uni_df.index.levels[0]), tuple(uni_df.index.levels[1])))
cnxn = pyodbc.connect(cnxn_s)
price_df = pd.read_sql(sql, cnxn, index_col=['gvkey', 'tid'])
cnxn.close()
price_df = pd.merge(uni_df, price_df, on=['gvkey', 'tid'])
uni_df['tradable_med_volume_21'] = price_df['volume'].median(level=['gvkey', 'tid'])
print("Universe size (prices): %d" % len(uni_df))
uni_df = uni_df[(uni_df['close'] > t_low_price) & (uni_df['close'] < t_high_price)]
print("Universe size (price range): %d" % len(uni_df))
uni_df['mdvp'] = uni_df['tradable_med_volume_21'] * uni_df['close']
uni_df = uni_df[uni_df['mdvp'] > t_min_advp]
print("Universe size (mdvp): %d" % len(uni_df))
uni_df.reset_index(level=1, inplace=True)
uni_df.sort_values('mdvp', ascending=False, inplace=True)
uni_df = uni_df[~uni_df.index.duplicated()]
print("Universe size (duplicates): %d" % len(uni_df))
sql = ("SELECT gvkey, gics_sector sector, gics_industry_group 'group'"
" FROM factors.stock_info_v6c"
" WHERE trade_date = '%s'"
% unidate)
cnxn = mysql.connector.connect(host='jv-research', port=3306, user='mek_limited', password='<PASSWORD>$')
secdata_df = pd.read_sql(sql, cnxn)
cnxn.close()
secdata_df['gvkey'] = [element[:-3] for element in secdata_df['gvkey']]
uni_df = pd.merge(uni_df, secdata_df, on='gvkey')
print("Universe size (secdata): %d" % len(uni_df))
uni_df = uni_df[uni_df['group'] != 3520]
print("Universe size (bio): %d" % len(uni_df))
uni_df['rank'] = uni_df['mkt_cap'].fillna(0).rank(ascending=False)
uni_df = uni_df[uni_df['rank'] <= uni_size]
print("Universe size (mktcap): %d" % len(uni_df))
uni_df.set_index('gvkey', inplace=True)
end_s = end.strftime("%Y%m%d")
dir = './%s/' % end_s
if not os.path.exists(dir):
os.makedirs(dir)
uni_df.to_csv(r"%suni_df.csv" % dir, "|")
return uni_df[['symbol', 'sector', 'tid']]
def load_barra(uni_df, start, end):
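    """Load daily Barra-style factor loadings (momentum, btop, divyild, size,
    growth) and the GICS industry group for the universe between `start` and
    the day before `end`, cache them to ./<end>/barra_df.csv and return the
    frame indexed by (date, gvkey).
    """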
date = end - TDay
print("Loading barra...")
sql1 = ("SELECT trade_date 'date', gvkey, MO1_4 momentum, BP btop, DYLD divyild,"
" SIZE 'size', EP growth"
" FROM factors.loadings_v6c_xmkt "
" WHERE trade_date BETWEEN '%s' AND '%s'"
% (start, date))
sql2 = ("SELECT trade_date 'date', gvkey, gics_industry_group ind1"
" FROM factors.stock_info_v6c i"
" WHERE trade_date BETWEEN '%s' AND '%s'"
% (start, date))
cnxn = mysql.connector.connect(host='jv-research', port=3306, user='mek_limited', password='<PASSWORD>$')
barra_df1 = pd.read_sql(sql1, cnxn)
barra_df2 = pd.read_sql(sql2, cnxn)
cnxn.close()
barra_df = pd.merge(barra_df1, barra_df2, on=['date', 'gvkey'])
barra_df['gvkey'] = [element[:-3] for element in barra_df['gvkey']]
barra_df = pd.merge(barra_df, uni_df, on='gvkey')
barra_df.set_index(['date', 'gvkey'], inplace=True)
end_s = end.strftime("%Y%m%d")
dir = './%s/' % end_s
if not os.path.exists(dir):
os.makedirs(dir)
barra_df.to_csv(r"%sbarra_df.csv" % dir, "|")
return barra_df
def load_price(uni_df, start, end):
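    """Load daily open/close/high/low, volume, splits, dividends and market cap
    for the universe between `start` and the day before `end`, plus a trailing
    60-trading-day window of open/close/volume before `start` (its downstream
    use lies outside this excerpt).
    """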
print("Loading daily info...")
date = end - TDay
sql = ("SELECT DISTINCT g.gvkey, p.tradingItemId 'tid', p.priceOpen 'open',"
" p.priceClose 'close', p.priceHigh 'high', p.priceLow 'low', p.volume,"
" sp.latestSplitFactor 'split', d.divAmount 'div', p.pricingDate 'date',"
" m.marketCap 'mkt_cap'"
" FROM ciqPriceEquity2 p"
" INNER JOIN ciqGvKeyIID g ON g.objectId=p.tradingItemId"
" INNER JOIN ciqTradingItem t ON t.tradingItemId=p.tradingItemId"
" INNER JOIN ciqSecurity s ON t.securityId =s.securityId"
" INNER JOIN ciqMarketCap m ON s.companyId=m.companyId"
" AND m.pricingDate = p.pricingDate"
" LEFT JOIN ciqSplitCache sp ON sp.tradingItemId = p.tradingItemId"
" AND sp.SplitDate = p.pricingDate"
" LEFT JOIN ciqDividendCache d ON d.tradingItemId = p.tradingItemId"
" AND d.dividendDate = p.pricingDate"
" WHERE p.pricingDate BETWEEN '%s' AND '%s'"
" AND g.gvkey IN %s"
" AND p.tradingItemId In %s"
% (start, date, tuple(uni_df.index.values), tuple(uni_df['tid'].values)))
cnxn_s = 'Trusted_Connection=yes;Driver={ODBC Driver 17 for SQL Server};Server=dbDevCapIq;Database=xpressfeed'
cnxn = pyodbc.connect(cnxn_s)
price_df = pd.read_sql(sql, cnxn)
cnxn.close()
price_df = pd.merge(uni_df, price_df, on=['gvkey', 'tid'])
price_df.set_index(['date', 'gvkey'], inplace=True)
print("Loading past info...")
prev = start - TDay
tra60Pr = prev - TDay * 60
sql = ("SELECT DISTINCT g.gvkey, p.tradingItemId 'tid', p.pricingDate 'date',"
" p.priceOpen 'open', p.priceClose 'close', p.volume"
" FROM ciqPriceEquity2 p"
" INNER JOIN ciqGvKeyIID g ON g.objectId=p.tradingItemId"
" WHERE pricingDate BETWEEN '%s' AND '%s'"
" AND g.gvkey IN %s"
" AND p.tradingItemId In %s"
% (tra60Pr, date, tuple(uni_df.index.values), tuple(uni_df['tid'].values)))
cnxn = pyodbc.connect(cnxn_s)
past = pd.read_sql(sql, cnxn)
cnxn.close()
    past = pd.merge(uni_df, past, on=['gvkey', 'tid'])
import pandas as pd
import numpy as np
from annotations.CONSTANTS import *
import pickle
def save_as_csv(X, Y, feature_name, output_dir, output_filename='features_and_labels.csv'):
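    """Write the feature matrix X (columns named by `feature_name`) plus a
    'label' column built from Y to `output_dir + output_filename` as CSV.
    """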
# print(X[0], len(X[0]), len(feature_name))
# print('#x', len(X), '#y', len(Y))
data = np.array(X)
pd_data = pd.DataFrame(data=data,columns=feature_name)
pd_data['label'] = [v for v in Y]
# pd_data['event_key'] = [v[1] for v in Y]
pd_data.to_csv(output_dir + output_filename, encoding='utf-8', index=False)
print('---feature saved in ', output_dir + output_filename)
def append_to_file(filename, txt):
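    """Append `txt` followed by a newline to `filename`, creating the file if needed."""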
fh = open(filename, 'a')
fh.write(txt + '\n')
fh.close()
def save_model_outputs(T, Y, Y_init, Y_smooth, filename):
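    """Collect the timestamps T, labels Y, initial predictions Y_init and
    smoothed predictions Y_smooth into one aligned DataFrame and write it to `filename`.
    """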
    df = pd.DataFrame({'T': T, 'Y': Y, 'Y_init': Y_init, 'Y_smooth': Y_smooth})
    # Persist the aligned outputs; CSV is assumed here, following the save_as_csv convention above.
    df.to_csv(filename, index=False)
    print('---model outputs saved in ', filename)
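# Minimal usage sketch (illustrative only; the values and output path below are assumptions):
if __name__ == '__main__':
    save_model_outputs(T=[0.0, 1.0, 2.0], Y=[0, 1, 1], Y_init=[0, 0, 1], Y_smooth=[0, 1, 1],
                       filename='model_outputs_example.csv')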
#!/usr/bin/env python
"""
What
----
vbench is a library which can be used to benchmark the performance
of a codebase over time.
Although vbench can collect data over many commites, generate plots
and other niceties, for Pull-Requests the important thing is the
performance of the HEAD commit against a known-good baseline.
This script tries to automate the process of comparing these
two commits, and is meant to run out of the box on a fresh
clone.
How
---
These are the steps taken:
1) create a temp directory into which vbench will clone the temporary repo.
2) instantiate a vbench runner, using the local repo as the source repo.
3) perform a vbench run for the baseline commit, then the target commit.
4) pull the results for both commits from the db. use pandas to align
   everything and calculate a ratio for the timing information.
5) print the results to the log file and to stdout.
"""
import shutil
import os
import sys
import argparse
import tempfile
import time
import re
import random
import numpy as np
from pandas import DataFrame, Series
from suite import REPO_PATH
DEFAULT_MIN_DURATION = 0.01
HEAD_COL="head[ms]"
BASE_COL="base[ms]"
class RevParseAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
import subprocess
cmd = 'git rev-parse {}'.format(values)
rev_parse = subprocess.check_output(cmd, shell=True)
setattr(namespace, self.dest, rev_parse.strip())
parser = argparse.ArgumentParser(description='Use vbench to measure and compare the performance of commits.')
parser.add_argument('-H', '--head',
help='Execute vbenches using the currently checked out copy.',
dest='head',
action='store_true',
default=False)
parser.add_argument('-b', '--base-commit',
help='The commit serving as performance baseline ',
type=str, action=RevParseAction)
parser.add_argument('-t', '--target-commit',
help='The commit to compare against the baseline (default: HEAD).',
type=str, action=RevParseAction)
parser.add_argument('-m', '--min-duration',
help='Minimum duration (in ms) of baseline test for inclusion in report (default: %.3f).' % DEFAULT_MIN_DURATION,
type=float,
                    default=DEFAULT_MIN_DURATION)
parser.add_argument('-o', '--output',
metavar="<file>",
dest='log_file',
help='Path of file in which to save the textual report (default: vb_suite.log).')
parser.add_argument('-d', '--outdf',
metavar="FNAME",
dest='outdf',
default=None,
help='Name of file to df.save() the result table into. Will overwrite')
parser.add_argument('-r', '--regex',
metavar="REGEX",
dest='regex',
default="",
help='Regex pat, only tests whose name matches the regext will be run.')
parser.add_argument('-s', '--seed',
metavar="SEED",
dest='seed',
default=1234,
type=int,
help='Integer value to seed PRNG with')
parser.add_argument('-n', '--repeats',
metavar="N",
dest='repeats',
default=3,
type=int,
help='Number of times to run each vbench, result value is the best of')
parser.add_argument('-c', '--ncalls',
metavar="N",
dest='ncalls',
default=3,
type=int,
help='Number of calls to in each repetition of a vbench')
parser.add_argument('-N', '--hrepeats',
metavar="N",
dest='hrepeats',
default=1,
type=int,
help='implies -H, number of times to run the vbench suite on the head commit.\n'
'Each iteration will yield another column in the output' )
parser.add_argument('-a', '--affinity',
metavar="a",
dest='affinity',
default=1,
type=int,
help='set processor affinity of process by default bind to cpu/core #1 only. '
'Requires the "affinity" or "psutil" python module, will raise Warning otherwise')
parser.add_argument('-u', '--burnin',
metavar="u",
dest='burnin',
default=1,
type=int,
help='Number of extra iteration per benchmark to perform first, then throw away. ' )
parser.add_argument('-S', '--stats',
default=False,
action='store_true',
help='when specified with -N, prints the output of describe() per vbench results. ' )
parser.add_argument('-q', '--quiet',
default=False,
action='store_true',
help='Suppress report output to stdout. ' )
def get_results_df(db, rev):
"""Takes a git commit hash and returns a Dataframe of benchmark results
"""
bench = DataFrame(db.get_benchmarks())
results = DataFrame(map(list,db.get_rev_results(rev).values()))
    # Since vbench.db._reg_rev_results returns an unlabeled dict,
# we have to break encapsulation a bit.
results.columns = db._results.c.keys()
results = results.join(bench['name'], on='checksum').set_index("checksum")
return results
def prprint(s):
print("*** %s" % s)
def pre_hook():
import gc
gc.disable()
def post_hook():
import gc
gc.enable()
def profile_comparative(benchmarks):
from vbench.api import BenchmarkRunner
from vbench.db import BenchmarkDB
from vbench.git import GitRepo
from suite import BUILD, DB_PATH, PREPARE, dependencies
TMP_DIR = tempfile.mkdtemp()
try:
prprint("Opening DB at '%s'...\n" % DB_PATH)
db = BenchmarkDB(DB_PATH)
prprint("Initializing Runner...")
# all in a good cause...
GitRepo._parse_commit_log = _parse_wrapper(args.base_commit)
runner = BenchmarkRunner(
benchmarks, REPO_PATH, REPO_PATH, BUILD, DB_PATH,
TMP_DIR, PREPARE, always_clean=True,
# run_option='eod', start_date=START_DATE,
module_dependencies=dependencies)
repo = runner.repo # (steal the parsed git repo used by runner)
h_head = args.target_commit or repo.shas[-1]
h_baseline = args.base_commit
# ARGH. reparse the repo, without discarding any commits,
# then overwrite the previous parse results
# prprint ("Slaughtering kittens..." )
(repo.shas, repo.messages,
repo.timestamps, repo.authors) = _parse_commit_log(None,REPO_PATH,
args.base_commit)
prprint('Target [{}] : {}\n'.format(h_head, repo.messages.get(h_head, "")))
prprint('Baseline [{}] : {}\n'.format(h_baseline,
repo.messages.get(h_baseline, "")))
prprint("Removing any previous measurements for the commits.")
db.delete_rev_results(h_baseline)
db.delete_rev_results(h_head)
# TODO: we could skip this, but we need to make sure all
# results are in the DB, which is a little tricky with
# start dates and so on.
prprint("Running benchmarks for baseline [%s]" % h_baseline)
runner._run_and_write_results(h_baseline)
prprint("Running benchmarks for target [%s]" % h_head)
runner._run_and_write_results(h_head)
prprint('Processing results...')
head_res = get_results_df(db, h_head)
baseline_res = get_results_df(db, h_baseline)
ratio = head_res['timing'] / baseline_res['timing']
totals = DataFrame({HEAD_COL:head_res['timing'],
BASE_COL:baseline_res['timing'],
'ratio':ratio,
'name':baseline_res.name},
columns=[HEAD_COL, BASE_COL, "ratio", "name"])
totals = totals.ix[totals[HEAD_COL] > args.min_duration]
# ignore below threshold
totals = totals.dropna(
).sort("ratio").set_index('name') # sort in ascending order
h_msg = repo.messages.get(h_head, "")
b_msg = repo.messages.get(h_baseline, "")
print_report(totals,h_head=h_head,h_msg=h_msg,
h_baseline=h_baseline,b_msg=b_msg)
if args.outdf:
prprint("The results DataFrame was written to '%s'\n" % args.outdf)
totals.save(args.outdf)
finally:
# print("Disposing of TMP_DIR: %s" % TMP_DIR)
shutil.rmtree(TMP_DIR)
def profile_head_single(benchmark):
import gc
results = []
# just in case
gc.collect()
try:
from ctypes import cdll, CDLL
cdll.LoadLibrary("libc.so.6")
libc = CDLL("libc.so.6")
libc.malloc_trim(0)
except:
pass
N = args.hrepeats + args.burnin
results = []
try:
for i in range(N):
gc.disable()
d=dict()
try:
d = benchmark.run()
except KeyboardInterrupt:
raise
except Exception as e: # if a single vbench bursts into flames, don't die.
err=""
try:
err = d.get("traceback")
if err is None:
err = str(e)
except:
pass
print("{} died with:\n{}\nSkipping...\n".format(benchmark.name, err))
results.append(d.get('timing',np.nan))
gc.enable()
gc.collect()
finally:
gc.enable()
if results:
# throw away the burn_in
results = results[args.burnin:]
sys.stdout.write('.')
sys.stdout.flush()
return Series(results, name=benchmark.name)
# df = DataFrame(results)
# df.columns = ["name",HEAD_COL]
# return df.set_index("name")[HEAD_COL]
def profile_head(benchmarks):
print( "Performing %d benchmarks (%d runs each)" % ( len(benchmarks), args.hrepeats))
ss= [profile_head_single(b) for b in benchmarks]
print("\n")
    results = DataFrame(ss)
#!/usr/bin/env python3
#
# Create model outputs with P.1203 software.
#
# Copyright 2018 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from itu_p1203.p1203Pv import P1203Pv
from itu_p1203.p1203Pq import P1203Pq
import pandas as pd
import yaml
import argparse
import json
import numpy as np
from tqdm import tqdm
tqdm.pandas()
DB_IDS = ['TR04', 'TR06', 'VL04', 'VL13']
ROOT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
def parse_mode3_features(pvs_id, features_mode3_path):
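    """Read the per-PVS mode-3 bitstream feature CSV for `pvs_id` from `features_mode3_path`."""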
pvs_features = pd.read_csv(
os.path.join(
features_mode3_path,
pvs_id + '.csv')
)
return pvs_features
def calc_mode0_O22(row):
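    """Assemble the mode-0 Pv inputs (coding/display resolution, segment bitrate,
    framerate) from a feature row and return the P.1203 per-second video score O22."""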
pvs_features = (int(row["coding_res"]),
int(row["display_res"]),
float(row["bitrate_kbps_segment_size"]),
int(row["framerate"]))
return P1203Pv.video_model_function_mode0(*pvs_features)
def calc_mode1_O22(row):
pvs_features = (int(row["coding_res"]),
int(row["display_res"]),
float(row["bitrate_kbps_segment_size"]),
int(row["framerate"]),
[],
float(row["iframe_ratio"]))
return P1203Pv.video_model_function_mode1(*pvs_features)
def calc_mode2_O22(row):
# check if fallback is needed
has_bitstream_data = "BS_TwoPercentQP1" in row.keys() and isinstance(row["BS_TwoPercentQP1"], str)
try:
avg_qp = eval(row["BS_TwoPercentQP1"])
except Exception as e:
has_bitstream_data = False
if has_bitstream_data:
frame_types = eval(row["types"])
frames = []
for ftyp, qp_values in zip(frame_types, avg_qp):
frames.append({
'type': ftyp,
'qpValues': [qp_values]
})
pvs_features = (
int(row["coding_res"]),
int(row["display_res"]),
int(row["framerate"]),
frames,
None,
[]
)
return P1203Pv.video_model_function_mode2(*pvs_features)
else:
# tqdm.write("Switching back to Mode 1 for PVS {}, sample index {}".format(row["pvs_id"], row["sample_index"]))
return None
def calc_mode3_O22(row):
frame_types = eval(row["types"])
avg_qp = eval(row["BS_Av_QPBB"])
frames = []
for ftyp, qp_values in zip(frame_types, avg_qp):
frames.append({
'type': ftyp,
'qpValues': [qp_values]
})
pvs_features = (
int(row["coding_res"]),
int(row["display_res"]),
float(row["framerate"]),
frames,
None,
[]
)
return P1203Pv.video_model_function_mode3(*pvs_features)
def calc_O46(O21, O22, device, stall_vec=[]):
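    """Unpack the stalling event pairs into the two buffer lists expected by
    P1203Pq and return its calculate() output (which includes O46)."""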
l_buff = []
p_buff = []
if stall_vec:
for l, p in stall_vec:
l_buff.append(l)
p_buff.append(p)
pq_fun = P1203Pq(O21, O22, l_buff, p_buff, device)
return pq_fun.calculate()
def main(args):
db_data = pd.DataFrame()
O21_path = os.path.join(ROOT_PATH, 'data', 'O21.csv')
stalling_dir_path = os.path.join(ROOT_PATH, 'data', 'test_configs')
features_mode0_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode0.csv')
features_mode1_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode1.csv')
features_mode2_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode2')
features_mode3_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode3')
# read in data
# O21
O21_data = pd.read_csv(O21_path)
# stalling
yaml_per_db = {}
for db_id in DB_IDS:
        yaml_per_db[db_id] = yaml.safe_load(
            open(os.path.join(stalling_dir_path, db_id + '-config.yaml')))
# read in from hdf-files if they exist, otherwise run pv-calc
if args.create_hdfs:
print('Calculating O22 scores for all modes ...')
# mode0 features
print('Reading mode 0 features ...')
mode0_features = pd.read_csv(features_mode0_path)
# mode1 features
print('Reading mode 1 features ...')
mode1_features = pd.read_csv(features_mode1_path)
# mode2 features
print('Reading mode 2 features (may take a while) ...')
pvss = mode1_features["pvs_id"].unique()
list_of_dataframes_for_mode2 = []
for pvs_id in tqdm(pvss):
pvs_data_all = pd.read_csv(os.path.join(features_mode2_path, pvs_id + '.csv'))
if "BS_TwoPercentQP1" in pvs_data_all.keys():
list_of_dataframes_for_mode2.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "sizes", "quant", "coding_res", "display_res", "BS_TwoPercentQP1"
]].copy()
)
else:
# no bitstream data available
list_of_dataframes_for_mode2.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "sizes", "coding_res", "display_res"
]].copy()
)
mode2_features = pd.concat(list_of_dataframes_for_mode2, ignore_index=True)
# mode3 features
print('Reading mode 3 features (may take a while) ...')
pvss = mode1_features["pvs_id"].unique()
list_of_dataframes_for_mode3 = []
for pvs_id in tqdm(pvss):
pvs_data_all = pd.read_csv(os.path.join(features_mode3_path, pvs_id + '.csv'))
list_of_dataframes_for_mode3.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "quant", "coding_res", "display_res", "BS_Av_QPBB"
]].copy()
)
mode3_features = pd.concat(list_of_dataframes_for_mode3, ignore_index=True)
# calc Pv
# mode0
print('Calculating mode 0 Pv')
mode0_features['O22'] = mode0_features.progress_apply(calc_mode0_O22, axis=1)
# mode1
print('Calculating mode 1 Pv')
mode1_features['O22'] = mode1_features.progress_apply(calc_mode1_O22, axis=1)
# mode2
print('Calculating mode 2 Pv')
mode2_features['O22'] = mode2_features.progress_apply(calc_mode2_O22, axis=1)
missing_values_indices = np.where(pd.isnull(mode2_features.O22))[0]
# go through each sample index that has no value yet
print('Re-calculating mode 2 Pv missing values')
for idx in tqdm(missing_values_indices):
# get required features from mode 1, ...
pvs_id = mode2_features.iloc[idx]['pvs_id']
sample_index = mode2_features.iloc[idx]['sample_index']
row = mode1_features.loc[(mode1_features["pvs_id"] == pvs_id) & (mode1_features["sample_index"] == sample_index)]
# and calculate Mode 1 score instead
mode1_O22 = calc_mode1_O22(row)
# overwrite data in Mode 2 data frame
# https://stackoverflow.com/a/43968774/435093
mode2_features.iat[idx, mode2_features.columns.get_loc("O22")] = mode1_O22
# mode3
print('Calculating mode 3 Pv')
mode3_features['O22'] = mode3_features.progress_apply(calc_mode3_O22, axis=1)
mode0_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode0')
mode1_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode1')
mode2_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode2')
mode3_features.to_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode3')
else:
if os.path.isfile(os.path.join(ROOT_PATH, "data_original", "save.h5")):
mode0_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode0')
mode1_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode1')
mode2_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode2')
mode3_features = pd.read_hdf(os.path.join(ROOT_PATH, "data_original", "save.h5"), key='mode3')
else:
print('No h5 file found, please rerun with -c flag')
quit()
# parse buffering data from -config.yaml
stalling_per_hrc = {}
for db_id in yaml_per_db:
for hrc_id in yaml_per_db[db_id]['hrcList']:
buffts = 0
buff_events = [] # ts, dur
for (event, ts) in yaml_per_db[db_id]['hrcList'][hrc_id]["eventList"]:
if event in ['stall', 'buffering']:
buff_events.append([buffts, ts])
else:
buffts += ts
stalling_per_hrc[hrc_id] = buff_events
pvss = mode1_features["pvs_id"].unique()
per_pvs_data = {}
print('Creating O21/22-json files ...')
    O22_tocsv = pd.DataFrame(columns=['pvs_id', 'mode', 'sample_index', 'O22'])
"""
Visualize the sensor data for the De Morton exercises.
TODOs:
- Should df_vital and df_raw be merged after resampling?
"""
import logging
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple
import context
from mhealth.utils.maths import split_contiguous
from mhealth.utils.context_info import dump_context
from mhealth.utils.commons import create_progress_bar
from mhealth.patient.imove_label_loader import merge_labels
from mhealth.utils.file_helper import ensure_dir, write_hdf
from mhealth.utils.plotter_helper import save_figure, setup_plotting
from mhealth.utils.commons import print_title, print_subtitle, setup_logging
# This list must contain at least one vital parameter and at least one.
# "raw" parameter. (Required by the lazy-loading mechanism)
DEFAULT_COLUMNS = [ "HR", "HRQ", "SpO2", "SpO2Q", "Activity",
"Classification", "QualityClassification",
"RespRate", "HRV", "AX", "AY", "AZ", "A" ]
# Update this list with metrics that should be made available.
METRICS_AT_50HZ = { "AX", "AY", "AZ", "A" }
METRICS_AT_01HZ = { "HR", "HRQ", "SpO2", "SpO2Q",
"BloodPressure", "BloodPerfusion", "Activity",
"Classification", "QualityClassification",
"RespRate", "HRV", "LocalTemp", "ObjTemp" }
logger = logging.getLogger("imove")
def select_df(df_vital, df_raw, column):
if column in METRICS_AT_50HZ:
return df_raw
if column in METRICS_AT_01HZ:
return df_vital
else:
msg = "Update METRICS_AT_01HZ or METRICS_AT_50HZ with: %s"
assert False, msg % column
###############################################################################
def read_data(data_dir, out_dir, columns, resample,
side="both", forced=False, n_pats=None,
pats=None, labels=None):
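    """Load the 1 Hz vital and 50 Hz raw sensor data used for the De Morton analysis.

    Data is read lazily from the HDF stores under out_dir/store when possible
    (unless `forced`), otherwise from the per-patient stores under
    data_dir/store; it is then resampled, merged with the De Morton exercise
    labels and cached for later runs.
    """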
def _resample(df, resample, group):
if group == "vital" and (resample is None or resample<=1):
return df
if group == "raw" and (resample is None or resample<=(1/50)):
return df
if resample and resample>0:
if resample < 1:
offset = "%dms" % int(1000*resample)
else:
offset = "%ds" % resample
df = df.resample(offset).mean()
return df
def _derived_metrics_01Hz(df, group):
if group=="vital":
pass
return df
def _derived_metrics_50Hz(df, group):
if group=="raw":
df.loc[:, "A"] = np.linalg.norm(df[["AX", "AY", "AZ"]].values,
axis=1)
return df
def _set_timestamp_index(df, group):
# Before setting the index, introduce sub-second resolution.
# This applies mainly to the 50Hz data, for which the timestamps
# unfortunately are resolved only up to seconds.
# This could go to preprocessing as well, but will also increase
# (I suppose) the file sizes.
if group=="raw":
def _subseconds(series):
shifts = np.linspace(0,1,len(series), endpoint=False)
series += pd.to_timedelta(shifts, unit="second")
return series
tol = pd.Timedelta(seconds=1)
chunks = split_contiguous(df["timestamp"], tol=tol, inclusive=True)
chunks = map(_subseconds, chunks)
df["timestamp"] = pd.concat(chunks, axis=0)
elif group=="vital":
pass
else:
assert False, "This case is not implemented yet"
df = df.set_index("timestamp")
return df
def _split_by_sampling_rate(columns):
_01Hz, _50Hz = [], []
for col in columns:
if col in METRICS_AT_50HZ:
_50Hz.append(col)
elif col in METRICS_AT_01HZ:
_01Hz.append(col)
else:
msg = "Update METRICS_AT_01HZ or METRICS_AT_50HZ with: %s"
assert False, msg % col
return _01Hz, _50Hz
def _append_data_side(dfs, store, group, side, key,
pat_id, cols, resample):
if key in store:
df = store.get(key)
df = df.reset_index(drop=True)
df = _set_timestamp_index(df=df, group=group)
df = _derived_metrics_01Hz(df=df, group=group)
df = _derived_metrics_50Hz(df=df, group=group)
df = df[cols].copy()
# Only a bit slow (1-2s, if not a no-op)
df = _resample(df=df, resample=resample, group=group)
assert("exercises" in store)
# We cannot resample labels. Therefore re-extract the
# De Morton labels. Unfortunately, this step is slow.
df = merge_labels(df=df, df_labels=store.get("exercises"))
df["Side"] = side
df["Patient"] = pat_id
dfs.append(df)
def _append_data(dfs, store, group, side, pat_id, cols, resample):
assert group in ("vital", "raw")
if side in ("left", "both"):
key = f"{group}/left"
_append_data_side(dfs=dfs, store=store, group=group, side="left",
key=key, pat_id=pat_id, cols=cols,
resample=resample)
if side in ("right", "both"):
key = f"{group}/right"
_append_data_side(dfs=dfs, store=store, group=group, side="right",
key=key, pat_id=pat_id, cols=cols,
resample=resample)
else:
assert False
def _read_data_stores(data_dir, cols_01Hz, cols_50Hz,
resample, side, n_pats=None,
pats=None, labels=None):
"""
n_pats is ignored if pats is not None
"""
dfs_01Hz = []
dfs_50Hz = []
logger.info("Reading data...")
files = list(sorted(Path(data_dir).glob("*.h5")))
if len(files) == 0:
msg = "No files HDF stores found under path: %s"
raise RuntimeError(msg % data_dir)
prefix = "Patient {variables.pat_id:<3}... "
n_files = n_pats if n_pats is not None else len(files)
progress = create_progress_bar(label=None,
size=n_files,
prefix=prefix,
variables={"pat_id": "N/A"})
progress.start()
for i, path in enumerate(files):
pat_id = path.stem
if pats is not None and pat_id not in pats:
continue
elif n_pats is not None and n_pats and i>=n_pats:
break
progress.update(i, pat_id=pat_id)
store = pd.HDFStore(path, mode="r")
if cols_01Hz:
_append_data(dfs=dfs_01Hz, store=store,
group="vital", side=side,
pat_id=pat_id, cols=cols_01Hz,
resample=resample)
if cols_50Hz:
_append_data(dfs=dfs_50Hz, store=store,
group="raw", side=side,
pat_id=pat_id, cols=cols_50Hz,
resample=resample)
store.close()
progress.finish()
logger.info("Done!")
logger.info("Concatenating data...")
dfs_01Hz = [df for df in dfs_01Hz if df is not None]
dfs_50Hz = [df for df in dfs_50Hz if df is not None]
df_01Hz = None
df_50Hz = None
is_clipped = False
if dfs_01Hz:
df_01Hz = pd.concat(dfs_01Hz, axis=0)
if dfs_50Hz:
df_50Hz = pd.concat(dfs_50Hz, axis=0)
if labels is not None:
logger.info("Reading data for De Morton labels: %s",
", ".join(labels))
# This clips data: include sensor data only when exercising.
df_01Hz = df_01Hz[df_01Hz["DeMortonLabel"].isin(labels)]
df_50Hz = df_50Hz[df_50Hz["DeMortonLabel"].isin(labels)]
is_clipped = True
logger.info("Done!")
return df_01Hz, df_50Hz, is_clipped
def _read_data_lazily(out_dir, cols_01Hz, cols_50Hz,
n_pats=None, pats=None, labels=None,
mode="from_exercises"):
"""
Arguments:
mode: Two modes are available:
- "all": read store/demorton.h5
- "from_exercises": read from store/ex-*.h5
Note: demorton.h5 contains all data that was extracted by
preprocessing/extract_demorton_data.py. It allows to select
data ยฑmargin around the exercise sessions.
labels: Optional list of exercises/labels to include. Is ignored
if mode != "from_exercises"
"""
assert mode in ("all", "from_exercises")
is_clipped = False
if mode == "all":
filepath = out_dir / "store" / "demorton.h5"
if not filepath.is_file():
return None, None, False
logger.info("Reading data lazily...")
store = pd.HDFStore(filepath, mode="r")
df_vital = store["vital"]
df_raw = store["raw"]
store.close()
# We don't know how the data was stored.
is_clipped = "maybe"
if labels is not None:
logger.info("Reading data for De Morton labels: %s",
", ".join(labels))
df_vital = df_vital[df_vital["DeMortonLabel"].isin(labels)]
df_raw = df_raw[df_raw["DeMortonLabel"].isin(labels)]
is_clipped = True
if mode == "from_exercises":
files = sorted((out_dir / "store").glob("ex*.h5"))
if not files:
return None, None, False
dfs_vital = []
dfs_raw = []
if labels is not None:
logger.info("Lazy loading data for De Morton labels: %s",
", ".join(labels))
# In mode==from_exercises, the data is clipped, regardless of
# the value of labels. Clipped data means: sensor data only
# when patient is exercising.
is_clipped = True
for filepath in files:
label = filepath.stem
label = label.replace("ex-", "")
if labels is not None and label not in labels:
continue
store = pd.HDFStore(filepath, mode="r")
dfs_vital.append(store["vital"])
dfs_raw.append(store["raw"])
store.close()
df_vital = pd.concat(dfs_vital, axis=0)
df_raw = pd.concat(dfs_raw, axis=0)
pats_vital = df_vital["Patient"]
pats_unique = pats_vital.unique() # keeps order of appearance!
choice = None
if pats is not None:
diff = set(pats) - set(pats_unique)
if len(diff)>0 and labels is None:
msg = ("Requested patients cannot be loaded lazily.\n"
" Use flag --force-read to avoid this warning.\n"
" Missing patients: %s")
logger.warning(msg % ", ".join(diff))
elif len(diff)>0 :
msg = ("Requested patients cannot be loaded lazily. "
"Try flag --force-read to avoid this warning.\n"
" Note: It could be that no data is "
"available for the selected labels.\n"
" Missing patients: %s\n"
" Selected labels: %s")
logger.warning(msg, ", ".join(diff), ", ".join(labels))
choice = pats
elif n_pats is not None:
if len(pats_unique) < n_pats:
msg = ("WARNING: Requested %d patients, but the lazily loaded "
"data contains only data from %d patients.")
logger.warning(msg % (n_pats, len(pats_unique)))
choice = pats_unique[:n_pats]
if choice is not None:
df_vital = df_vital[pats_vital.isin(choice)]
pats_raw = df_raw["Patient"]
df_raw = df_raw[pats_raw.isin(choice)]
store.close()
if set(cols_01Hz) - set(df_vital.columns):
# Force re-reading.
df_vital = None
if set(cols_50Hz) - set(df_raw.columns):
# Force re-reading.
df_raw = None
logger.info("Done!")
return df_vital, df_raw, is_clipped
def _save_data(out_dir, df_vital, df_raw, split_exercises=True):
logger.info("Writing data...")
if split_exercises:
# This drops all data for which no De Morton label is set.
for label, dfv in df_vital.groupby("DeMortonLabel"):
filepath = out_dir / "store" / f"ex-{label}.h5"
write_hdf(df=dfv, path=filepath, key="vital")
for label, dfr in df_raw.groupby("DeMortonLabel"):
filepath = out_dir / "store" / f"ex-{label}.h5"
write_hdf(df=dfr, path=filepath, key="raw")
# This stores all data!
filepath = out_dir / "store" / "demorton.h5"
write_hdf(df=df_vital, path=filepath, key="vital")
write_hdf(df=df_raw, path=filepath, key="raw")
logger.info("Done!")
#######################################################
assert side in ("left", "right", "both")
columns_requested = columns
columns = set(DEFAULT_COLUMNS)
if columns_requested:
columns |= set(columns_requested)
columns = sorted(columns)
cols_01Hz, cols_50Hz = _split_by_sampling_rate(columns)
df_vital = df_raw = None
if not forced:
lazy_mode = "from_exercises" if labels else "all"
ret = _read_data_lazily(out_dir=out_dir,
cols_01Hz=cols_01Hz,
cols_50Hz=cols_50Hz,
n_pats=n_pats,
pats=pats,
labels=labels,
mode=lazy_mode)
df_vital, df_raw, is_clipped = ret
if df_vital is None or df_raw is None:
ret = _read_data_stores(data_dir=data_dir/"store",
cols_01Hz=cols_01Hz,
cols_50Hz=cols_50Hz,
resample=resample,
side=side,
n_pats=n_pats,
pats=pats,
labels=labels)
df_vital, df_raw, is_clipped = ret
# Save for lazy loading.
_save_data(out_dir=out_dir,
df_vital=df_vital,
df_raw=df_raw)
# --labels enables clipping.
if is_clipped == True:
logger.warning("Sensor data is clipped with exercise windows.")
elif is_clipped == "maybe":
logger.info("Lazily loaded sensor data is not clipped...")
logger.info("...unless it was clipped when creating the store.")
else:
logger.info("Sensor data is not clipped.")
df_ex = pd.read_csv(data_dir/"exercises.csv")
df_ex["Patient"] = df_ex["Patient"].map("{:03d}".format)
df_ex["StartDate"] = | pd.to_datetime(df_ex["StartDate"], utc=True) | pandas.to_datetime |
import os
import unittest
import httpagentparser
import pandas
from geolite2 import geolite2
from yieldify_exercise.etl_system import ETL_Metrics
class TestEtlSystem(unittest.TestCase):
"""
    This class holds all the tests for each method within the etl_system class.
"""
def setUp(self):
"""
        This setup method creates an instance of the ETL_Metrics class so that the
        individual tests below can call its methods.
"""
self.etl = ETL_Metrics()
self.etl.path = os.getcwd() + "/test/resources/test_input_data.gz"
def test_read_file(self):
"""
This test checks that the read_file method is working as it should by comparing it to the expected data.
"""
# Creating expected data
expected_data = [
{"date": "2014-10-12", "time": "17:01:01", "user": "f4fdd9e55192e94758eb079ec6e24b219fe7d71e",
"url": "http://92392832.2323.2323", "IP": "172.16.58.3",
"user_agent_string": "Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X)"},
{"date": "2014-10-12", "time": "17:01:01", "user": "0ae531264993367571e487fb486b13ea412aae3d",
"url": "http://95454545.2323.2323", "IP": "867.454.34.65",
"user_agent_string": "Mozilla/5.0 ((Linux; Android 4.4.2; GT-I9505 Build/KOT49H)"},
{"date": "2014-10-12", "time": "17:01:01", "user": "c5ac174ee1<PASSWORD>7e<PASSWORD>acf",
"url": "http://92354545.2323.2323", "IP": "172.16.31.10",
"user_agent_string": "Chrome/5.0 (Linux; Android 4.4.2; GT-I9505 Build/KOT49H)"},
{"date": "2014-10-12", "time": "17:01:01", "user": "2<PASSWORD>",
"url": "http://94857364.2323.2323", "IP": "192.168.127.12",
"user_agent_string": "Chrome/5.0 (iPad; CPU OS 7_1_2 like Mac OS X)"},
{"date": "2014-10-12", "time": "17:01:01", "user": "3938fffe5c0a131f51df5c4ce3128c5edaf572c8",
"url": "http://95937726.2323.2323", "IP": "192.34.563.64",
"user_agent_string": "Chrome/5.0 (iPad; CPU OS 7_1_2 like Mac OS X)"}
]
expected_df = pandas.DataFrame(expected_data)[["date", "time", "user", "url", "IP", "user_agent_string"]]
# Calling the read in method
self.etl.read_file()
# Asserting that the two dataframe are equal
assert (expected_df.equals(self.etl.df[["date", "time", "user", "url", "IP", "user_agent_string"]]))
def test_none_check(self):
"""
        This test checks whether none_check returns what it is supposed to, depending on the input parameters.
"""
reader = geolite2.reader()
# Asserting that the correct country comes out
ip_right = reader.get("172.16.58.3")
assert (self.etl.none_check(ip_right, "IP", "country") == "United Kingdom")
# Asserting that when there is no key with that name or similar the method returns none
assert (self.etl.none_check(ip_right, "IP", "browser") == None)
geolite2.close()
# Asserting the none_check gets the name correctly
user_agent_right = httpagentparser.detect(
"Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53",
{}).get("browser")
assert (self.etl.none_check(user_agent_right, "user_agent_str", "browser") == "Safari")
        # Asserting that when a None value is passed for both method types, None is returned
value_none = None
assert (self.etl.none_check(value_none, "IP", "country") == None)
assert (self.etl.none_check(value_none, "user_agent_str", "browser") == None)
        # Asserting that when a different method type is chosen, None is returned
assert (self.etl.none_check(value_none, "mutliply", "country") == None)
def test_check_ip(self):
"""
This test checks whether the check_ip method in the ETL class is producing the correct outputs for the given
parameters
"""
        # Checking that when there is no IP in the string, the method returns None
only_unknown = "unknown"
assert (self.etl.check_ip(only_unknown) == None)
        # Checking that when 'unknown' comes first in the string followed by an IP, the IP is returned
first_unknown = "unknown, 172.16.58.3"
assert (self.etl.check_ip(first_unknown) == "172.16.58.3")
        # Checking that when 'unknown' comes second after an IP, the IP is returned
second_unknown = "172.16.58.3, unknown"
assert (self.etl.check_ip(second_unknown) == "172.16.58.3")
        # Checking that when the string contains only IPs, the first IP is returned
no_unknown = "172.16.58.3, 192.168.3.11"
assert (self.etl.check_ip(no_unknown) == "172.16.58.3")
def test_setup_data(self):
"""
This test checks that the setup_data method is converting IPs and user_agent_strings into their correct
components.
"""
## Creating the input data for that method
input_data = [
{"IP": '172.16.58.3, 192.168.3.11',
"user_agent_string": "Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53"},
{"IP": '192.168.3.11',
"user_agent_string": "Mozilla/5.0 (Linux; Android 4.4.2; GT-I9505 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.102 Mobile Safari/537.36"},
{"IP": '172.16.17.32',
"user_agent_string": "Mozilla/5.0 (Linux; Android 4.4.4; Nexus 7 Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.102 Safari/537.36"},
{"IP": '-', "user_agent_string": '-'}
]
self.etl.df = pandas.DataFrame(input_data)
# Running the method
self.etl.setup_data()
# Creating expected data
expected_data = [
{"IP": "172.16.58.3, 192.168.3.11",
"user_agent_string": "Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53",
"country": "United Kingdom", "city": "Jarrow", "browser_family": "Safari", "os_family": "iOS"},
{"IP": "192.168.3.11",
"user_agent_string": "Mozilla/5.0 (Linux; Android 4.4.2; GT-I9505 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.102 Mobile Safari/537.36",
"country": "United Kingdom", "city": "Manchester", "browser_family": "Chrome", "os_family": "Linux"},
{"IP": "172.16.17.32",
"user_agent_string": "Mozilla/5.0 (Linux; Android 4.4.4; Nexus 7 Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.102 Safari/537.36",
"country": "United Kingdom", "city": "Liverpool", "browser_family": "Chrome", "os_family": "Linux"},
{"IP": '-', "user_agent_string": '-', "country": None, "city": None, "browser_family": None,
"os_family": None}
]
        expected_df = pandas.DataFrame(expected_data)
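        # Hedged completion of this truncated test (mirrors test_read_file; the exact column selection is an assumption):
        cols = ["IP", "user_agent_string", "country", "city", "browser_family", "os_family"]
        assert (expected_df[cols].equals(self.etl.df[cols]))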
import pandas as pd
import numpy as np
from visions.application.summaries.functional import (
summarize_frame,
summarize_series,
summarize,
)
from visions.application.summaries.summary import CompleteSummary
from visions.core.implementations.types import visions_string, visions_integer
def test_summarize_frame():
df = pd.DataFrame(
{
"Brand": ["Honda Civic", "Toyota Corolla", "Ford Focus", "Audi A4"],
"Price": [22000, 25000, 27000, 35000],
},
columns=["Brand", "Price"],
)
summary = summarize_frame(
df, {"Brand": visions_string, "Price": visions_integer}, CompleteSummary()
)
summary.pop("type_counts")
assert summary == {
"n_observations": 4,
"n_variables": 2,
"memory_size": 430,
"na_count": 0,
"n_vars_missing": 0,
}
def test_summarize_series():
    brand_series = pd.Series(["Honda Civic", "Toyota Corolla", "Ford Focus", "Audi A4"])
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out of the max_columns boundary, but no expansion
# since the width is not exceeded
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# exceeding the vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["ใ", "ใใใ", "ใ", "ใใใใใใ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na ใ 1\n"
"bb ใใใ 222\nc ใ 33333\n"
"ddd ใใใใใใ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 ใ\n"
"bb 222 ใใใ\nc 33333 ใ\n"
"ddd 4 ใใใใใใ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na ใใใใใ ใ\n"
"bb ใ ใใใ\nc ใ ใ\n"
"ddd ใใใ ใใใใใใ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"], "ใใใใใ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b ใใใใใ\na ใ 1\n"
"bb ใใใ 222\nc ใ 33333\n"
"ddd ใใใใใใ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=["ใใใ", "ใใใใใใ", "ใใ", "ใ"],
)
expected = (
" a b\nใใใ ใใใใใ ใ\n"
"ใใใใใใ ใ ใใใ\nใใ ใ ใ\n"
"ใ ใใใ ใใใใใใ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=Index(["ใ", "ใ", "ใใ", "ใ"], name="ใใใใ"),
)
expected = (
" a b\n"
"ใใใใ \n"
"ใ ใใใใใ ใ\n"
"ใ ใ ใใใ\n"
"ใใ ใ ใ\n"
"ใ ใใใ ใใใใใใ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"ใใใ": ["ใใใ", "ใ", "ใ", "ใใใใใ"], "ใใใใใ": ["ใ", "ใใใ", "ใ", "ใใ"]},
index=Index(["ใ", "ใใใ", "ใใ", "ใ"], name="ใ"),
)
expected = (
" ใใใ ใใใใใ\n"
"ใ \n"
"ใ ใใใ ใ\n"
"ใใใ ใ ใใใ\n"
"ใใ ใ ใ\n"
"ใ ใใใใใ ใใ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("ใ", "ใใ"), ("ใ", "ใ"), ("ใใใ", "ใใใใ"), ("ใ", "ใใ")]
)
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=idx,
)
expected = (
" a b\n"
"ใ ใใ ใใใใใ ใ\n"
"ใ ใ ใ ใใใ\n"
"ใใใ ใใใใ ใ ใ\n"
"ใ ใใ ใใใ ใใใใใใ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"],
"b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"],
"c": ["ใ", "ใ", "ใใใ", "ใใใใใใ"],
"ใใใใ": ["ใ", "ใ", "ใ", "ใ"],
},
columns=["a", "b", "c", "ใใใใ"],
)
expected = (
" a ... ใใใใ\n0 ใใใใใ ... ใ\n"
".. ... ... ...\n3 ใใใ ... ใ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["ใใใ", "ใใใใ", "ใ", "aaa"]
expected = (
" a ... ใใใใ\nใใใ ใใใใใ ... ใ\n"
".. ... ... ...\naaa ใใใ ... ใ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["ใ", "ใใใ", "ใ", "ใใใใใใ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na ใ 1\n"
"bb ใใใ 222\nc ใ 33333\n"
"ddd ใใใใใใ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 ใ\n"
"bb 222 ใใใ\nc 33333 ใ\n"
"ddd 4 ใใใใใใ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a ใใใใใ ใ\n"
"bb ใ ใใใ\n"
"c ใ ใ\n"
"ddd ใใใ ใใใใใใ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"], "ใใใใใ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b ใใใใใ\n"
"a ใ 1\n"
"bb ใใใ 222\n"
"c ใ 33333\n"
"ddd ใใใใใใ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=["ใใใ", "ใใใใใใ", "ใใ", "ใ"],
)
expected = (
" a b\n"
"ใใใ ใใใใใ ใ\n"
"ใใใใใใ ใ ใใใ\n"
"ใใ ใ ใ\n"
"ใ ใใใ ใใใใใใ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=Index(["ใ", "ใ", "ใใ", "ใ"], name="ใใใใ"),
)
expected = (
" a b\n"
"ใใใใ \n"
"ใ ใใใใใ ใ\n"
"ใ ใ ใใใ\n"
"ใใ ใ ใ\n"
"ใ ใใใ ใใใใใใ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"ใใใ": ["ใใใ", "ใ", "ใ", "ใใใใใ"], "ใใใใใ": ["ใ", "ใใใ", "ใ", "ใใ"]},
index=Index(["ใ", "ใใใ", "ใใ", "ใ"], name="ใ"),
)
expected = (
" ใใใ ใใใใใ\n"
"ใ \n"
"ใ ใใใ ใ\n"
"ใใใ ใ ใใใ\n"
"ใใ ใ ใ\n"
"ใ ใใใใใ ใใ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("ใ", "ใใ"), ("ใ", "ใ"), ("ใใใ", "ใใใใ"), ("ใ", "ใใ")]
)
df = DataFrame(
{"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"], "b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"]},
index=idx,
)
expected = (
" a b\n"
"ใ ใใ ใใใใใ ใ\n"
"ใ ใ ใ ใใใ\n"
"ใใใ ใใใใ ใ ใ\n"
"ใ ใใ ใใใ ใใใใใใ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["ใใใใใ", "ใ", "ใ", "ใใใ"],
"b": ["ใ", "ใใใ", "ใ", "ใใใใใใ"],
"c": ["ใ", "ใ", "ใใใ", "ใใใใใใ"],
"ใใใใ": ["ใ", "ใ", "ใ", "ใ"],
},
columns=["a", "b", "c", "ใใใใ"],
)
expected = (
" a ... ใใใใ\n"
"0 ใใใใใ ... ใ\n"
".. ... ... ...\n"
"3 ใใใ ... ใ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["ใใใ", "ใใใใ", "ใ", "aaa"]
expected = (
" a ... ใใใใ\n"
"ใใใ ใใใใใ ... ใ\n"
"... ... ... ...\n"
"aaa ใใใ ... ใ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["ใ", "ใใใ", "ยกยก", "ใใใใใใ"], "ใใใใใ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ยกยกยก"],
)
expected = (
" b ใใใใใ\n"
"a ใ 1\n"
"bb ใใใ 222\n"
"c ยกยก 33333\n"
"ยกยกยก ใใใใใใ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics)
def add_prices_n_returns(
prediction_result : Input[Dataset],
price_n_return_updated_dataset : Output[Dataset]
):
import pandas as pd
import FinanceDataReader as fdr
from multiprocessing import Pool
df_pred_all = pd.read_pickle(prediction_result.path)
date_start = sorted(df_pred_all.date.unique())[0]
codes_to_update = df_pred_all.code.unique().tolist()
def get_price_adj(code, start):
return fdr.DataReader(code, start=start)
def get_price(codes, date_start):
df_price = pd.DataFrame()
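# --- Assumed continuation (sketch only; the original component is truncated here) ---
# One straightforward way to finish get_price: pull each ticker with the
# get_price_adj helper above and stack the results into a single frame.
# (The multiprocessing Pool import suggests the author parallelised this step,
# but a nested helper cannot be pickled by Pool, so a plain loop is shown.)
#
#     for code in codes:
#         df_code = get_price_adj(code, date_start)
#         df_code["code"] = code
#         df_price = pd.concat([df_price, df_code])
#     return df_price.reset_index()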
"""
Script to analyse text and gain insights into unstructured data such as Sentiment and Emotion.
The complete tutorial can be found at: https://sourcedexter.com/product-review-sentiment-analysis-with-ibm-nlu
Author: <NAME>
Twitter: @sourcedexter
Website: https://sourcedexter.com
Email: <EMAIL>
"""
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import Features, EmotionOptions, SentimentOptions
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import operator
def read_csv_file(file_path):
"""
method to read a csv file and return its contents as a pandas DataFrame
:param file_path: path to the dataset file
:return: pandas DataFrame holding the raw dataset
"""
# read the file and store it as a dataframe
csv_iterator = pd.read_csv(file_path)
# print the number of rows found in the file:
num_rows, num_cols = csv_iterator.shape
print(f"the number of rows found in file: {num_rows}")
# print all the column headings
print("column headings from raw dataset: ", list(csv_iterator.columns.values))
return csv_iterator
def get_distribution(dataframe, target_column):
"""
method to find the distribution of a certain column in a given dataframe.
Shows the generated visualization to the user.
:param dataframe:
:param target_column: column upon which the distribution needs to be applied
:return: dictionary of unique values from target column and its count in the dataset.
"""
# get the count of unique products in the dataset
df_clean = dataframe[target_column].value_counts()
print("number of unique products found: ", len(df_clean.values))
# building a scatter plot to show the distribution of products
x = df_clean.values # the x axis shows the count of reviews per product
y = np.random.rand(len(df_clean.values)) # y axis does not have any significance here. so setting random values
z = df_clean.values # the size of each bubble in the scatter plot corresponds to the count of reviews.
# use the scatter function to create a plot and show it.
plt.scatter(x, y, s=z * 5, alpha=0.5)
plt.show()
# return the aggregation as a dictionary
return df_clean.to_dict()
def preprocess_data(dataset_file_path, features_included):
"""
:param dataset_file_path: path to the dataset
:param features_included: list of column names to keep. For example : ["name", "review.txt", "date"]
:return: python dict with product name as key and dataframe with reviews in date sorted order.
"""
# read the dataset file
csv_dataframe = read_csv_file(dataset_file_path)
# keep only those columns which we need
cleaned_frame = csv_dataframe[features_included]
# check to see if the column names are what we wanted
print("column headings from cleaned frame: ", list(cleaned_frame.columns.values))
# get the count of reviews for each product
distribution_result = get_distribution(cleaned_frame, "name")
# get the names of products that have fewer than 300 reviews (matching the filter below)
products_to_use = []
for name, count in distribution_result.items():
if count < 300:
products_to_use.append(name)
# get only those rows which have the products that we want to use for our analysis
cleaned_frame = cleaned_frame.loc[cleaned_frame['name'].isin(products_to_use)]
# data structure to store the individual product details dataframe
product_data_store = {}
for product in products_to_use:
# get all rows for the product
temp_df = cleaned_frame.loc[cleaned_frame["name"] == product]
# the date column is in string format, convert it to datetime
temp_df["date"] = pd.to_datetime(temp_df["reviews.date"])
# sort the reviews in reverse chronological order
temp_df.sort_values(by='date')
# store the dataframe to the product store
product_data_store[product] = temp_df.copy()
return product_data_store
def perform_text_analysis(text):
"""
method that accepts a piece of text and returns the results for sentiment analysis and emotion recognition.
:param text: string that needs to be analyzed
:return: dictionary with sentiment analysis result and emotion recognition result
"""
# initialize IBM NLU client
natural_language_understanding = NaturalLanguageUnderstandingV1(
version='2018-11-16',
iam_apikey='your_api_key_here',
url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
)
# send text to IBM Cloud to fetch analysis result
response = natural_language_understanding.analyze(text=text, features=Features(
emotion=EmotionOptions(), sentiment=SentimentOptions())).get_result()
return response
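# For reference, the fields used downstream assume a response shaped roughly like the
# following (an illustration of the keys accessed in this script, not the full NLU payload):
#
#     {
#         "sentiment": {"document": {"score": 0.72, "label": "positive"}},
#         "emotion": {"document": {"emotion": {"joy": 0.6, "anger": 0.1, "sadness": 0.1,
#                                              "fear": 0.1, "disgust": 0.1}}},
#     }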
def aggregate_analysis_result(product_dataframe):
"""
method to analyse and aggregate analysis results for a given product.
:param product_dataframe: preprocessed dataframe for one product
:return: dict keyed by review date, holding the average sentiment score and the list of detected emotions for that date
"""
# data structure to aggregated result
product_analysis_data = {}
count = 0
print("shape of dataframe", product_dataframe.shape)
# iterate through the reviews in the dataframe row-wise
for row_index, row in product_dataframe.iterrows():
print(count + 1)
count += 1
review_text = row["reviews.text"]
date = row["reviews.date"]
# get the sentiment result.
analysis = perform_text_analysis(review_text)
sentiment_value = analysis["sentiment"]["document"]["score"]
# emotion of the text is the emotion that has the maximum value in the response.
# Example dict: {"joy":0.567, "anger":0.34, "sadness":0.8,"disgust":0.4}.
# in the dict above, the emotion is "Sadness" because it has the max value of 0.8
emotion_dict = analysis["emotion"]["document"]["emotion"]
# get emotion which has max value within the dict
emotion = max(emotion_dict.items(), key=operator.itemgetter(1))[0]
# check if review on date exists. if yes: update values, if no: create new entry in dict
if date in product_analysis_data:
product_analysis_data[date]["sentiment"].append(sentiment_value)
product_analysis_data[date]["emotion"].append(emotion)
else:
product_analysis_data[date] = {}
product_analysis_data[date]["sentiment"] = [sentiment_value]
product_analysis_data[date]["emotion"] = [emotion]
# find the average sentiment for each date and update the dict.
for date in product_analysis_data.keys():
sentiment_avg = sum(product_analysis_data[date]["sentiment"]) / len(
product_analysis_data[date]["sentiment"])
product_analysis_data[date]["sentiment"] = sentiment_avg
return product_analysis_data
def visualize_sentiment_data(prod_sentiment_data):
"""
takes in the sentiment data and produces a time series visualization.
:param prod_sentiment_data:
:return: None. The visualization is displayed.
"""
# to visualize, we will build a data frame and then plot the data.
# initialize empty dataframe with columns needed
df = pd.DataFrame(columns=["date", "value"])
# add data to the data frame
dates_present = prod_sentiment_data.keys()
for count, date in enumerate(dates_present):
df.loc[count] = [date, prod_sentiment_data[date]["sentiment"]]
# set the date column as a datetime field
df["date"] = | pd.to_datetime(df['date']) | pandas.to_datetime |
import os
from datetime import datetime, timedelta
from decimal import getcontext, ROUND_UP, Decimal
from pathlib import Path
import runpy
import sys
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from _pytest.pytester import Testdir
from py._path.local import LocalPath
from flaky_tests_detection.check_flakes import (
calc_fliprate,
calculate_n_days_fliprate_table,
calculate_n_runs_fliprate_table,
get_image_tables_from_fliprate_table,
get_top_fliprates,
non_overlapping_window_fliprate,
parse_junit_to_df,
)
def create_long_test_history_df() -> pd.DataFrame:
time_format = "%Y-%m-%d %H:%M:%S"
timestamp = datetime.strptime("2021-07-01 07:00:00", time_format)
test_id1 = "test1"
test_id2 = "test2"
timestamps = []
test_identifiers = []
test_statutes = []
for index in range(1, 101):
timestamps.append(timestamp + timedelta(days=index))
if index % 2 == 0:
test_identifiers.append(test_id2)
test_statutes.append("pass")
else:
test_identifiers.append(test_id1)
if index % 11 == 0:
test_statutes.append("fail")
else:
test_statutes.append("pass")
df = pd.DataFrame(
{
"timestamp": timestamps,
"test_identifier": test_identifiers,
"test_status": test_statutes,
}
)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df = df.set_index("timestamp").sort_index()
return df
def create_test_history_df() -> pd.DataFrame:
"""Create some fake test history.
test1 is flaky.
"""
timestamps = [
"2021-07-01 07:00:00",
"2021-07-01 07:00:00",
"2021-07-01 08:00:00",
"2021-07-01 08:00:00",
"2021-07-02 07:00:00",
"2021-07-02 07:00:00",
"2021-07-02 08:00:00",
"2021-07-02 08:00:00",
"2021-07-03 07:00:00",
"2021-07-03 07:00:00",
"2021-07-03 08:00:00",
"2021-07-03 08:00:00",
"2021-07-03 09:00:00",
]
test_identifiers = [
"test1",
"test2",
"test1",
"test2",
"test1",
"test2",
"test1",
"test2",
"test1",
"test2",
"test1",
"test2",
"test1",
]
test_statutes = [
"pass",
"pass",
"fail",
"pass",
"pass",
"pass",
"fail",
"pass",
"pass",
"pass",
"pass",
"pass",
"fail",
]
df = pd.DataFrame(
{
"timestamp": timestamps,
"test_identifier": test_identifiers,
"test_status": test_statutes,
}
)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df = df.set_index("timestamp").sort_index()
return df
def create_fliprate_table_by_days() -> pd.DataFrame:
"""Create a fliprate table for tests with grouping by days"""
fliprate_table = pd.DataFrame(
{
"timestamp": pd.to_datetime(
[
"2021-07-01",
"2021-07-01",
"2021-07-01",
"2021-07-02",
"2021-07-02",
"2021-07-02",
"2021-07-03",
"2021-07-03",
"2021-07-03",
]
),
"test_identifier": [
"test1",
"test2",
"test3",
"test1",
"test2",
"test3",
"test1",
"test2",
"test3",
],
"flip_rate": [0.0, 0.0, 0.5, 1.0, 0.0, 0.0, 0.5, 0.0, 0.3],
"flip_rate_ewm": [0.0, 0.0, 0.5, 0.95, 0.0, 0.5, 0.7, 0.0, 0.2],
}
)
return fliprate_table
def create_fliprate_table_by_runs() -> pd.DataFrame:
"""Create a fliprate table for tests with grouping by runs"""
fliprate_table = pd.DataFrame(
{
"test_identifier": ["test1", "test2", "test1", "test2", "test1", "test2"],
"window": [1, 1, 2, 2, 3, 3],
"flip_rate": [0.0, 0.0, 1.0, 0.0, 0.5, 0.0],
"flip_rate_ewm": [0.0, 0.0, 0.95, 0.0, 0.7, 0.0],
}
)
return fliprate_table
@pytest.mark.parametrize(
"test_input,expected",
[
(["pass"], 0.0),
(["fail", "fail"], 0.0),
(["pass", "fail", "fail"], 0.5),
([0, 1, 0, 1], 1),
],
)
def test_calc_fliprate(test_input, expected):
"""Test fliprate calculation for different test histories"""
test_results = pd.Series(test_input)
assert calc_fliprate(test_results) == expected
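# The parametrized cases above pin down what calc_fliprate computes: the fraction of
# adjacent result pairs whose status changed, i.e. flips / (n - 1). The helper below is
# an illustrative sketch consistent with those cases, not necessarily the library's
# actual implementation.
def _calc_fliprate_sketch(test_results: pd.Series) -> float:
    if len(test_results) < 2:
        return 0.0
    flips = int((test_results != test_results.shift()).iloc[1:].sum())
    return flips / (len(test_results) - 1)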
@pytest.mark.parametrize(
"test_input,expected",
[
(
(["pass", "pass", "fail", "pass", "pass", "fail"], 2, 3),
([1, 2, 3], [0.0, 1.0, 1.0]),
),
(
(["pass", "pass"], 2, 1),
([1], [0.0]),
),
(
([0], 15, 1),
([1], [0.0]),
),
(
(["fail", "fail"], 2, 5),
([5], [0.0]),
),
],
)
def test_non_overlapping_window_fliprate(test_input, expected):
"""Test different window fliprate calculations"""
test_results = pd.Series(test_input[0])
window_size = test_input[1]
window_count = test_input[2]
result = non_overlapping_window_fliprate(test_results, window_size, window_count)
expected_result = pd.Series(index=expected[0], data=expected[1])
assert_series_equal(result, expected_result)
def test_calculate_n_days_fliprate_table():
"""Test calculation of the fliprate table with valid daily grouping settings.
Ignore checking correctness of flip_rate and flip_rate_ewm numeric values.
"""
df = create_test_history_df()
result_fliprate_table = calculate_n_days_fliprate_table(df, 1, 3)
# check correct columns
assert list(result_fliprate_table.columns) == [
"timestamp",
"test_identifier",
"flip_rate",
"flip_rate_ewm",
]
result_fliprate_table = result_fliprate_table.drop(["flip_rate", "flip_rate_ewm"], axis=1)
expected_fliprate_table = pd.DataFrame(
{
"timestamp": pd.to_datetime(
[
"2021-07-01",
"2021-07-02",
"2021-07-03",
]
),
"test_identifier": ["test1", "test1", "test1"],
},
index=[0, 2, 4],
)
# check other than fliprate values correctness
assert_frame_equal(result_fliprate_table, expected_fliprate_table)
def test_calculate_n_runs_fliprate_table():
"""Test calculation of the fliprate table with valid grouping by runs settings.
Ignore checking correctness of flip_rate and flip_rate_ewm numeric values.
"""
df = create_test_history_df()
result_fliprate_table = calculate_n_runs_fliprate_table(df, 2, 3)
# check correct columns
assert list(result_fliprate_table.columns) == [
"test_identifier",
"window",
"flip_rate",
"flip_rate_ewm",
]
result_fliprate_table = result_fliprate_table.drop(["flip_rate", "flip_rate_ewm"], axis=1)
expected_fliprate_table = pd.DataFrame(
{
"test_identifier": ["test1", "test1", "test1"],
"window": [1, 2, 3],
}
)
# check other than fliprate values correctness
assert_frame_equal(result_fliprate_table, expected_fliprate_table)
def test_no_zero_score_from_day_windows():
df = create_test_history_df()
result_fliprate_table = calculate_n_days_fliprate_table(df, 1, 3)
expected_fliprate_table = pd.DataFrame(
{
"timestamp": pd.to_datetime(
[
"2021-07-01",
"2021-07-02",
"2021-07-03",
]
),
"test_identifier": ["test1", "test1", "test1"],
"flip_rate": [1.0, 1.0, 0.5],
"flip_rate_ewm": [1.0, 1.0, 0.95],
},
index=[0, 2, 4],
)
# check other than fliprate values correctness
assert_frame_equal(result_fliprate_table, expected_fliprate_table)
from pathlib import Path
import pytest
import numpy as np
import pandas as pd
from jinja2 import Template
from ploomber.clients import SQLAlchemyClient
from ploomber import testing
def test_can_check_nulls(tmp_directory):
client = SQLAlchemyClient('sqlite:///' + str(Path(tmp_directory, 'db.db')))
df = pd.DataFrame({'no_nas': [1, 2, 1], 'nas': [1, np.nan, 1]})
df.to_sql('my_table', client.engine)
assert not testing.sql.nulls_in_columns(client, ['no_nas'], 'my_table')
assert testing.sql.nulls_in_columns(client, ['nas'], 'my_table')
assert testing.sql.nulls_in_columns(client, ['no_nas', 'nas'], 'my_table')
def test_can_check_distinct(tmp_directory):
client = SQLAlchemyClient('sqlite:///' + str(Path(tmp_directory, 'db.db')))
df = pd.DataFrame({'no_nas': [1, 2, 1], 'nas': [1, np.nan, 1]})
df.to_sql('my_table', client.engine)
assert (testing.sql.distinct_values_in_column(client, 'no_nas',
'my_table') == {1, 2})
assert (testing.sql.distinct_values_in_column(client, 'nas',
'my_table') == {1.0, None})
def test_can_check_duplicates(tmp_directory):
client = SQLAlchemyClient('sqlite:///' + str(Path(tmp_directory, 'db.db')))
df = pd.DataFrame({'duplicates': [1, 1], 'no_duplicates': [1, 2]})
df.to_sql('my_table', client.engine)
assert not testing.sql.duplicates_in_column(client, 'no_duplicates',
'my_table')
assert testing.sql.duplicates_in_column(client, 'duplicates', 'my_table')
# check cols that have duplicates but do not have duplicate pairs
df = pd.DataFrame({'a': [1, 1, 1], 'b': [1, 2, 3]})
df.to_sql('another_table', client.engine)
assert not testing.sql.duplicates_in_column(client, ['a', 'b'],
'another_table')
@pytest.mark.parametrize('stats', [False, True])
def test_assert_no_duplicates_in_column(tmp_directory, stats):
client = SQLAlchemyClient('sqlite:///' + str(Path(tmp_directory, 'db.db')))
df = pd.DataFrame({'a': [1, 1], 'b': [1, 2]})
import math
import sys
import pandas as pd
import plotly.express as px
import os
import json
if __name__ == '__main__':
rootpath = ""
while not os.path.isdir(rootpath):
rootpath = input("Enter root of discord data: ") + "/messages"
timezone = input("Enter time zone, empty for UTC (this won't be validated): ") or "UTC"
combined = pd.Series([], dtype=float)  # explicit dtype avoids the empty-Series dtype warning
channels = {}
channellist = []
guilds = {}
guildlist = []
for root, dirs, files in os.walk(rootpath):
for filename in files:
if filename == "channel.json":
with open(os.path.join(root, filename), 'r', encoding='UTF-8') as channelfile:
channeldata = json.load(channelfile)
if "guild" in channeldata:
channellist.append(channeldata)
guilds[channeldata["guild"]["id"]] = channeldata["guild"]
selection = None
i = 0
for guildid in guilds:
print("%d: %s"%(i + 1, guilds[guildid]["name"]))
guildlist.append(guilds[guildid])
i += 1
while selection == None:
try:
selection = guildlist[int(input("Select Guild Nr.: ")) - 1]["id"]
except (ValueError, IndexError):
pass
print("calculating...")
i = 0
for channel in channellist:
if channel["guild"]["id"] == selection:
with open(os.path.join(rootpath, channel["id"], "messages.csv"), newline='') as csvfile:
try:
data = pd.read_csv(csvfile, parse_dates=[1])["Timestamp"].dt.tz_convert(timezone)
hours = data.dt.hour
minutes = data.dt.minute
prepared = minutes.combine(hours, lambda a, b: a + b * 60).value_counts().sort_index()
prepared.index = prepared.index.map(
lambda a: pd.Timedelta(seconds=(a * 60)) + pd.to_datetime('1970/01/01'))
prepared.dropna()
# print(prepared.index.max())
combined = prepared.combine(other=combined, func=lambda a, b: a + b, fill_value=0)
channels[channel["name"]] = prepared.rolling('60min').mean()
except:
print("couldn't parse data for channel " + channel["name"] + " cause: " + str(sys.exc_info()[0]))
print(sys.exc_info()[0])
#print("%d/%d"%(i, len(channellist)), end="\r")
i += 1
sys.stdout.write("\r%d/%d"%(i, len(channellist)))
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
channels["total"] = combined.rolling('60min').mean()
finaldata = pd.concat(channels, axis=1)
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
studyids=box.getredcapids()
hcpa=box.getredcapfields(fieldlist=['misscat','data_status'],study='hcpa')
ssaga=box.getredcapfields(fieldlist=[],study='ssaga')
check=pd.merge(hcpa[['flagged','gender','site','study','subject','misscat___8','data_status']],ssaga[['hcpa_id','subject']],on='subject',how='outer',indicator=True)
check.loc[(check._merge=='left_only') & (check.flagged.isnull()==True) & (check.misscat___8=='0')][['site','subject','data_status']]
#SEE SSAGA TRACKING EMAIL FROM CINDY 3/31/20
##########################
#nothing new in corrected this round.
#stopped here -- keeping code below to pick up where left off...which is ...not at all
#figure out the ids we have and see what we need to pull from elsewhere
#These are the subjects we're expecting (misscat___3=1 for perm missing toolb0x)
hcpa=box.getredcapfields(fieldlist=['misscat', 'data_status'], study='hcpa')
hcpa=hcpa.loc[hcpa.flagged.isnull()==True].copy()
hcpa=hcpa.loc[(hcpa.data_status.isin(['1','2'])) & (~(hcpa.misscat___3=='1'))].copy()
hcpd=box.getredcapfields(fieldlist=['misscat', 'data_status'], study='hcpdchild')
hcpd18=box.getredcapfields(fieldlist=['misscat', 'data_status'], study='hcpd18')
hcpdparent=box.getredcapfields(fieldlist=[], study='hcpdparent')
hcpdparent=hcpdparent[['parent_id','subject','study']].rename(columns={'subject':'child_id'})
hcpd=pd.concat([hcpd,hcpd18],axis=0)
hcpd=hcpd.loc[hcpd.flagged.isnull()==True].copy()
hcpd=hcpd.loc[(hcpd.data_status.isin(['1','2'])) & (~(hcpd.misscat___3=='1'))].copy()
#Harvard
Harv=82803734267
Harvattn=96013516511
Harvcorr=84800505740
harvcleandata,harvcleanscores=box2dataframe(fileid=Harv)
len(harvcleandata.PIN.unique()) #551
len(harvcleanscores.PIN.unique()) #551
H=pd.DataFrame(harvcleanscores.PIN.unique(),columns={"PIN"})
H['site']='Harvard'
MGH2=82761770877
MGHattn=96148925420
MHGcorr=84799213727
mghdata,mghscores=box2dataframe(fileid=MGH2)
len(mghdata.PIN.unique())
len(mghscores.PIN.unique()) #230 in each now
M=pd.DataFrame(mghscores.PIN.unique(),columns={"PIN"})
M['site']='MGH'
WashUD=82804015457
WashUDattn=96147128675
WUDcorr=84801037257
wuddata,wudscores=box2dataframe(fileid=WashUD) #301 in each now
len(wuddata.PIN.unique())
len(wudscores.PIN.unique())
WD=pd.DataFrame(wudscores.PIN.unique(),columns={"PIN"})
WD['site']='WashUD'
WashUA=82804729845
WashUAattn=96149947498
WUAcorr=84799623206
wuadata,wuascores=box2dataframe(fileid=WashUA)
len(wuadata.PIN.unique()) #238 in each now
len(wuascores.PIN.unique()) #238 in each now
WA=pd.DataFrame(wuascores.PIN.unique(),columns={"PIN"})
WA['site']='WashUA'
UMNA=82803665867
UMNAattn=96153923311
UMNAcorr=84799599800
umnadata,umnascores=box2dataframe(fileid=UMNA)
len(umnadata.PIN.unique()) #288 in each now
len(umnascores.PIN.unique()) #288 in each now
UMA=pd.DataFrame(umnascores.PIN.unique(),columns={"PIN"})
UMA['site']='UMNA'
UMND=82805151056
UMNDattn=96155708581
UMNDcorr=84799525828
umnddata,umndscores=box2dataframe(fileid=UMND)
len(umnddata.PIN.unique()) #270 in each now
len(umndscores.PIN.unique()) #270 in each now
UMD=pd.DataFrame(umndscores.PIN.unique(),columns={"PIN"})
UMD['site']='UMND'
UCLAA=82807223120
UCLAAattn=96154919803
UCLAAcorr=84799075673
uclaadata,uclaascores=box2dataframe(fileid=UCLAA)
len(uclaadata.PIN.unique()) #207
len(uclaascores.PIN.unique())
UCA=pd.DataFrame(uclaascores.PIN.unique(),columns={"PIN"})
UCA['site']='UCLAA'
UCLAD=82805124019
UCLADattn=96162759127
UCLADcorr=84800272537
ucladdata,ucladscores=box2dataframe(fileid=UCLAD)
len(ucladdata.PIN.unique()) #350
len(ucladscores.PIN.unique())
UCD=pd.DataFrame(ucladscores.PIN.unique(),columns={"PIN"})
UCD['site']='UCLAD'
allcurated=pd.concat([H,M,WD,WA,UMA,UMD,UCA,UCD],axis=0)
###########################################
ucladdata,ucladscores
uclaadata,uclaascores
umnddata,umndscores
umnadata,umnascores
wuadata,wuascores
wuddata,wudscores
mghdata,mghscores
harvcleandata,harvcleanscores
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
#raw:
allrawdataHCAorBoth=pd.concat([uclaadata,umnadata,wuadata,mghdata],axis=0)
allrawdataHCD=pd.concat([ucladdata,umnddata,wuddata,harvcleandata],axis=0)
#scores:
allscoresHCAorBoth=pd.concat([uclaascores,umnascores,wuascores,mghscores],axis=0)
allscoresHCD=pd.concat([ucladscores,umndscores,wudscores,harvcleanscores],axis=0)
######################
#make csv
allrawdataHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allrawdataHCD.to_csv(box_temp+'/HCD_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allscoresHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
allscoresHCD.to_csv(box_temp+'/HCD_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
##############################
def box2dataframe(fileid):
harvardfiles, harvardfolders = foldercontents(fileid)
data4process = harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE') == True)]
scores4process = harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE') == True]
data4process=data4process.reset_index()
scores4process = scores4process.reset_index()
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
harvcleandata = pd.read_csv(box_temp+'/'+ data4process.filename[0], header=0, low_memory=False)
harvcleanscores = pd.read_csv(box_temp+'/'+ scores4process.filename[0], header=0, low_memory=False)
return harvcleandata,harvcleanscores
def inventory(hdata,hscores,study,site,v):
curated = findpairs(hdata, hscores) # this is the list of ids in both scored and raw corrected data
curatedDF = pd.DataFrame(curated, columns={'PIN'})
curatedDF[['subject','visit']]=curatedDF.PIN.str.split("_",1,expand=True)
curatedvisit=curatedDF.loc[curatedDF.visit==v]
curatedvisit=pd.merge(curatedvisit,study.loc[study.site==site],on='subject',how='outer',indicator=True)
findelsewhere=curatedvisit.loc[curatedvisit._merge=='right_only'] #these are the ones that I need to get from endpoint
return findelsewhere[['subject','visit','site','study']]
def grabfromallsites(pdfind,pdscores,pddata,pairs):
catscores=pd.DataFrame()
catdata=pd.DataFrame()
pdfind['PIN']=pdfind['subject']+'_V1'
grablist=pdfind.PIN.unique()
for pinno in grablist:
if pinno in pairs:
print("Found PIN:" + pinno)
catscores=pd.concat([catscores,pdscores.loc[pdscores.PIN==pinno]],axis=0)
catdata = pd.concat([catdata,pddata.loc[pddata.PIN == pinno]],axis=0)
return catscores,catdata
def sendtocorrected(scoresfound,datafound,fname,fnumber):
scoresfound.to_csv(box_temp+'/'+fname+'_scores_'+snapshotdate+'.csv',index=False)
box.upload_file(box_temp+'/'+fname+'_scores_'+snapshotdate+'.csv',fnumber)
datafound.to_csv(box_temp+'/'+fname+'_data_'+snapshotdate+'.csv',index=False)
box.upload_file(box_temp+'/'+fname+'_data_'+snapshotdate+'.csv',fnumber)
def curatedandcorrected(curatedfolderid,needsattnfolder):
harvardfiles, harvardfolders=foldercontents(curatedfolderid)
#dont grab files that need attention
harvardfolders=harvardfolders.loc[~(harvardfolders.foldername.str.contains('needs_attention'))]
harvardfiles2, harvardfolders2=folderlistcontents(harvardfolders.foldername,harvardfolders.folder_id)
harvardfiles=pd.concat([harvardfiles,harvardfiles2],axis=0,sort=True)
data4process=harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE')==True)]
scores4process=harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE')==True]
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
#trick the catcontents macro to create catable dataset, but dont actually cat until you remove the
#PINS in the corrected file from the curated file
#step1 - separate data4process/scores4process into corrected and old curated data
cdata=data4process.loc[data4process.filename.str.contains('corrected')]
cscores=scores4process.loc[scores4process.filename.str.contains('corrected')]
olddata=data4process.loc[~(data4process.filename.str.contains('corrected'))]
oldscores=scores4process.loc[~(scores4process.filename.str.contains('corrected'))]
#create catable dataset for corrected data
hdatainitcorr=catcontents(cdata,box_temp)
hscoreinitcorr=catcontents(cscores,box_temp)
#get list of ids in this corrected data #60 for Harvard
corrl=findpairs(hdatainitcorr,hscoreinitcorr) #this is the list of ids in both scored and raw corrected data
#create catable dataset for old curated data
hdatainitold=catcontents(olddata,box_temp)
hscoreinitold=catcontents(oldscores,box_temp)
#remove the data with PINS from corrected
hdatainitoldsub=hdatainitold[~(hdatainitold.PIN.isin(corrl))]
hscoreinitoldsub=hscoreinitold[~(hscoreinitold.PIN.isin(corrl))]
#now cat the two datasets together
hdatainit=pd.concat([hdatainitcorr,hdatainitoldsub],axis=0,sort=True)
__author__ = "<NAME>"
__version__ = ".2"
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
class MetricsFunctions:
def average_los_in_es_shelter(self, entries_df, cleaned=False):
"""
Used For:
:param entries_df:
:return:
"""
stays = entries_df[
(
entries_df["Entry Exit Provider Id"].str.contains("Hansen") |
entries_df["Entry Exit Provider Id"].str.contains("Columbia") |
entries_df["Entry Exit Provider Id"].str.contains("Willamette") |
entries_df["Entry Exit Provider Id"].str.contains("SOS") |
entries_df["Entry Exit Provider Id"].str.contains("5th")
)
]
stays["Entry Date"] = pd.to_datetime(stays["Entry Exit Entry Date"]).dt.date
stays["Exit Date"] = pd.to_datetime(stays["Entry Exit Exit Date"])
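        # NOTE: the remainder of this method is an assumed sketch -- the original
        # source is truncated at this point. The method name suggests it returns the
        # average length of stay in days across the selected ES shelters.
        stays["LOS Days"] = (
            pd.to_datetime(stays["Exit Date"]) - pd.to_datetime(stays["Entry Date"])
        ).dt.days
        if cleaned:
            stays = stays.dropna(subset=["LOS Days"])
        return stays["LOS Days"].mean()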
"""<NAME>., 2019 - 2020. All rights reserved."""
import datetime
import fnmatch
import subprocess
import os
import re
import sys
import time
import pandas as pd
import extractor_log as cl
LOG = cl.get_logger()
DELTA_BODY = []
UID_LIST = []
FILE_TYPE = ["JAVA", "CPP", "C", "CS", "PY", "TS", "JS"] # pragma: no mutate
def get_file_names(dir_path):
""" Function used for getting all the valid file names from the given directory
@parameters
dir_path: Path to the repository
@return
This function returns all the files in the given directory"""
path_list = [os.path.join(dirpath, filename) for dirpath, _, filenames in
os.walk(dir_path) for filename in filenames]
return path_list
def filter_reg_files(allfiles, reg_pattern):
""" Function used to filter requested file patterns
from the files in the given directory
@parameters
allfiles: list of all files in the repository
@return
This function returns filtered files in the given directory"""
cmd = "" # pragma: no mutate
regex, filtered_files = [], []
if reg_pattern is None:
filtered_files = allfiles
else:
reg_pattern = reg_pattern.split(",")
for i in range(len(reg_pattern).__trunc__()):
cmd = "{} " + cmd # pragma: no mutate
regex.append(fnmatch.translate(reg_pattern[i]))
cmd = "(" + cmd[:-1].replace(" ", "|") + ")" # pragma: no mutate
re_obj = re.compile(cmd.format(*regex))
[filtered_files.append(allfiles[i]) if
re.match(re_obj, allfiles[i]) is None else None for i in range(len(allfiles))]
return filtered_files
def run_ctags_cmd(file_ext, file_names, find):
""" Function to execute ctags command
@parameters
file_ext: file type(.py, .java etc)
file_names: path to file
find: keyword to run ctags command
@return
This function returns ctags output"""
if file_ext.upper() == "PY":
cmd = 'ctags -x "%s"' % file_names # pragma: no mutate
elif file_ext.upper() in ["TS", "JS"]: # pragma: no mutate
cmd = 'ctags --language-force=java -x "%s" | grep %s' % (file_names, find) # pragma: no mutate
else:
cmd = 'ctags -x "%s" | grep %s' % (file_names, find) # pragma: no mutate
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
return proc
def get_function_names(file_names):
""" Function to get method/function names from the input files in the given repository
@parameters
file_names: Path to the file
@return
This function returns function/method names and line numbers of all the given files"""
file_ext = file_names.split('.')[-1].upper()
find = "function" if file_ext.upper() == "CPP" or file_ext.upper() == "C" \
else ["member", "function", "class"] if file_ext.upper() == "PY" else "method" # pragma: no mutate
proc = run_ctags_cmd(file_ext, file_names, find)
process = str(proc.stdout.read(), 'utf-8') # pragma: no mutate
return process_function_names(process, find)
def process_function_names(func_data, find):
""" This function cleans the ctags output to get function/method names and line numbers
@parameters
func_data: Ctags output
find: keyword of method type(member/function/class/method)
@return
This function returns list of function names and line numbers"""
if func_data is not None:
process_list = re.findall(r'\w+', func_data)
if find == ["member", "function", "class"]: # pragma: no mutate
val = [index for index, _ in enumerate(process_list) if
process_list[index - 1] in find and process_list[index].isdigit()]
else:
val = process_ctags_output(find, process_list)
function_list = get_sorted_func_list(process_list, val)
line_numbers = get_func_line_num_list(process_list, val)
line_numbers.sort()
else:
print("Input files doesn't have valid methods") # pragma: no mutate
sys.exit(1) # pragma: no mutate
return function_list, line_numbers
def process_ctags_output(find, process_list):
""" This function cleans the ctags output to get function/method names and line numbers
@parameters
process_list: Ctags output in list format
find: keyword of method type(member/function/class/method)
@return
This function returns list of function names and line numbers"""
return [index for index, _ in enumerate(process_list) if
process_list[index - 1] == find and process_list[index].isdigit()]
def get_sorted_func_list(process_list, val):
""" This function sorts function names with respective to line numbers
@parameters
process_list: Ctags output in list format
val: filtered variable to get function name
@return
This function returns list of sorted function names based on line numbers"""
return [val for _, val in
sorted(zip(get_func_line_num_list(process_list, val), get_func_list(process_list, val)))]
def get_func_list(process_list, val):
""" This function refines the ctags output to get function names
@parameters
process_list: Ctags output in list format
val: filtered variable to get function name
@return
This function returns list of function"""
function_list = []
[function_list.append(process_list[val[i] - 2]) for i in range(len(val))]
return function_list
def get_func_line_num_list(process_list, val):
""" This function refines the ctags output to get function line numbers
@parameters
process_list: Ctags output in list format
val: filtered variable to get function name
@return
This function returns list of function line numbers"""
line_numbers = []
[line_numbers.append(int(process_list[val[i]])) for i in range(len(val))]
return line_numbers
def check_annot(filename, line_num, annot):
""" Function checks for the annotation condition
@parameters
filename: Path to the file
line_num: function/method line number
annot: Annotation condition (Ex: @Test)
@return
This function returns function/method definitions that has the given annotation"""
ret_val = None
if annot is None:
ret_val = get_func_body(filename, line_num)
else:
ret_val = get_annot_methods(filename, line_num, annot)
return ret_val
def get_file_content(filename):
""" Function reads the given file
@parameters
filename: Path to the file
@return
This function returns content of the file inputed"""
with open(filename, encoding='utf-8', errors='ignore') as file_data: # pragma: no mutate
return file_data.readlines()
def get_annot_methods(filename, line_num, annot):
""" This function gets the methods that have given annotation
@parameters
filename: Path to the file
line_num: function/method line number
annot: Annotation condition (Ex: @Test)
@return
This function returns function/method definitions that has the given annotation"""
file_content = get_file_content(filename)
iterator = int(line_num) - 2 # Iterating through lines to check for annotations
try:
for _ in range(int(line_num) - 2): # pragma: no mutate
data = str(file_content[iterator]).strip()
iterator = iterator - 1
ret_val = process_annot_method_body(annot, data, filename, line_num)
if ret_val != "continue": # pragma: no mutate
return ret_val
except IndexError as exc:
LOG.info("error while processing file_line: %s", filename + "_" + line_num) # pragma: no mutate
LOG.info(exc) # pragma: no mutate
def process_annotation(annot):
""" This function process the annotation to extract methods having given annotation
@parameters
annot: Annotation condition (Ex: @Test)
@return
This function returns starting and ending character of the annotation"""
annot_start = annot[0]
annot_end = annot[len(annot) - 1]
if annot_end.isalpha():
annot_end = None # pragma: no mutate
return annot_start, annot_end
def process_annot_method_body(annot, data, filename, line_num):
""" This function process the function/method to check if it has the given annotation
@parameters
filename: Path to the file
line_num: function/method line number
annot: Annotation condition (Ex: @Test)
data: Content of the given file
@return
This function returns function/method definitions that has the given annotation"""
ret_val = "continue" # pragma: no mutate
annot_start, annot_end = process_annotation(annot)
if annot.strip(annot_start).strip(annot_end).upper() in data.strip(annot_start) \
.strip(annot_end).upper().split(",") and data.strip().startswith(annot_start):
body = get_func_body(filename, line_num)
if body is None:
body = "" # pragma: no mutate
ret_val = data + os.linesep + str(body)
elif data[:1] != "@" and str(data).strip() == "}" or str(data).strip() == "{": # pragma: no mutate
ret_val = None # pragma: no mutate
return ret_val
def check_py_annot(file_name, annot):
""" Function checks for the annotation condition in python files
@parameters
filename: Path to the file
annot: Annotation condition (Ex: @Test)
@return
This function returns function/method names that has the given annotation"""
line_data = list(
[line.rstrip() for line in open(file_name, encoding='utf-8', errors='ignore')]) # pragma: no mutate
val = 1 # pragma: no mutate
if annot[0] != "@": # Making use of annotation search function for function start with feature too
annot = "def " + annot.lower() # pragma: no mutate
val = -1 # pragma: no mutate
return get_py_annot_method_names(line_data, annot, val)
def get_py_annot_method_names(line_data, annot, val):
""" Function checks for the annotation condition in python files
@parameters
line_data: File content in list format
annot: Annotation condition (Ex: @Test)
val: index pointer that helps in getting method name
@return
This function returns function/method names that has the given annotation"""
data = []
for i, _ in enumerate(line_data):
if annot in line_data[i]:
if str(line_data[i]).strip().split(" ")[0] == "def": # pragma: no mutate
func_name = line_data[i + 1 + val].strip().split(" ")[1].split("(")[0] # pragma: no mutate
data.append(func_name)
else:
for j in range(i, len(line_data)):
if str(line_data[j]).strip().split(" ")[0] == "def": # pragma: no mutate
func_name = line_data[j].strip().split(" ")[1].split("(")[0] # pragma: no mutate
data.append(func_name)
break
return data
def get_func_body(filename, line_num):
""" Function to get method/function body from files
@parameters
filename, line_num: Path to the file, function/method line number
@return
This function returns function/method definitions of all the given files"""
line_num = int(line_num)
code = "" # pragma: no mutate
cnt_braket = 0
found_start = False
return_val = None
with open(filename, "r", encoding='utf-8', errors='ignore') as files: # pragma: no mutate
for i, line in enumerate(files):
if i >= (line_num - 1):
code += line
if line.count("{") > 0:
found_start = True
cnt_braket += line.count("{")
if line.count("}") > 0:
cnt_braket -= line.count("}")
if cnt_braket == 0 and found_start is True:
return_val = code
break
return return_val
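# Example (hedged illustration, not from the original source): for a Java file whose
# 42nd line starts "public int add(int a, int b) {", get_func_body(path, 42) walks the
# file from that line, counting "{" and "}" occurrences, and returns the accumulated
# text once the brace counter drops back to zero -- i.e. the complete method body.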
def get_py_func_body(line_numbers, file_name, annot):
""" Function to get method/function body from files
@parameters
filename: Path to the file
line_num: function/method line number
annot: Annotation condition (Ex: @Test)
@return
This function returns python function/method definitions in the given files"""
func_name = []
func_body = []
line_data = list([line.rstrip() for line
in open(file_name, encoding='utf-8', errors='ignore')]) # pragma: no mutate
data, data_func_name = process_py_methods(file_name, line_numbers, line_data)
if annot is not None:
data_func_name, data = get_py_annot_methods(file_name, data_func_name, data, annot)
if len(data_func_name).__trunc__() != 0:
func_name, func_body = process_py_func_body(data, data_func_name)
return func_name, func_body
def process_py_methods(file_name, line_numbers, line_data):
""" This Function refines the python function names to remove any class or lamida functions
@parameters
filename: Path to the file
line_num: function/method line number
line_data: File content in list format
@return
This function returns processed python function/method names and definitions in the given files"""
data = []
data_func_name = []
for i, _ in enumerate(line_numbers):
start = line_numbers[i]
stop = len(line_data) if i == len(line_numbers) - 1 else line_numbers[i + 1] - 1
data.append(os.linesep.join(line_data[start - 1:stop])) # pragma: no mutate
data_func_name.append(str(file_name) + "_" + str(line_data[start - 1].strip().split(" ")[1].split("(")[0]))
if data[len(data) - 1].startswith("class") or "lambda" in data[len(data) - 1]:
data.remove(data[len(data) - 1])
data_func_name.pop(len(data_func_name) - 1)
return data, data_func_name
def get_py_annot_methods(file_name, data_func_name, data, annot):
""" This function filters the python functions to get methods that have given annotation
@parameters
filename: Path to the file
data_func_name: list of all function names in the file
data: File content in list format
annot: Annotation condition (Ex: @staticmethod)
@return
This function returns python function/method names and definitions that have the given annotation"""
annot_meth_line_num = check_py_annot(file_name, annot)
annot_meth_name = []
annot_meth_body = []
for k, _ in enumerate(annot_meth_line_num):
for j, _ in enumerate(data_func_name):
if str(annot_meth_line_num[k]) in str(data_func_name[j]):
annot_meth_body.append(data[j])
annot_meth_name.append(data_func_name[j])
break
return annot_meth_name, annot_meth_body
def process_py_func_body(data_body, data_name):
""" This function processes the collected python function definitions to put then in a organized way
@parameters
data_body: list of all function definitions in the file
data_name: list of all function names in the file
@return
This function returns python function/method definitions in an organized format"""
for i, _ in enumerate(data_body):
data_body[i] = os.linesep.join([s for s in str(data_body[i]).splitlines() if s]) # pragma: no mutate
data_body = clean_py_methods(data_body)
return data_name, data_body
def clean_py_methods(data_body):
""" This function cleans the collected python function definitions to remove any junk content entered into method
while collecting
@parameters
data_body: list of all function definitions in the file
data_name: list of all function names in the file
@return
This function returns python function/method definitions in an organized format"""
for j, _ in enumerate(data_body):
data_list = list(str(data_body[j]).split(os.linesep))
count = len(data_list[0]) - len(data_list[0].lstrip())
i = 0 # pragma: no mutate
for i, _ in enumerate(data_list):
if i == len(data_list) - 1 or len(data_list[i + 1]) \
- len(data_list[i + 1].lstrip()) <= count: # pragma: no mutate
break
del data_list[i + 1:]
data_body[j] = str(os.linesep.join(data_list))
return data_body
def filter_files(list_files):
""" Function to filter required files from list of all files
@parameters
list_files: List of all files that the given repository contains
@return
This function returns the list of required file(.java, .cpp, .c, .cs, .py) paths """
local_files = []
for files in list_files:
extension = files.split('.')[-1].upper() # pragma: no mutate
if len(extension).__trunc__() > 0:
if extension in FILE_TYPE:
local_files.append(files)
return local_files
def get_delta_lines(file_name, annot, delta):
""" Function to get + and - delta number of lines from the annoted method/function
@parameters
filename, annot, delta: Path to the file, required annotation, required lines from method """
try:
line_data = list(filter(None, [line.rstrip() for
line in
open(file_name, encoding='utf-8', errors='ignore')])) # pragma: no mutate
data = []
for num, line in enumerate(line_data, 1):
process_delta_lines_body(annot, line, delta, num, line_data, data, file_name)
data = []
except IOError as exc:
LOG.info("Cannot read file: %s", file_name) # pragma: no mutate
LOG.info(exc) # pragma: no mutate
def process_delta_lines_body(annot, line, delta, num, line_data, data, file_name):
""" Function to get + and - delta number of lines from the annoted method/function
@parameters
file_name: Path to the file
annot: Required annotation
delta: Required lines from method
line_data: File content in list format
data: variable that holds delta lines data"""
if annot.upper() in line.strip().upper():
for i in range(0, (int(delta) * 2) + 1):
if num - (int(delta) + 1) + i >= len(line_data): # pragma: no mutate
break
data.append(line_data[num - (int(delta) + 1) + i])
DELTA_BODY.append("\n".join(data))
UID_LIST.append(os.path.basename(file_name) + "_")
def get_flat_list(data_list):
""" Function that generates a list by merging a list of sub lists
@parameters
data_list: list of sub lists
@return
This function returns a flattened list"""
flattened_list = []
for val in data_list:
if str(type(val)) != "<class 'list'>":
flattened_list.append(val)
if str(type(val)) == "<class 'list'>":
for sub_val in val:
flattened_list.append(sub_val)
return flattened_list
def process_delta_lines_data():
""" This function processes delta lines data to generate a dataframe
@return
This function returns a dataframe of delta lines data"""
data = {'Uniq ID': UID_LIST, 'Code': DELTA_BODY}
data_frame = pd.DataFrame(data)
UID_LIST.clear()
mask = data_frame['Uniq ID'].duplicated(keep=False)
data_frame.loc[mask, 'Uniq ID'] += data_frame.groupby('Uniq ID').cumcount().add(1).astype(str)
return data_frame.sort_values('Uniq ID')
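# Example (hedged illustration, not from the original source): if UID_LIST held
# ['a.java_', 'a.java_', 'b.java_'], the duplicated-mask + groupby/cumcount step above
# renames the clashing IDs to 'a.java_1' and 'a.java_2' while leaving 'b.java_' alone,
# so every row carries a unique 'Uniq ID' before sorting.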
def process_final_data(code_list):
""" This function processes function/method data to generate a dataframe
@return
This function returns a dataframe of function/method data"""
flat_uid_list = get_flat_list(UID_LIST)
flat_code_list = get_flat_list(code_list)
data = {'Uniq ID': flat_uid_list, 'Code': flat_code_list}
data_frame = pd.DataFrame.from_dict(data, orient='index')
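    # NOTE: assumed continuation -- the original source is truncated here. Mirroring
    # process_delta_lines_data: orient='index' puts 'Uniq ID'/'Code' on the rows, so
    # transpose first, then de-duplicate the IDs and sort before returning.
    data_frame = data_frame.transpose()
    UID_LIST.clear()
    mask = data_frame['Uniq ID'].duplicated(keep=False)
    data_frame.loc[mask, 'Uniq ID'] += data_frame.groupby('Uniq ID').cumcount().add(1).astype(str)
    return data_frame.sort_values('Uniq ID')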
# import word_emo.framework.reference_methods.embedding_transformer
import itertools
import tensorflow as tf
import numpy as np
import pandas as pd
import scipy.stats as st
from numpy.linalg import svd
import naacl.framework.util as util
from sklearn.model_selection import KFold
class Densifier():
def __init__(self, embeddings):
self.embeddings=embeddings
self.d=self.embeddings.m.shape[1]
self.P=np.zeros(shape=[self.d,1])
self.P[0,0]=1.
self.Qs={} #mapping from emotional variable to matrix
self.induced_lexicon=None #pandas data frame matching seed lexicon with
# all words from embeddings
self.seed_lexicon=None
def fit(self, seed_lexicon, binarization_threshold=.5, alpha=.7):
tf.reset_default_graph()
# print(seed_lexicon)
self.seed_lexicon=seed_lexicon
self.induced_lexicon=pd.DataFrame( columns=self.seed_lexicon.columns,
index=self.embeddings.iw)
binarized_lexicon=self.binarize(sd_threshold=binarization_threshold)
for var in list(self.induced_lexicon):
self.Qs[var]=self.train_Q( pos=binarized_lexicon[var]['pos'],
neg=binarized_lexicon[var]['neg'],
batch_size=100,
optimizer='sgd',
orthogonalize=False,
alpha=alpha,
training_steps=3000)
self.induced_lexicon[var]=self.embeddings.m.dot(self.Qs[var]).dot(self.P)
def predict(self, words):
preds=self.induced_lexicon.loc[words]
### If word is not in the original embeddings space, give mean of induced values instead
means=self.induced_lexicon.mean(axis=0)
for word in words:
if not word in self.induced_lexicon.index:
preds.loc[word]=means
### drop duplicated entries. Migrate to embedding module!
preds=preds[~preds.index.duplicated(keep='first')]
###rescaling data to fit the range of the seed lexicon
preds=util.scale_predictions_to_seeds(preds=preds,
seed_lexicon=self.seed_lexicon)
##########
return preds
def eval(self, gold_lex):
if self.induced_lexicon is None:
raise ValueError('Embeddings need to be transformed first! Run "fit"!')
else:
return(util.eval(gold_lex, self.predict(gold_lex.index)))
def crossvalidate(self, labels, k_folds=10):
'''
lexicon Pandas data frame.
'''
results_df=pd.DataFrame(columns=labels.columns)
k=0
kf=KFold(n_splits=k_folds, shuffle=True).split(labels)
for __, split in enumerate(kf):
train=labels.iloc[split[0]]
test=labels.iloc[split[1]]
k+=1
print(k)
self.fit(train)
results_df.loc[k]=self.eval(test)
print(results_df)
results_df=util.average_results_df(results_df)
return results_df
def vec(self, word):
return self.embeddings.represent(word)
def train_Q(self, pos, neg, alpha, batch_size=100, optimizer='sgd', orthogonalize=True, training_steps=4000):
'''
Takes positive and negatives seeds to learn orthogonal transformation.
'''
#building cartesian product of positive and negative seeds
with tf.Graph().as_default():
alpha=tf.constant(alpha, dtype=tf.float32)
pairs_separate=[i for i in itertools.product(pos, neg)]
print('len data separate: ', len(pairs_separate))
data_separate=pd.DataFrame(pairs_separate)
del pairs_separate
#same classes
print('beginning to work on aligned pairs...')
pairs_align=combinations(pos)+combinations(neg)
print('Lenght of pairs_align: ', len(pairs_align))
data_align=pd.DataFrame(pairs_align)
del pairs_align
# setting up tensorflow graph
Q=tf.Variable(tf.random_normal(shape=[self.d, self.d], stddev=1), name='Q')
P=tf.constant(self.P, dtype=tf.float32) #must be column vector now that e_w/v are row vectors
e_diff=tf.placeholder(tf.float32, shape=[None, self.d], name='e_diff') #e_w - e_v for w,v are from different class
e_same=tf.placeholder(tf.float32, shape=[None, self.d], name='e_same') # e_w - e_v for w,v are from same class
# loss function
QxP=tf.matmul(Q,P)
loss_separate = -tf.reduce_sum(
tf.matmul(e_diff,QxP)
)
loss_align = tf.reduce_sum(
tf.matmul(e_same, QxP)
)
loss=(alpha*loss_separate)+((1-alpha)*loss_align)
### Define optimization
if optimizer=='sgd':
## CLASSICAL SGD (according to paper)
global_step=tf.Variable(0, trainable=False)
starter_learning_rate=5.
learning_rate=tf.train.exponential_decay(
learning_rate=starter_learning_rate,
global_step=global_step,
decay_steps=1,
decay_rate=.99,
staircase=True)
##Passing global_step to minimize() will increment it at each step.
learning_step=(
tf.train.GradientDescentOptimizer(learning_rate)
.minimize(loss,global_step=global_step)
)
### same with ADAM
elif optimizer=='adam':
learning_rate=tf.constant(1e-3)
learning_step=(
tf.train.AdamOptimizer(learning_rate)
.minimize(loss))
else:
raise NotImplementedError
with tf.Session() as sess:
init=tf.global_variables_initializer()
sess.run(init)
gen_separate=Batch_Gen(data=data_separate, random=True, caller=self)
gen_align=Batch_Gen(data=data_align, random=True, caller=self)
last_Q=Q.eval()
for i_step in range(training_steps):
if orthogonalize:
# re-orthogonalize matrix
u,s,v_T=svd(Q.eval())
new_q = u.dot(v_T.T)
Q.assign(new_q).eval()
curr_separate=gen_separate.next(n=batch_size)
curr_align=gen_align.next(n=batch_size)
curr_loss, __=sess.run( [loss, learning_step],
feed_dict={ 'e_diff:0':curr_separate,
'e_same:0':curr_align})
if i_step%100==0:
curr_Q=Q.eval(session=sess)
Q_diff=np.sum(abs(last_Q-curr_Q))
print(i_step, curr_loss, learning_rate.eval(), Q_diff)
last_Q=curr_Q
print('Success')
return Q.eval()
def binarize(self, sd_threshold):
'''
ARGS:
lexicon Pandas Data Frame.
sd_threshold The fraction of the standard deviation above and below
the mean which gives the binarization threshold.
RETURNS
Dictionary of dictionary containing the indices (referring to self.
seed_lexicon)
'''
lexicon=self.seed_lexicon
means=lexicon.mean(axis=0)
sds=lexicon.std(axis=0)
# print(means, sds)
binarized={var:{'pos':[], 'neg':[]} for var in list(lexicon)}
# print(binarized)
for i_word in range(len(lexicon)):
#word=lexicon.index[i]
for i_var in range(len(list(lexicon))):
var=list(lexicon)[i_var]
mean=means.iloc[i_var]
sd=sds.iloc[i_var]
# print(var,word)
# print(word, var, mean, sd_threshold, sd)
if lexicon.iloc[i_word,i_var]> (mean + sd_threshold*sd):
binarized[var]['pos']+=[i_word]
elif lexicon.iloc[i_word,i_var]< (mean - sd_threshold*sd):
binarized[var]['neg']+=[i_word]
return binarized
def combinations(it):
out=[]
for j in range(len(it)):
for i in range(len(it)):
if j>i:
out+=[(it[i],it[j])]
return out
class Batch_Gen():
def __init__(self, data, caller, random=False):
self.data = pd.DataFrame(data)
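        # NOTE: assumed continuation -- the original source is truncated here.
        self.caller = caller
        self.random = random
    # The training loop in train_Q expects next(n) to return an [n, d] matrix of
    # embedding differences e_w - e_v for a batch of the stored pairs; the pairs hold
    # seed-lexicon row indices (as produced by binarize) and 'caller' is the Densifier,
    # whose vec() maps a word to its embedding. This is a hedged sketch, not the
    # original implementation.
    def next(self, n=100):
        batch = self.data.sample(n, replace=True) if self.random else self.data.head(n)
        words = self.caller.seed_lexicon.index
        return np.array([
            self.caller.vec(words[i]) - self.caller.vec(words[j])
            for i, j in zip(batch[0], batch[1])
        ])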
# imports
#region
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
#endregion
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
edgar_files = ['CH4', 'CO2_excl_short-cycle_org_C', 'CO2_org_short-cycle_C', 'N2O']
ef = edgar_files[0]
data = []
for ef in edgar_files:
logger(ef)
ey = 2018 if ef == 'CO2_excl_short-cycle_org_C' else 2015
frame = pd.read_excel(f'{root}\\edgar_v5.0\\v50_{ef}_1970_{ey}.xls', sheet_name='TOTALS BY COUNTRY',
header=9)
frame = frame[['ISO_A3'] + list(range(1970, ey + 1))].rename(columns={'ISO_A3': 'code'}).set_index('code')
data.append(frame)
# data.append(np.sum(frame.T, axis=1).rename(ef))
df = data[1]
# so here I have edgar CO2 up to 2018
# what do I want to do with it?
countries = pd.read_csv('D:\\projects\\fakta-o-klimatu\\work\\emission-intensity\\countries.csv')
countries.show()
countries = countries.rename(columns={'country_name': 'country', 'final_region': 'cont', 'final_region_en': 'cont_en',
'second_chart_region': 'region'}).drop(columns=['world_bank_region', 'wiki_region', 'final_region_full'])
regions = countries[['code', 'final_region']].rename(columns={'final_region': 'region'})
selected = ['ฤรญna', 'Evropskรก unie', 'Indie', 'Rusko', 'Spojenรฉ stรกty americkรฉ']
regions = regions[regions.region.isin(selected)].copy()
# what about Great Britain?
regions = regions.query('code != "GBR"').reset_index(drop=True).copy()
regions.shape
regions.show()
df = pd.merge(regions, df.reset_index())
df.show()
cze = df.iloc[[0]].copy()
cze.loc[0, 'region'] = 'ฤeskรก republika'
co2 = pd.concat([df, cze])
#SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import psycopg2.extensions
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool, Value
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Persistant():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, worker_type, data_tables=[],operations_tables=[]):
self.db_schema = None
self.helper_schema = None
self.worker_type = worker_type
#For database functionality
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Persistant.ROOT_AUGUR_DIR
# count of tuples inserted in the database ( to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
#TODO: consider taking parts of this out for the base class and then overriding it in WorkerGitInterfaceable
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host')
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
logging.warn('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
#add credentials to db config. Goes to databaseable
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
#Return string representation of an object with all information needed to recreate the object (Think of it like a pickle made out of text)
#Called using repr(*object*). eval(repr(*object*)) == *object*
def __repr__(self):
return f"{self.config['id']}"
def initialize_logging(self):
#Get the log level in upper case from the augur config's logging section.
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
#Use stock python formatter for stdout
formatter = Formatter(fmt=format_string)
#User custom for stderr, Gives more info than verbose_format_string
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
#Create more complex sublogs in the logfile directory determined by the AugurLogging class
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
#database interface, the git interfaceable adds additional function to the super method.
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
self.db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.db_schema)})
# , 'client_encoding': 'utf8'
self.helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.logger.info("Trying to find max id of table...")
try:
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
except Exception as e:
self.logger.info(f"Could not find max id. ERROR: {e}")
#25151
#self.logger.info(f"Good, passed the max id getter. Max id: {self.history_id}")
#Make sure the type used to store date is synced with the worker?
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
## Getting rid of nan's and NoneTypes across the dataframe to start:
subject = subject.fillna(value=numpy.nan)
source = source.fillna(value=numpy.nan)
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
## Dealing with an error coming from paginate endpoint and the GitHub issue worker
### For a release in mid september, 2021. #SPG This did not work on Ints or Floats
# if type(source[source_columns[index]].values[0]).isnull():
# subject[subject_columns[index]] = pd.fillna(value=np.nan)
# source[source_columns[index]] = pd.fillna(value=np.nan)
# continue
source_index = source_columns[index]
try:
source_index = source_columns[index]
type_dict[subject_columns[index]] = type(source[source_index].values[0])
#self.logger.info(f"Source data column is {source[source_index].values[0]}")
#self.logger.info(f"Type dict at {subject_columns[index]} is : {type(source[source_index].values[0])}")
except Exception as e:
self.logger.info(f"Source data registered exception: {source[source_index]}")
self.print_traceback("", e, True)
subject = subject.astype(type_dict)
return subject, source
#Convert safely from sql type to python type?
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None
)
return df
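    # Example (hedged illustration, not from the original source): a column holding
    # [1.0, 2.0, NaN] -- floats that are all whole numbers or null -- becomes
    # [1, 2, None] via the nullable Int64 dtype, so downstream SQL inserts see
    # integers and NULLs rather than floats.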
def _setup_postgres_merge(self, data_sets, sort=False):
metadata = s.MetaData()
data_tables = []
# Setup/create tables
for index, data in enumerate(data_sets):
data_table = s.schema.Table(f"merge_data_{index}_{os.getpid()}", metadata)
df = pd.DataFrame(data)
columns = sorted(list(df.columns)) if sort else df.columns
df = self._convert_float_nan_to_int(df)
for column in columns:
data_table.append_column(
s.schema.Column(
column, self.get_sqlalchemy_type(
df.fillna(method='bfill').iloc[0][column], column_name=column
)
)
)
data_tables.append(data_table)
metadata.create_all(self.db, checkfirst=True)
# Insert data to tables
for data_table, data in zip(data_tables, data_sets):
self.bulk_insert(
data_table, insert=data, increment_counter=False, convert_float_int=True
)
session = s.orm.Session(self.db)
self.logger.info("Session created for merge tables")
return data_tables, metadata, session
def _close_postgres_merge(self, metadata, session):
session.close()
self.logger.info("Session closed")
# metadata.reflect(self.db, only=[new_data_table.name, table_values_table.name])
metadata.drop_all(self.db, checkfirst=True)
self.logger.info("Merge tables dropped")
def _get_data_set_columns(self, data, columns):
if not len(data):
return []
self.logger.info("Getting data set columns")
df = pd.DataFrame(data, columns=data[0].keys())
final_columns = copy.deepcopy(columns)
for column in columns:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
final_columns += list(expanded_column.columns)
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping unique prefixs?
self.logger.info("Columns have already been added, moving on...")
pass
self.logger.info(final_columns)
self.logger.info(list(set(final_columns)))
self.logger.info("Finished getting data set columns")
return df[list(set(final_columns))].to_dict(orient='records')
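    # Example (hedged illustration, not from the original source): requesting columns
    # ['id', 'user.login'] from records like {'id': 1, 'user': {'login': 'octocat'}}
    # expands the nested 'user' dict into a 'user.login' column, so the returned
    # records carry both the flat and the dotted keys.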
def organize_needed_data(
self, new_data, table_values, action_map={}, in_memory=True
):
"""
This method determines which rows need to be inserted into the database (ensures data isn't inserted more than once)
and determines which rows have data that needs to be updated
:param new_data: list of dictionaries - needs to be compared with data in database to see if any updates are
needed or if the data needs to be inserted
:param table_values: list of SQLAlchemy tuples - data that is currently in the database
:param action_map: dict with two keys (insert and update) and each key's value contains a list of the fields
that are needed to determine if a row is unique or if a row needs to be updated
:param in_memory: boolean - determines whether the method is done is memory or database
(currently everything keeps the default of in_memory=True)
:return: list of dictionaries that contain data that needs to be inserted into the database
:return: list of dictionaries that contain data that needs to be updated in the database
"""
if len(table_values) == 0:
return new_data, []
if len(new_data) == 0:
return [], []
need_insertion = pd.DataFrame()
need_updates = pd.DataFrame()
import pandas as pd
import numpy as np
from DataSubsetter import DataSubsetter
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
class SubsetModelTrainer(DataSubsetter):
def __init__(self, df, y, columns, model, comb_size = None,
data_col = None, combs=True, library='scikit',
kwargs={}, train_test_split = False, ttprop = 0.2, fit_type = 'fit',
stats_to_df = True, drop_subset_column = True, q=4):
DataSubsetter.__init__(self, df, columns, comb_size, q)
self.model = model
self.data_col = data_col
self.combs = combs
self.kwargs = kwargs
self.y = y
self.train_test_split = train_test_split
self.ttprop = ttprop
self.library = library
self.fit_type = fit_type
self.stats_to_df = stats_to_df
self.drop_subset_column = drop_subset_column
if data_col:
self.data_col = self.data_col
else:
self.data_col = list(df)
def fitStatsModel(self,x, y):
#print(y)#.astype(float).value_counts())
setup = self.model(endog=y.astype(float), exog=x.astype(float))
if self.fit_type == 'fit':
trained = setup.fit(**self.kwargs)
elif self.fit_type == 'fit_regularized':
trained = setup.fit_regularized(**self.kwargs)
return trained
def fitSciKit(self, x, y):
# This might not work
model = self.model(**self.kwargs)
model.fit(x, y)
return model
def modTest(self, x, y):
if self.train_test_split:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=self.ttprop)
else:
X_train = X_test = x
y_train = y_test = y
print(' Number of rows in training set:', len(X_train))
if self.library == 'statsmodels':
model = self.fitStatsModel(X_train, y_train)
elif self.library == 'scikit':
model = self.fitSciKit(X_train, y_train)
else:
print(self.library, " is not not implemented")
raise NotImplementedError
pred = pd.Series(model.predict(X_test.astype(float)))
pred = round(pred)
stats = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
return model, stats
def train(self):
# Make data subsets
subset_datum = self.makeTestDataSubset()
models = {}
statistics = {}
for self.key in subset_datum:
print("Training subset: ", self.key)
subset_x = subset_datum[self.key][self.data_col]
subset_y = self.y[self.y.index.isin(subset_x.index)]
if self.drop_subset_column:
# As everything is now a single value, we need
# to drop these columns to avoid singular matricies
drop_cols = []
for col in self.columns:
if self.typeCheck(self.df[col]) == 'int':
drop_cols.append(col)
print(' Removing filter column', col, "from model")
subset_x = subset_x.drop(drop_cols, axis = 1)
temp_model, stats = self.modTest(subset_x, subset_y)
models[self.key] = temp_model
statistics[self.key] = stats
# convert to easy to read DF
self.models = models
if self.stats_to_df:
statistics = pd.concat({k: pd.DataFrame(v) for k, v in statistics.items()})
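        # NOTE: assumed continuation -- the original source is truncated here.
        # Keep the per-subset statistics on the instance and hand the results back.
        self.statistics = statistics
        return models, statistics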
"""
This module includes two types of discrete state-space formulations for biogas plants.
The anaerobic digestion model in FlexibleBiogasPlantModel is based on the work in
https://doi.org/10.1016/j.energy.2017.12.073 and ISBN: 978-3-319-16192-1
The module is designed to work with fledge: https://doi.org/10.5281/zenodo.3715873
The code is organized and implemented based on the flexible building model cobmo: https://zenodo.org/record/3523539
"""
import numpy as np
import pandas as pd
import scipy.linalg
import os
import inspect
import sys
import datetime as dt
import pyomo.environ as pyo
import bipmo.utils
class BiogasPlantModel(object):
"""
BiogasPlantModel represents all attributes and functions that all biogas plants have in common. It is the basis for
every model that inherits from it. Caution: It does not work as a standalone model!
"""
model_type: str = None
der_name: str = 'Biogas Plant'
plant_scenarios: pd.DataFrame
states: pd.Index
controls: pd.Index
outputs: pd.Index
switches: pd.Index
chp_schedule: pd.DataFrame
disturbances: pd.Index
state_vector_initial: pd.Series
state_matrix: pd.DataFrame
control_matrix: pd.DataFrame
disturbance_matrix: pd.DataFrame
state_output_matrix: pd.DataFrame
control_output_matrix: pd.DataFrame
disturbance_output_matrix: pd.DataFrame
timestep_start: pd.Timestamp
timestep_end: pd.Timestamp
timestep_interval: pd.Timedelta
timesteps: pd.Index
disturbance_timeseries: pd.DataFrame
output_maximum_timeseries: pd.DataFrame
output_minimum_timeseries: pd.DataFrame
marginal_cost: float
lhv_table: pd.DataFrame
temp_in: float
cp_water: float
feedstock_limit_type: str
available_feedstock: float
def __init__(
self,
scenario_name: str,
timestep_start=None,
timestep_end=None,
timestep_interval=None,
connect_electric_grid=True,
):
# Scenario name.
self.scenario_name = scenario_name
# Define the biogas plant model (change paths accordingly).
base_path = os.path.dirname(os.path.dirname(os.path.normpath(__file__)))
# Load the scenario.
self.plant_scenarios = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_scenario.csv')
)
self.plant_scenarios = self.plant_scenarios[
self.plant_scenarios['scenario_name'] == self.scenario_name]
self.plant_scenarios.index = pd.Index([self.scenario_name])
# Load marginal cost
self.marginal_cost = self.plant_scenarios.loc[
self.scenario_name, 'marginal_cost_EUR_Wh-1']
# Load feedstock data used in the scenario.
self.plant_feedstock = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_feedstock.csv')
)
self.plant_feedstock = self.plant_feedstock[
self.plant_feedstock['feedstock_type']
== self.plant_scenarios.loc[self.scenario_name, 'feedstock_type']
]
self.plant_feedstock.index = pd.Index([self.scenario_name])
self.feedstock_limit_type = self.plant_scenarios.loc[
self.scenario_name, 'availability_limit_type']
self.available_feedstock = self.plant_scenarios.loc[
self.scenario_name, 'availability_substrate_ton_per_year']
# Load CHP data used in the scenario.
self.CHP_list = self.plant_scenarios.CHP_name[self.scenario_name].split()
self.number_CHP = len(self.CHP_list)
self.plant_CHP_source = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_CHP.csv')
)
self.plant_CHP = pd.DataFrame(columns=self.plant_CHP_source.columns)
for i in self.CHP_list:
self.plant_CHP = pd.concat([
self.plant_CHP,
self.plant_CHP_source[self.plant_CHP_source['CHP_name'] == i]
])
self.plant_CHP.index = self.plant_CHP['CHP_name']
self.elec_cap_list = pd.DataFrame([cap for cap in self.plant_CHP.elec_cap_Wel],
index=self.CHP_list,
columns=['elec_cap_Wel'])
self.ramp_rate_list = pd.DataFrame([rate for rate in self.plant_CHP.ramp_capacity_W_min],
index=self.CHP_list,
columns=['ramp_rate_W_min'])
# Load storage data used in the scenario.
self.plant_storage = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_storage.csv')
)
self.plant_storage = self.plant_storage[
self.plant_storage['storage_name']
== self.plant_scenarios.loc[self.scenario_name, 'storage_name']
]
self.plant_storage.index = pd.Index([self.scenario_name])
# Define useful values.
self.lhv_table = pd.DataFrame(
# Lower heating value of methane in J/m3.
[35.8e6],
pd.Index(['LHV_methane']),
pd.Index(['LHV value (in J/m^3)'])
)
self.temp_in = self.plant_scenarios.loc[
# Temperature of the digestion process in ยฐC.
self.scenario_name, 'digester_temp']
self.cp_water = 4182 # Specific heat of water in J/(K*kg) at 20ยฐC.
# Define CHP coefficients
self.set_gains = pd.Index([])
# Define the heat and power CHP coefficients.
for i in range(len(self.CHP_list)):
self.set_gains = pd.Index([
self.plant_CHP['CHP_name'][i] + '_biogas_volume_inflow_m3_s-1'
]).union(self.set_gains)
self.gain_heat = pd.DataFrame(
0.0,
pd.Index([0]),
pd.Index(range(0, self.set_gains.size))
)
self.gain_power = pd.DataFrame(
0.0,
pd.Index([0]),
pd.Index(range(0, self.set_gains.size))
)
for i in range(0, self.number_CHP):
for j in range(0, self.lhv_table.size):
self.gain_heat[self.lhv_table.size * i + j] = self.plant_CHP['therm_eff'][i] * \
self.lhv_table['LHV value (in J/m^3)'][j] * \
self.plant_feedstock['methane_content'][
self.scenario_name]
self.gain_power[self.lhv_table.size * i + j] = self.plant_CHP['elec_eff'][i] * \
self.lhv_table['LHV value (in J/m^3)'][j] * \
self.plant_feedstock['methane_content'][
self.scenario_name]
self.gain_heat.columns = self.set_gains
self.gain_power.columns = self.set_gains
# Empty control variables (are added in the inherited classes)
self.controls = pd.Index(
[],
name='control_name'
)
# Add the chp controls (every biogas plant has at least one CHP)
for i in range(len(self.CHP_list)):
self.controls = pd.Index([
# CHPs Biogas inflows
self.plant_CHP['CHP_name'][i] + '_biogas_volume_inflow_m3_s-1'
]).union(self.controls)
# State variable for storage (every bg has a storage)
self.states = pd.Index(
# Storage biogas content.
self.plant_scenarios['scenario_name'] + '_storage_content_m3',
name='state_name'
)
# Output variables.
self.outputs = pd.Index(
# Storage biogas content.
self.plant_scenarios['scenario_name']
+ '_storage_content_m3',
name='output_name'
)
self.outputs = pd.Index([
# net active power output
'active_power',
# net reactive power output
'reactive_power',
# net thermal output (heat)
'thermal_power'
]).union(self.outputs)
self.switches = pd.Index([])
for i in range(len(self.CHP_list)):
self.outputs = pd.Index([
# CHPs active power production.
self.plant_CHP['CHP_name'][i] + '_active_power_Wel',
# CHPs reactive power production.
self.plant_CHP['CHP_name'][i] + '_react_power_Var',
# CHPs heat power production.
self.plant_CHP['CHP_name'][i] + '_heat_Wth'
]).union(self.outputs)
self.switches = pd.Index([
# CHP switch to turn on/off
self.plant_CHP['CHP_name'][i] + '_switch',
]).union(self.switches)
# Define timesteps.
if timestep_start is not None:
self.timestep_start = pd.Timestamp(timestep_start)
else:
self.timestep_start = pd.Timestamp(self.plant_scenarios.loc[self.scenario_name, 'time_start'])
if timestep_end is not None:
self.timestep_end = pd.Timestamp(timestep_end)
else:
self.timestep_end = pd.Timestamp(self.plant_scenarios.loc[self.scenario_name, 'time_end'])
if timestep_interval is not None:
self.timestep_interval = pd.Timedelta(timestep_interval)
else:
self.timestep_interval = pd.Timedelta(self.plant_scenarios.loc[self.scenario_name, 'time_step'])
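        # NOTE: assumed continuation -- the original source is truncated here. The class
        # declares a 'timesteps' index above, so a minimal sketch builds it from the
        # resolved start/end/interval values.
        self.timesteps = pd.Index(
            pd.date_range(
                start=self.timestep_start,
                end=self.timestep_end,
                freq=self.timestep_interval
            ),
            name='time'
        )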
import string
import warnings
import numpy as np
from pandas import (
DataFrame,
MultiIndex,
NaT,
Series,
date_range,
isnull,
period_range,
timedelta_range,
)
from .pandas_vb_common import tm
class GetNumericData:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df["foo"] = "bar"
self.df["bar"] = "baz"
self.df = self.df._consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
class Lookup:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh"))
self.df["foo"] = "bar"
self.row_labels = list(self.df.index[::10])[:900]
self.col_labels = list(self.df.columns) * 100
self.row_labels_all = np.array(
list(self.df.index) * len(self.df.columns), dtype="object"
)
self.col_labels_all = np.array(
list(self.df.columns) * len(self.df.index), dtype="object"
)
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.idx_cols = np.random.randint(0, N, N)
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_reindex_axis0(self):
self.df.reindex(self.idx)
def time_reindex_axis1(self):
self.df.reindex(columns=self.idx_cols)
def time_reindex_axis1_missing(self):
self.df.reindex(columns=self.idx)
def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx_cols)
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_rename_single(self):
self.df.rename({0: 0})
def time_rename_axis0(self):
self.df.rename(self.dict_idx)
def time_rename_axis1(self):
self.df.rename(columns=self.dict_idx)
def time_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
# mem_itertuples_* benchmarks are slow
timeout = 120
def setup(self):
N = 1000
self.df = DataFrame(np.random.randn(N * 10, N))
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(
np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)]
)
self.df4 = DataFrame(np.random.randn(N * 1000, 10))
def time_items(self):
# (monitor no-copying behaviour)
if hasattr(self.df, "_item_cache"):
self.df._item_cache.clear()
for name, col in self.df.items():
pass
def time_items_cached(self):
for name, col in self.df.items():
pass
def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
def time_itertuples_start(self):
self.df4.itertuples()
def time_itertuples_read_first(self):
next(self.df4.itertuples())
def time_itertuples(self):
for row in self.df4.itertuples():
pass
def time_itertuples_to_list(self):
list(self.df4.itertuples())
def mem_itertuples_start(self):
return self.df4.itertuples()
def peakmem_itertuples_start(self):
self.df4.itertuples()
def mem_itertuples_read_first(self):
return next(self.df4.itertuples())
def peakmem_itertuples(self):
for row in self.df4.itertuples():
pass
def mem_itertuples_to_list(self):
return list(self.df4.itertuples())
def peakmem_itertuples_to_list(self):
list(self.df4.itertuples())
def time_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def time_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def time_itertuples_raw_tuples(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def time_itertuples_raw_tuples_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def mem_itertuples_raw_start(self):
return self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def mem_itertuples_raw_to_list(self):
return list(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def time_iterrows(self):
for row in self.df.iterrows():
pass
class ToString:
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
def time_to_string_floats(self):
self.df.to_string()
class ToHTML:
def setup(self):
nrows = 500
self.df2 = DataFrame(np.random.randn(nrows, 10))
self.df2[0] = period_range("2000", periods=nrows)
self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
class ToDict:
params = [["dict", "list", "series", "split", "records", "index"]]
param_names = ["orient"]
def setup(self, orient):
data = np.random.randint(0, 1000, size=(10000, 4))
self.int_df = DataFrame(data)
self.datetimelike_df = self.int_df.astype("timedelta64[ns]")
def time_to_dict_ints(self, orient):
self.int_df.to_dict(orient=orient)
def time_to_dict_datetimelike(self, orient):
self.datetimelike_df.to_dict(orient=orient)
class ToNumpy:
def setup(self):
N = 10000
M = 10
self.df_tall = DataFrame(np.random.randn(N, M))
self.df_wide = DataFrame(np.random.randn(M, N))
self.df_mixed_tall = self.df_tall.copy()
self.df_mixed_tall["foo"] = "bar"
self.df_mixed_tall[0] = period_range("2000", periods=N)
self.df_mixed_tall[1] = range(N)
self.df_mixed_wide = self.df_wide.copy()
self.df_mixed_wide["foo"] = "bar"
self.df_mixed_wide[0] = period_range("2000", periods=M)
self.df_mixed_wide[1] = range(M)
def time_to_numpy_tall(self):
self.df_tall.to_numpy()
def time_to_numpy_wide(self):
self.df_wide.to_numpy()
def time_to_numpy_mixed_tall(self):
self.df_mixed_tall.to_numpy()
def time_to_numpy_mixed_wide(self):
self.df_mixed_wide.to_numpy()
def time_values_tall(self):
self.df_tall.values
def time_values_wide(self):
self.df_wide.values
def time_values_mixed_tall(self):
self.df_mixed_tall.values
def time_values_mixed_wide(self):
self.df_mixed_wide.values
class Repr:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
self.df_tall = DataFrame(np.random.randn(nrows, 10))
self.df_wide = DataFrame(np.random.randn(10, nrows))
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
def time_html_repr_trunc_si(self):
self.df4._repr_html_()
def time_repr_tall(self):
repr(self.df_tall)
def time_frame_repr_wide(self):
repr(self.df_wide)
class MaskBool:
def setup(self):
data = np.random.randn(1000, 500)
df = DataFrame(data)
df = df.where(df > 0)
self.bools = df > 0
self.mask = isnull(df)
def time_frame_mask_bools(self):
self.bools.mask(self.mask)
def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
class Isnull:
def setup(self):
N = 10 ** 3
self.df_no_null = DataFrame(np.random.randn(N, N))
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (N, N))
self.df = DataFrame(data)
sample = np.array(list(string.ascii_letters + string.whitespace))
data = np.random.choice(sample, (N, N))
self.df_strings = DataFrame(data)
sample = np.array(
[
NaT,
np.nan,
None,
np.datetime64("NaT"),
np.timedelta64("NaT"),
0,
1,
2.0,
"",
"abcd",
]
)
data = np.random.choice(sample, (N, N))
self.df_obj = DataFrame(data)
def time_isnull_floats_no_null(self):
isnull(self.df_no_null)
def time_isnull(self):
isnull(self.df)
def time_isnull_strngs(self):
isnull(self.df_strings)
def time_isnull_obj(self):
isnull(self.df_obj)
class Fillna:
params = (
[True, False],
["pad", "bfill"],
[
"float64",
"float32",
"object",
"Int64",
"Float64",
"datetime64[ns]",
"datetime64[ns, tz]",
"timedelta64[ns]",
],
)
param_names = ["inplace", "method", "dtype"]
def setup(self, inplace, method, dtype):
N, M = 10000, 100
if dtype in ("datetime64[ns]", "datetime64[ns, tz]", "timedelta64[ns]"):
data = {
"datetime64[ns]": date_range("2011-01-01", freq="H", periods=N),
"datetime64[ns, tz]": date_range(
"2011-01-01", freq="H", periods=N, tz="Asia/Tokyo"
),
"timedelta64[ns]": | timedelta_range(start="1 day", periods=N, freq="1D") | pandas.timedelta_range |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from gensim.utils import tokenize
from gensim.parsing.preprocessing import remove_stopwords
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import pandas as pd
import numpy as np
from textblob import TextBlob
import spacy
from sklearn.feature_extraction.text import CountVectorizer
nlp = spacy.load('en_core_web_sm')
from sklearn.model_selection import TimeSeriesSplit
from src.data.cleaning import clean_news_headline
from src.utils import backend, home
def split_train_test(comb):
split = TimeSeriesSplit(n_splits=2)
for tr_idx, te_idx in split.split(comb):
if backend == 'pandas':
tr = comb.iloc[tr_idx, :]
te = comb.iloc[te_idx, :]
else:
tr = comb.loc[tr_idx.tolist()]
te = comb.iloc[te_idx, :]
assert tr.shape[1] == te.shape[1]
assert tr.shape[0] + te.shape[0] == comb.shape[0]
return tr, te
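# Editor's usage sketch (assumption: illustrative only). With n_splits=2 the loop above
# keeps the last chronological split, i.e. roughly the first two thirds of the rows as
# train and the final third as test:
# tr, te = split_train_test(comb)      # comb indexed in time order
# assert te.index.max() == comb.index.max()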
def split_features_target(combined, name, load=False):
print('splitting {} into x, y'.format(name))
if load:
print('loading from data/interim')
corpus = pd.read_csv(home / 'data' / 'interim' / '{}-features.csv'.format(name), index_col=0)
target = pd.read_csv(home / 'data' / 'interim' / '{}-target.csv'.format(name), index_col=0)
return corpus, target
target = combined.loc[:, 'Label'].to_frame()
target.columns = ['target']
corpus = combined.drop(['Label'], axis=1)
corpus = corpus.agg(' '.join, axis=1)
print('cleaning news headlines')
corpus = corpus.apply(clean_news_headline)
corpus = corpus.to_frame()
corpus.columns = ['news']
print('target shape {} distribution - {}'.format(target.shape, np.mean(target.values)))
print('saving to data/interim')
corpus.to_csv(home / 'data' / 'interim' / '{}-features.csv'.format(name))
target.to_csv(home / 'data' / 'interim' / '{}-target.csv'.format(name))
return corpus, target
def gensim_tokenize(docs):
tokens = []
for doc in docs:
doc = remove_stopwords(doc)
tokens.append(list(tokenize(doc, lower=True)))
return tokens
def get_doc_vecs(docs, model):
vecs = []
for sample in docs:
vecs.append(model.infer_vector(sample))
return np.array(vecs)
def make_document_vectors(x_tr, x_te):
tr_tokens = gensim_tokenize(x_tr.loc[:, 'news'].values)
te_tokens = gensim_tokenize(x_te.loc[:, 'news'].values)
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(tr_tokens)]
model = Doc2Vec(documents, vector_size=32, window=3, min_count=1, workers=4, verbose=1)
tr_vecs = get_doc_vecs(tr_tokens, model)
te_vecs = get_doc_vecs(te_tokens, model)
cols = ['doc2vec-{}'.format(i) for i in range(tr_vecs.shape[1])]
tr_vecs = | pd.DataFrame(tr_vecs, index=x_tr.index, columns=cols) | pandas.DataFrame |
import glob
import icecap as icp
import numpy as np
import os
import fnmatch
import subradar as sr
import rsr
import pandas as pd
def params():
"""get various parameters defining the season
"""
out = {'code_path':os.getcwd()}
out['season'] = out['code_path'].split('/')[-3]
out['process'] = out['code_path'].split('/')[-1]
out['root_path'] = '/'.join(out['code_path'].split('/')[0:-5])
out['norm_path'] = out['root_path'] + '/targ/norm'
out['rsr_path'] = out['code_path'].replace('code', 'targ')
out['cmp_path'] = out['rsr_path'].replace('RSR', 'CMP')
out['pik_path'] = out['root_path'] + '/orig/xtra/'+out['season']+'/PIK/' + out['process']
out['foc_path'] = out['root_path'] + '/targ/xtra/' + out['season']+ '/FOC/Best_Versions/S1_POS'
out['sweep_path'] = out['root_path'] + '/targ/xtra/' + out['season']+ '/FOC/Best_Versions/S5_VEW'
out['tpro_path'] = out['root_path'] + '/targ/tpro'
out['treg_path'] = out['root_path'] + '/targ/treg'
out['season_flight_pst'] = out['root_path'] + '/syst/linux/lib/dbase/season_flight_pst'
return out
def pik(pst, process=None, **kwargs):
"""Get available PIK files for a PST
"""
p = icp.get.params()
if process is None:
process = p['process']
folder = '/'.join([p['pik_path'].replace('/'+p['process'],''), process, pst])
files = glob.glob(folder + '/*.*')
names = [i.split('/')[-1] for i in files]
products = [i.split('.')[0] for i in names]
pik = [i.split('.')[1] for i in names]
return products, pik
def cmp(pst, process=None, **kwargs):
"""Get available radar data in CMP for a PST
"""
p = icp.get.params()
if process is None:
process = p['process']
folder = '/'.join([p['cmp_path'].replace('/'+p['process'],''), process, pst])
files = glob.glob(folder + '/*[!.meta]')
products = [i.split('/')[-1] for i in files]
return products
def pst(pattern, **kwargs):
"""Get PSTs for the current season that match a given pattern (regex)
"""
p = icp.get.params()
data = np.genfromtxt(p['season_flight_pst'], delimiter=' ', dtype=np.str)
i = np.where(data[:,2] == p['season'])
pst = data[i,0]
return fnmatch.filter(pst.flatten(), pattern)
def sweep(pst, **kwargs):
"""Get available sweeps files for a PST
"""
p = icp.get.params()
folder = '/'.join([p['sweep_path'], pst])
files = glob.glob(folder + '/*sweeps*')
products = [i.split('/')[-1] for i in files]
return products
def rsr(pst, process=None, **kwargs):
"""Get available rsr files
"""
p = icp.get.params()
if process is None:
process = p['process']
folder = '/'.join([p['rsr_path'].replace('/'+p['process'],''), process, pst])
files = glob.glob(folder + '/*.*')
products = [i.split('/')[-1] for i in files]
pik = [i.split('.')[1] for i in products if len(i.split('.')) == 2]
return pik
def rsr_data(pst, **kwargs):
"""Display data avaialble to launch RSR
"""
psts = icp.get.pst(pst)
cmps = [ icp.get.cmp(i, process='pik1') for i in psts ]
cmps_1m = [ icp.get.cmp(i, process='pik1.1m') for i in psts ]
piks = [ icp.get.pik(i, process='pik1')[1] for i in psts]
piks_1m = [ icp.get.pik(i, process='pik1.1m')[1] for i in psts]
sweeps = [ icp.get.sweep(i) for i in psts ]
rsr_1m = [ icp.get.rsr(i, process='pik1.1m') for i in psts ]
d = {'PST':psts}
df = pd.DataFrame(d)
#df['CMP_pik1'] = cmps
df['sweeps'] = sweeps
df['CMP_pik1.1m'] = cmps_1m
df['PIK_pik1'] = piks
df['PIK1_pik1.1m'] = piks_1m
df['RSR_1m'] = rsr_1m
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
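# Editor's usage sketch (assumption): because pst() filters with fnmatch, a wildcard
# pattern lists RSR availability for every PST of the season, e.g.
# icp.get.rsr_data('*')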
import pandas as pd
import numpy as np
# TODO: fix 'skips', add remaining rows once scrape completes
df_list = []
# 87 turned out weird, figure out what happened here
skips = [87, 101, 144, 215, 347, 350, 360,374]
for i in range(600):
if i in skips:
print('skipping {}'.format(i))
pass
else:
df1 = pd.read_csv('coffee_{}_table_0.csv'.format(i))
df2 = pd.read_csv('coffee_{}_table_1.csv'.format(i))
df3 = pd.read_csv('coffee_{}_table_2.csv'.format(i))
df4 = pd.read_csv('coffee_{}_table_3.csv'.format(i))
df5 = pd.read_csv('coffee_{}_table_4.csv'.format(i))
# df1
"""
Unnamed: 0 0 1
0 0 90.58 NaN
1 1 View Q Arabica Certificate NaN
2 2 Print Q Arabica Certificate NaN
3 3 Cupping Protocol and Descriptors NaN
4 4 View Green Analysis Details NaN
5 5 Request a Sample NaN
6 6 Species Arabica
7 7 Owner metad plc
"""
df1.columns = ['one','two','three']
colnames1 = df1['two'].tolist()
        # these names are inconsistent, but the data doesn't look important
colnames1[1] = 'view_certificate_1'
colnames1[2] = 'view_certificate_2'
data1 = df1['three'].tolist()
data1[0] = colnames1[0]
colnames1[0] = 'quality_score'
df1_processed = pd.DataFrame([data1],columns=colnames1)
# df2
"""
Unnamed: 0 0 1 \
0 0 Country of Origin Ethiopia
1 1 Farm Name METAD PLC
2 2 Lot Number NaN
3 3 Mill METAD PLC
4 4 ICO Number 2014/2015
5 5 Company METAD Agricultural Developmet plc
6 6 Altitude 1950-2200
7 7 Region GUJI-HAMBELA/GOYO
8 8 Producer METAD PLC
2 3
0 Number of Bags 300
1 Bag Weight 60 kg
2 In-Country Partner METAD Agricultural Development plc
3 Harvest Year 2014
4 Grading Date April 4th, 2015
5 Owner metad plc
6 Variety NaN
7 Status Completed
8 Processing Method Washed / Wet
"""
df2.columns = ['one','two','three','four','five']
colnames1 = df2['two'].tolist()
colnames2 = df2['four'].tolist()
data1 = df2['three'].tolist()
data2 = df2['five'].tolist()
df2_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df3
"""
Unnamed: 0 0 1 2 3
0 0 NaN Sample NaN Sample
1 1 Aroma 8.67 Uniformity 10.00
2 2 Flavor 8.83 Clean Cup 10.00
3 3 Aftertaste 8.67 Sweetness 10.00
4 4 Acidity 8.75 Cupper Points 8.75
5 5 Body 8.50 Total Cup Points Sample 90.58
6 6 Balance 8.42 NaN NaN
"""
df3.columns = ['one','two','three','four','five']
colnames1 = df3['two'].tolist()
colnames2 = df3['four'].tolist()
data1 = df3['three'].tolist()
data2 = df3['five'].tolist()
df3_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df4
"""
Unnamed: 0 0 1 2 \
0 0 Moisture 12 % Color
1 1 Category One Defects 0 full defects Category Two Defects
2 2 Quakers 0 NaN
3
0 Green
1 0 full defects
2 NaN
"""
df4.columns = ['one','two','three','four','five']
colnames1 = df4['two'].tolist()
colnames2 = df4['four'].tolist()
data1 = df4['three'].tolist()
data2 = df4['five'].tolist()
df4_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df5
"""
Unnamed: 0 0 \
0 0 Expiration
1 1 Certification Body
2 2 Certification Address
3 3 Certification Contact
1
0 April 3rd, 2016
1 METAD Agricultural Development plc
2 BAWA Center, 3rd Floor (Gerji), Addis Ababa, E...
3 <NAME> (Emebet Dinku) - +251-116-292534, ...
"""
df5.columns = ['one','two','three']
colnames1 = df5['two'].tolist()
data1 = df5['three'].tolist()
if i > 1:
            prev_cols = df.columns  # cols before replacing df with next coffee
df5_processed = pd.DataFrame([data1],columns=colnames1)
df = | pd.concat([df1_processed,df2_processed,df3_processed,df4_processed,df5_processed],1) | pandas.concat |
# -*- coding: utf-8 -*-
# import pytest
import pandas as pd
import pandas.testing as tm
import xnd
from pandas.core.internals import ExtensionBlock
import numpy as np
import xndframes as xf
TEST_ARRAY = ["Test", "string", None]
def test_constructors():
v1 = xf.XndframesArray(TEST_ARRAY)
assert isinstance(v1.dtype, xf.XndframesDtype)
v2 = xf.XndframesArray(np.array(TEST_ARRAY))
assert isinstance(v2.dtype, xf.XndframesDtype)
v3 = xf.XndframesArray(xnd.xnd(TEST_ARRAY))
assert isinstance(v3.dtype, xf.XndframesDtype)
def test_concatenate_blocks():
v1 = xf.XndframesArray(TEST_ARRAY)
sa = | pd.Series(v1) | pandas.Series |
# Incremental / full download of daily stock price data
# selenium with headless browser PhantomJS, daily
#from 0 to today
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import pandas as pd
import os
from datetime import timedelta
import datetime
from aushare.stock import cons as ct
DAILY_PRICE_DELTA = "https://au.finance.yahoo.com/quote/%s.AX/history?period1=%s&period2=%s&interval=1d&filter=history&frequency=1d"
DAILY_PRICE ='https://au.finance.yahoo.com/quote/%s.AX/history?period1=0&period2=%s&interval=1d&filter=history&frequency=1d'
ASXLIST_FILE_NAME = './data/ASXlistcodes.csv'
def _parser(date):
try:
return pd.datetime.strptime(date, '%d %b %Y')
except:
try:
return pd.datetime.strptime(date, '%d/%m/%Y')
except:
return pd.datetime.strptime(date, '%d %b. %Y')
#if __name__ == "__main__":
df =pd.read_csv(ASXLIST_FILE_NAME,header=1)
codelist = df['ASX code'].values
#codelist =['IRE','TTT','Z1P']
for symbol in codelist:
file_name = ct.DAILY_PRICE_FILE%symbol
s2 = datetime.datetime.now()
print(s2)
period2= int(time.mktime(s2.timetuple()))
if os.path.isfile(file_name):
df = pd.read_csv(file_name,header=0, index_col =0,date_parser=_parser,skipfooter =1,engine='python')
if (df.empty):
continue
df.reset_index(inplace =True)
recent_date = df['Date'].max()
print(recent_date)
s1 = recent_date +timedelta(days=1)
print(s1)
period1= int(time.mktime(s1.timetuple()))
url = DAILY_PRICE_DELTA%(symbol,period1,period2)
no_of_pagedowns = 2
else:
period2= int(time.mktime(s2.timetuple()))
url = DAILY_PRICE%(symbol,period2)
no_of_pagedowns = 50
browser = webdriver.PhantomJS(executable_path='/usr/local/bin/phantomjs')
print(url)
browser.get(url)
time.sleep(1)
elem = browser.find_element_by_tag_name("body")
while no_of_pagedowns:
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(0.5)
no_of_pagedowns-=1
source_data = browser.page_source
try:
soup = BeautifulSoup(source_data, "lxml")
tb = soup.find("table",attrs = {"data-test":"historical-prices"})
na_values = ['NaN', 'N/A', '-']
df1 = pd.read_html(str(tb),header=0,index_col=0,na_values=na_values)
if df1[0] is not None:
if os.path.isfile(file_name):
df2 = df1[0][:-2]
print(df2)
df = | pd.read_csv(file_name,header=0, index_col =0) | pandas.read_csv |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: East Money - Shanghai/Shenzhen board indexes - concept boards
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import pandas as pd
def stock_board_concept_name_em() -> pd.DataFrame:
"""
    East Money - Shanghai/Shenzhen board indexes - concept boards - names
    http://quote.eastmoney.com/center/boardlist.html#concept_board
    :return: concept board names
:rtype: pandas.DataFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
    temp_df.columns = [
        "ๆๅ",
        "ๆๆฐไปท",
        "ๆถจ่ทๅน
",
        "ๆถจ่ท้ข",
        "ๆขๆ็",
        "_",
        "ๆฟๅไปฃ็ ",
        "ๆฟๅๅ็งฐ",
        "_",
        "_",
        "_",
        "_",
        "ๆปๅธๅผ",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "ไธๆถจๅฎถๆฐ",
        "ไธ่ทๅฎถๆฐ",
        "_",
        "_",
        "้ขๆถจ่ก็ฅจ",
        "_",
        "_",
        "้ขๆถจ่ก็ฅจ-ๆถจ่ทๅน
",
    ]
    temp_df = temp_df[
        [
            "ๆๅ",
            "ๆฟๅๅ็งฐ",
            "ๆฟๅไปฃ็ ",
            "ๆๆฐไปท",
            "ๆถจ่ท้ข",
            "ๆถจ่ทๅน
",
            "ๆปๅธๅผ",
            "ๆขๆ็",
            "ไธๆถจๅฎถๆฐ",
            "ไธ่ทๅฎถๆฐ",
            "้ขๆถจ่ก็ฅจ",
            "้ขๆถจ่ก็ฅจ-ๆถจ่ทๅน
",
        ]
    ]
temp_df["ๆๆฐไปท"] = pd.to_numeric(temp_df["ๆๆฐไปท"])
temp_df["ๆถจ่ท้ข"] = pd.to_numeric(temp_df["ๆถจ่ท้ข"])
temp_df["ๆถจ่ทๅน
"] = pd.to_numeric(temp_df["ๆถจ่ทๅน
"])
temp_df["ๆปๅธๅผ"] = pd.to_numeric(temp_df["ๆปๅธๅผ"])
temp_df["ๆขๆ็"] = pd.to_numeric(temp_df["ๆขๆ็"])
temp_df["ไธๆถจๅฎถๆฐ"] = pd.to_numeric(temp_df["ไธๆถจๅฎถๆฐ"])
temp_df["ไธ่ทๅฎถๆฐ"] = pd.to_nu | meric(temp_df["ไธ่ทๅฎถๆฐ"]) | pandas.to_numeric |
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
import theano.tensor as tt
def fit_spindle_density_prior():
#data from purcell
data = [[85, 177],
[89, 148],
[93, 115],
[98, 71],
[105, 42],
[117, 20],
[134, 17],
[148, 27],
[157, 39],
[165, 53],
[170, 68],
[174, 84],
[180, 102],
[184, 123],
[190, 143],
[196, 156],
[202, 165],
[210, 173],
[217, 176],
[222, 177]]
xscale = [0, 4]
yscale = [0, 800]
data_df = get_target_curve(data, xscale, yscale, scale=False)
sample_data = np.random.choice(a=data_df['x'], p=data_df['y'], size=1000)
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Beta('spindle_density', alpha=a, beta=b, observed=sample_data)
trace = pm.sample(2000)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Beta('spindle_density_mean_params', alpha=a_est, beta=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
# pm.traceplot(trace)
# plt.show()
samples = outcome['spindle_density_mean_params']
sns.distplot(samples, kde=True)
x = data_df['x']
y = data_df['y']*len(samples)*(x[1]-x[0])
sns.lineplot(x, y)
plt.show()
print(summary_df)
sp_per_epoch = xscale[1]*outcome['spindle_density_mean_params']*25/60
counts, bins, patches = plt.hist(sp_per_epoch, np.arange(0, 8)-0.5, density=True)
sns.distplot(sp_per_epoch, kde=True, hist=False)
plt.show()
print(counts, bins)
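# --- Editor's sketch (addition): a quick moment-matching cross-check for the Beta fit
# above; useful for sanity-checking the MCMC estimates on the same resampled data.
def beta_moment_match(samples):
    """Return (alpha, beta) of a Beta distribution matching the sample mean and variance."""
    m, v = samples.mean(), samples.var()
    common = m * (1.0 - m) / v - 1.0
    return m * common, (1.0 - m) * common
# Applied to the resampled data inside fit_spindle_density_prior, this should land
# near the posterior means of a and b.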
def fit_spindle_duration():
data = [
[78, 163],
[80, 30],
[81, 15],
[83, 6],
[86, 8],
[91, 26],
[101, 51],
[114, 85],
[124, 105],
[137, 126],
[150, 139],
[164, 150],
[177, 156],
[194, 160],
[208, 163]
]
xscale = [0.4, 2]
yscale = [0, 4000]
data_df = get_target_curve(data, xscale, yscale, scale=False)
sample_data = np.random.choice(a=data_df['x'], p=data_df['y'], size=1000)
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Gamma('spindle_duration', alpha=a, beta=b, observed=sample_data)
trace = pm.sample(2000, njobs=1)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Gamma('spindle_density_mean_params', alpha=a_est, beta=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
pm.traceplot(trace)
plt.show()
samples = outcome['spindle_density_mean_params']
sns.distplot(samples, kde=True)
x = data_df['x']
y = data_df['y'] * len(samples) * (x[1] - x[0])
sns.lineplot(x, y)
plt.show()
print(summary_df)
return samples*(2-0.4)+0.4
def fit_spindle_refractory():
data = [[88, 317],
[118, 99],
[125, 93],
[131, 97],
[137, 115],
[144, 143],
[151, 194],
[158, 223],
[175, 245],
[197, 265],
[239, 287],
[285, 297],
[355, 304],
[432, 307],
[454, 313]]
xscale = [0, 30]
yscale = [0, 0.08]
data_df = get_target_curve(data, xscale, yscale, scale=False)
sample_data = np.random.choice(a=data_df['x'], p=data_df['y'], size=1000)
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Wald('spindle_duration', mu=a, lam=b, observed=sample_data)
trace = pm.sample(2000, njobs=1)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Wald('spindle_density_mean_params', mu=a_est, lam=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
# pm.traceplot(trace)
# plt.show()
samples = outcome['spindle_density_mean_params']
sns.distplot(samples, kde=True, bins=100)
x = data_df['x']
y = data_df['y'] * len(samples) * (x[1] - x[0])
sns.lineplot(x, y)
plt.show()
print(summary_df)
return samples*30+0.5
def get_samples_for_refractory():
samples = fit_spindle_refractory() + fit_spindle_duration()
pd.DataFrame({'samples': samples}).to_pickle('../data/raw/refractory_prior_samples.pkl')
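# Editor's note (added): summing the two sample vectors element-wise is a Monte Carlo
# approximation of the distribution of (refractory gap + spindle duration), assuming the
# two draws are independent; fit_refractory_minus_duration() below then summarises that
# combined prior with a Wald fit.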
def fit_refractory_minus_duration():
sample_data = pd.read_pickle('../data/raw/refractory_prior_samples.pkl')['samples'].values
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Wald('prior', mu=a, lam=b, observed=sample_data)
trace = pm.sample(2000, njobs=1)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Wald('prior_check', mu=a_est, lam=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
samples = outcome['prior_check']
sns.distplot(samples, kde=True)
sns.distplot(sample_data, kde=True)
plt.show()
print(summary_df)
def get_target_curve(data, xscale=None, yscale=None, scale=True):
df = pd.DataFrame(data, columns=['x','y'])
df['y'] = df['y'].max() - df['y']
ranges = df.agg(lambda x: x.max() - x.min())
df = df - df.min()
if scale:
real_range = pd.Series([xscale[1] - xscale[0], yscale[1] - yscale[0]], index=['x','y'])
real_offset = np.array([xscale[0],yscale[0]])
x = np.linspace(*xscale, 99)
else:
real_range = np.array([1,1])
real_offset = np.array([0,0])
x = np.linspace(0, 1, 99)
df = real_range*df/ranges + real_offset
y = np.interp(x, df['x'], df['y'])
df_interp = | pd.DataFrame({'x':x,'y':y}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# time
import time
import datetime
# system
import os
import sys
import re
from IPython.display import HTML
# databases
import MySQLdb as mdb, MySQLdb.cursors as mdb_cursors
import sqlite3
# files
import codecs
# requests and others
import requests
import urllib
# data
import pandas as pd
# pywikibot
import pywikibot
PYWIKIBOT2_DIR = '/srv/wcdo/src_viz/user-config.py'
# scripts
sys.path.insert(0, '/srv/wcdo/src_data')
import wikilanguages_utils
class Logger(object): # this prints both the output to a file and to the terminal screen.
def __init__(self):
self.terminal = sys.stdout
self.log = open("meta_update.out", "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self): pass
# MAIN
######################## WCDO CREATION SCRIPT ########################
def main():
publish_missing_ccc_articles_lists()
return
    while True:
        time.sleep(84600)
        print("Good morning. It is: " + str(datetime.datetime.today()) + ". Let's see if today is the day to publish some stats in meta...")
# CHAINED TO CCC CREATION (ONCE A MONTH) AND TOP CCC
if verify_time_for_iteration():
publish_wcdo_update_meta_pages()
######################################################################
# In this function we create the table language_territories_mapping. # CEE Spring.
def make_table_links_CEE():
territories = wikilanguages_utils.load_languageterritories_mapping()
languages_df = wikilanguages_utils.load_wiki_projects_information(territories);
languages = ['en','az','ba','be','be-tarask','bs','bg','crh','de','el','eo','et','hr','hsb','hu','hy','ka','kk','lt','lv','mk','myv','pl','ro','ru','sh','sq','sr','tr','tt','uk']
langu = ['az','ba','be','be_x_old','bs','bg','crh','de','el','et','hr','hsb','hu','hy','ka','kk','lt','lv','mk','myv','pl','ro','ru','sh','sq','sr','tr','tt','uk']
rows_langs = {'az':'Azerbaijan','ba':'Bashkortostan','be':'Belarus','be_x_old':'Belarus','bs':'Bosnia and Herzegovina','bg':'Bulgaria','crh':'','de':'Austria','eo':'','el':'Greece','et':'Estonia','hr':'Croatia','hsb':'Germany','hu':'Hungary','hy':'Armenia','ka':'Georgia','kk':'Kazakhstan','lt':'Lithuania','lv':'Latvia','mk':'Macedonia','myv':'Russia','pl':'Poland','ro':'','ru':'Russia','sh':'','sq':'Albania','sr':'Serbia','tr':'Turkey','tt':'Tatarstan','uk':'Ukrania'}
country_iso = {'Azerbaijan':'AZ','Belarus':'BY','Bosnia and Herzegovina':'BA','Bulgaria':'BG','Austria':'AT','Greece':'GR','Estonia':'EE','Croatia':'HR','Germany':'DE','Hungary':'HU','Armernia':'AM','Georgia':'GE','Kazakhstan':'KZ','Lithuania':'LT','Latvia':'LV','Macedonia':'MK','Russia':'RU','Poland':'PL','Albania':'AL','Serbia':'SR','Turkey':'TR'}
lists = ['editors', 'featured', 'geolocated', 'keywords', 'women', 'men', 'created_first_three_years', 'created_last_year', 'pageviews', 'discussions']
lists_dict = {'editors':'Editors', 'featured':'Featured', 'geolocated':'Geolocated', 'keywords':'Keywords', 'women':'Women', 'men':'Men', 'created_first_three_years':'Created First Three Years', 'created_last_year':'Created Last Year', 'pageviews':'Pageviews', 'discussions':'Discussions'}
columns_final = ['List']+languages
df_columns_list = columns_final
wikitext = ''
for language in langu:
wikitext+= "==="+languages_df.loc[language]['languagename']+"===\n"
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
header_string = header_string + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for lista in lists:
midline = '|-\n'
row_string = '|'
row_string += lists_dict[lista]+'||'
for row in languages:
if row == 'uk': add = ''
else: add = '||'
# create the URL
string = "https://wcdo.wmflabs.org/top_ccc_articles/?list="+lista
string += "&target_lang="+row
string += "&source_lang="+language
if rows_langs[language] in country_iso:
string += "&source_country=" + country_iso[rows_langs[language]].lower()
URL = '['+string+' '+'โโ '+']'
row_string = row_string + str(URL) + add # here is the value
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
wikitext += wiki_table_string+'\n\n'
return wikitext
def publish_missing_ccc_articles_lists():
glow_langs = ['sd','id', 'jv', 'su', 'hi', 'ta', 'te', 'mr', 'kn', 'ml', 'or', 'pa', 'sa', 'gu', 'en', 'ar', 'es']
# glow_langs = ['sd']
    # Bahasa Indonesia id, Bahasa Jawa jv, Bahasa Sunda su, Hindi hi, Tamil ta, Telugu te, Marathi mr, Kannada kn, Malayalam ml, Odia or, Punjabi pa, Sanskrit sa, Gujarati gu, English - Geolocated for Nigeria en, Arabic - Jordan, Egypt and Tunisia ar, Spanish - Geolocated for Argentina es, Sindhi sd.
for languagecode in glow_langs:
source_lang = 'None'
languagename = languages.loc[languagecode]['languagename']
try: qitems = territories.loc[languagecode]['QitemTerritory'].tolist()
except: qitems = [territories.loc[languagecode]['QitemTerritory']]
wikitext = ' = '+languagename+' Wikipedia Missing local articles =\n'
line = 'Language '+languagename+' is spoken in: '
i=0
for qitem in qitems:
i=i+1
regional = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['regional']
if regional == 'yes': regional = 'region'
else:
regional = 'country'
territoryname = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['territoryname']
ISO = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['ISO31662']
if ISO == '' or ISO == None:
ISO = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['ISO3166']
# if territoryname == None: territoryname = ''
if i==len(qitems)-1:
line = line + territoryname + ' ('+regional+' with ISO code '+ISO+') and '
else:
line = line + territoryname + ' ('+regional+' with ISO code '+ISO+'), '
line = line[:len(line)-2]+'.'
wikitext += 'This is the local content from '+languagename+' related territories that does not exist in '+languagename+' Wikipedia and yet it exists in other language editions, especially those of languages that are spoken also in these territories.\n'
wikitext += line+'\n\n'
# make_table_missing_ccc_articles(topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country):
wikitext += '== 500 Geolocated articles ==\n'
# 500 places
# GEOLOCATED
        # 100 with the most interwiki links
        # 50 with the most inlinks from CCC
        # 25 with the most bytes
        # 25 with the most discussions
wikitext = wikitext + '=== 100 Geolocated articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_interwiki', 100, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 50 Geolocated articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_inlinks_from_original_CCC', 50, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Geolocated articles sorted by number of Bytes ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_bytes', 25, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Geolocated articles sorted by number of Edits in Talk Page ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_discussions', 25, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
# MONUMENTS AND BUILDINGS
        # 25 with the most interwiki links
        # 25 with the most inlinks from CCC
        # 25 with the most pageviews
        # 25 with the most references
wikitext += '== 100 Monuments and buildings articles == \n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# GLAM
        # 25 with the most interwiki links
        # 25 with the most inlinks from CCC
        # 25 with the most pageviews
        # 25 with the most references
wikitext += '== 100 GLAM articles ==\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# EARTH
        # 25 with the most interwiki links
        # 25 with the most inlinks from CCC
        # 25 with the most pageviews
        # 25 with the most references
wikitext += '== 100 Earth articles ==\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# 200 people
# MEN
        # 25 with the most interwiki links
        # 25 with the most inlinks from CCC
        # 25 with the most pageviews
        # 25 with the most references
wikitext += '== 100 Men articles ==\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# WOMEN
        # 25 with the most interwiki links
        # 25 with the most inlinks from CCC
        # 25 with the most pageviews
        # 25 with the most references
wikitext += '== 100 Women articles ==\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# 100 other topics
wikitext += '== 100 Food, music, paintings and sports articles ==\n'
# FOOD
        # 25 with the most pageviews
wikitext = wikitext + '=== 25 Food articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('food', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# MUSIC
        # 25 with the most pageviews
wikitext = wikitext + '=== 25 Music articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('music_creations_and_organizations', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# PAINTINGS
        # 25 with the most pageviews
wikitext = wikitext + '=== 25 Paintings sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('paintings', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# SPORTS AND TEAMS
        # 25 with the most pageviews
wikitext = wikitext + '=== 25 Sports sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('sport_and_teams', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# 100 general
# KEYWORDS
        # 25 with the most interwiki links
        # 25 with the most inlinks from CCC
        # 25 with the most pageviews
        # 25 with the most references
wikitext += '== 100 General language context-based articles ==\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_interwiki', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_pageviews', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_references', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
# new_path = languagecode+'.txt'
# new_days = open(new_path,'w')
# new_days.write(wikitext)
site = pywikibot.Site('meta','meta')
page = pywikibot.Page(site, 'User:Marcmiquel' + '/' + 'test'+'/'+languagecode)
page.save(summary="X", watch=None, minor=False,botflag=False, force=False, asynchronous=False, callback=None,apply_cosmetic_changes=None, text=wikitext)
def make_table_missing_ccc_articles(topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country):
print (topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country)
e = (topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country)
charac = '_'.join(map(str,e))
conn = sqlite3.connect(databases_path + 'missing_ccc.db'); cur = conn.cursor()
# TARGET LANGUAGE
target_language = languages.loc[target_lang]['languagename']
    if target_country != 'None':
target_country = target_country.upper()
if target_country == 'NONE' or target_country == 'ALL': target_country = 'all'
else:
target_country = 'all'
    if target_region != 'None':
target_region = target_region.upper()
if target_region == 'NONE' or target_region == 'ALL': target_region = 'all'
else:
target_region = 'all'
# TOPIC
type = "missing"
# SOURCE lANGUAGE
source_lang=source_lang.lower() #
# CREATING THE QUERY
query = 'SELECT '
columns = ['num','source_lang','page_title','num_interwiki','num_pageviews']
query += '"[[:" || languagecode || ":|" || languagecode || "]]" as source_lang, "[{{fullurl:" || languagecode || ":"|| page_title ||"}} " || REPLACE(page_title,"_"," ") || "]" as page_title, num_pageviews, num_interwiki, '
if order_by in ['num_outlinks','num_inlinks','num_wdproperty','num_discussions','num_inlinks_from_original_CCC','num_outlinks_to_original_CCC','num_bytes','num_references']:
query += order_by+', '
columns = columns + [order_by]
query += '("label" || " " || "(" || label_lang || ")" ) as label_lang, " [{{fullurl:" || "wikidata" || ":" || qitem || "}} " || REPLACE(qitem,"_"," ") || "]" as qitem '
columns = columns + ['label_lang','qitem']
query += 'FROM '+target_lang+'wiki '
query += 'WHERE (page_title_original_lang IS NULL or page_id_original_lang IS NULL) '
if ccc_segment == 'keywords':
query += 'AND keyword_title IS NOT NULL '
if ccc_segment == 'geolocated':
query += 'AND (geocoordinates IS NOT NULL OR location_wd IS NOT NULL) '
if target_country != "none" and target_country != "all":
query += 'AND iso3166 = "'+target_country+'" '
if target_region != "none" and target_region != "all":
query += 'AND iso31662 = "'+target_region+'" '
if topic != "none" and topic != "None" and topic != "all":
if topic == 'men': # male
query += 'AND gender = "Q6581097" '
elif topic == 'women': # female
query += 'AND gender = "Q6581072" '
elif topic == 'people':
query += 'AND gender IS NOT NULL '
else:
query += 'AND '+topic+' IS NOT NULL '
if source_lang == 'coexist':
query += 'AND non_language_pairs IS NULL '
elif source_lang == 'nocoexist':
query += 'AND non_language_pairs == 1 '
elif source_lang != "none":
query += 'AND languagecode = "'+source_lang+'" '
query += 'AND (num_inlinks_from_original_CCC!=0 OR num_outlinks_to_original_CCC!=0) '
if order_by == "none" or order_by == "None":
query += 'ORDER BY num_pageviews DESC '
else:
query += 'ORDER BY '+order_by+' DESC '
query += 'LIMIT 500;'
# if limit == "none":
# query += 'LIMIT 100;'
# else:
# query += 'LIMIT '+str(limit)+';'
print(query)
df = pd.read_sql_query(query, conn)#, parameters)
df = df.fillna(0)
if len(df)==0: return ''
page_titles = df.page_title.tolist()
for i in range(0,len(page_titles)-1):
page_title = page_titles[i].split('}}')[1].strip()
page_titles[i] = page_title[:len(page_title)-1]
# print (page_titles)
mysql_con_read = wikilanguages_utils.establish_mysql_connection_read(target_lang); mysql_cur_read = mysql_con_read.cursor()
page_titles_existing = []
page_asstring = ','.join( ['%s'] * len(page_titles) )
query = 'SELECT ll_title FROM langlinks WHERE ll_title IN (%s)' % page_asstring
    mysql_cur_read.execute(query, page_titles)  # Extract
result = mysql_cur_read.fetchall()
for row in result:
page_titles_existing.append(row[0].decode('utf-8'))
df.num_pageviews = df.num_pageviews.astype('int64')
i = 0
target_langy = '('+target_lang +')'
qitems_list = []
for index, row in df.iterrows():
page_title = row['page_title'].split('}}')[1].strip()
page_title = page_title[:len(page_title)-1]
label_lang = row['label_lang']
if label_lang == 0 or target_langy not in label_lang:
df.loc[index, 'label_lang'] = ''
else:
label_lang = label_lang.split('(')[0].strip()
df.loc[index, 'label_lang'] = '[{{fullurl:'+target_lang+':'+label_lang.replace(' ','_')+'}} '+label_lang+']'
if row['qitem'] in qitems_list or i>=limit or page_title in page_titles_existing:
df.drop(index, inplace=True)
else:
# print ((row['page_title']))
qitems_list.append(row['qitem'])
i+=1
column_list_dict = {'source_lang':'Wiki','page_title':'Title','num_pageviews':'Pageviews','num_interwiki':'Interwiki', 'num_inlinks_from_original_CCC':'Inlinks CCC','num_references':'References','num_bytes':'Bytes','num_discussions':'Discussions','label_lang':target_language+' WD Label','qitem':'WD Qitem'}
df=df.rename(columns=column_list_dict)
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
path = '/srv/wcdo/src_viz/missing_ccc'
if not os.path.exists(path):
os.makedirs(path)
path2 = path+'/'+target_lang
if not os.path.exists(path2):
os.makedirs(path2)
file_name = path2+'/missing_ccc_'+target_lang+'_'+charac+'.txt'
df.to_csv(file_name, sep='\t', encoding='utf-8')
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
header_string = header_string + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
row_string = row_string + str(value) + add # here is the value
# here we might add colors. -> it would be nice to make a different colour for each language background, so it would be easy to see when one starts and another finishes.
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
if len(df_rows)==0:
wiki_table_string = ''
return wiki_table_string
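# Editor's note (added): this mirrors the calls made in publish_missing_ccc_articles_lists
# above; e.g. the "100 geolocated articles by interwiki links" table for Sindhi would be
# make_table_missing_ccc_articles('None', 'num_interwiki', 100, 'None', 'None', 'geolocated', 'sd', 'None', 'None')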
### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### ---
# TABLES
# function name composition rule: x, y, (rows, columns)
# In this function we create the table language_territories_mapping.
def make_table_language_territories_mapping():
df = pd.read_csv(databases_path + 'language_territories_mapping.csv',sep='\t',na_filter = False)
df = df[['territoryname','territorynameNative','QitemTerritory','languagenameEnglishethnologue','WikimediaLanguagecode','demonym','demonymNative','ISO3166','ISO31662','regional','country','indigenous','languagestatuscountry','officialnationalorregional']]
territorylanguagecodes_original = list(df.WikimediaLanguagecode.values)
indexs = df.index.values.tolist()
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('-','_')
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('be_tarask', 'be_x_old')
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('nan', 'zh_min_nan')
languagenames={}
updated_langcodes = list(df.WikimediaLanguagecode.values)
for x in range(0,len(territorylanguagecodes_original)):
curindex = indexs[x]
languagenames[curindex]=languages.loc[updated_langcodes[x]]['languagename']
df['Language Name'] = pd.Series(languagenames)
languagecodes={}
for x in range(0,len(territorylanguagecodes_original)):
curindex = indexs[x]
curlanguagecode = territorylanguagecodes_original[x]
languagecodes[curindex]=curlanguagecode
df['WikimediaLanguagecode'] = pd.Series(languagecodes)
# languagenames_local={}
# for languagecode in territorylanguagecodes:
# languagenames_local[languagecode]=languages.loc[languagecode]['languagename']
# df['Language Local'] = pd.Series(languagenames_local)
df = df.reset_index()
df = df.fillna('')
qitems={}
indexs = df.index.tolist()
qitems_list = list(df.QitemTerritory.values)
for x in range(0,len(qitems_list)):
curqitem = qitems_list[x]
curindex = indexs[x]
if curqitem != None and curqitem!='': qitems[curindex]='[[wikidata:'+curqitem+'|'+curqitem+']]'
else: qitems[curindex]=''
df['Qitems'] = pd.Series(qitems)
columns = ['Language Name','WikimediaLanguagecode','Qitems','territorynameNative','demonymNative','ISO3166','ISO31662']
# columns = ['Language Name','WikimediaLanguagecode','Qitems','territoryname','territorynameNative','demonymNative','ISO3166','ISO31662','country']
df = df[columns] # selecting the parameters to export
# print (df.head())
columns_dict = {'Language Name':'Language','WikimediaLanguagecode':'Wiki','Qitems':'WD Qitem','territoryname':'Territory','territorynameNative':'Territory (Local)','demonymNative':'Demonyms (Local)','ISO3166':'ISO 3166', 'ISO3662':'ISO 3166-2','country':'Country'}
df=df.rename(columns=columns_dict)
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
header_string = header_string + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
row_string = row_string + str(value) + add # here is the value
# here we might add colors. -> it would be nice to make a different colour for each language background, so it would be easy to see when one starts and another finishes.
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
wikitext = '* Generated at '+datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S')+'\n'
wikitext += wiki_table_string
return wikitext
def make_table_ccc_extent_all_languages():
# QUESTION: What is the extent of cultural context content in each language edition?
# percentatge de contingut รบnic (sense cap ILL) -> pensar si posar-lo a la taula de extent. โจ
# OBTAIN AND FORMAT THE DATA.
conn = sqlite3.connect(databases_path + stats_db); cursor = conn.cursor()
df = pd.DataFrame(wikilanguagecodes)
df = df.set_index(0)
reformatted_wp_numberarticles = {}
for languagecode,value in wikipedialanguage_numberarticles.items():
reformatted_wp_numberarticles[languagecode]='{:,}'.format(int(value))
df['wp_number_articles']= pd.Series(reformatted_wp_numberarticles)
# CCC %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "wp" AND set2descriptor = "ccc" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
rank_dict = {}; i=1
lang_dict = {}
abs_rel_value_dict = {}
for row in cursor.execute(query):
if row[0] in languageswithoutterritory: continue
lang_dict[row[0]]=languages.loc[row[0]]['languagename']
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
rank_dict[row[0]]=i
i=i+1
df['Language'] = pd.Series(lang_dict)
df['Nยบ'] = pd.Series(rank_dict)
df['ccc_number_articles'] = pd.Series(abs_rel_value_dict)
# CCC GL %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "wp" AND set2descriptor = "ccc_geolocated" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
abs_rel_value_dict = {}
for row in cursor.execute(query):
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
df['geolocated_number_articles'] = | pd.Series(abs_rel_value_dict) | pandas.Series |
"""
This file contains functions that allows running adaptive
selection in parallel.
@author: <NAME>
"""
from typing import List, Any, Optional
import pandas as pd
from sklearn.base import clone
# It can serialize class methods and lambda functions.
import pathos.multiprocessing as mp
def add_partition_key(
df: pd.DataFrame,
series_keys: List[str],
n_partitions: int
) -> pd.DataFrame:
"""
Add to `df` a new column that helps to balance load between
different processes uniformly.
:param df:
data to be transformed in long format
:param series_keys:
columns that are identifiers of unique time series
:param n_partitions:
number of processes that will be used for parallel
execution
:return:
DataFrame with a new column named 'partition_key'
"""
keys_df = df[series_keys].drop_duplicates()
keys_df = (
keys_df
.reset_index()
.rename(columns={'index': 'partition_key'})
)
keys_df['partition_key'] = keys_df['partition_key'].apply(
lambda x: x % n_partitions
)
df = df.merge(keys_df, on=series_keys)
return df
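# --- Illustrative usage sketch (not part of the original module) ---
# Toy example with a single hypothetical key column 'series_id'. Each unique
# series is assigned the bucket (index of its first row) % n_partitions, so
# groupby('partition_key') yields chunks that the worker pool can process
# independently.
def _example_add_partition_key() -> pd.Series:
    toy = pd.DataFrame({
        'series_id': ['a', 'b', 'c', 'a', 'b', 'c'],
        'y': [1, 2, 3, 4, 5, 6],
    })
    toy = add_partition_key(toy, series_keys=['series_id'], n_partitions=2)
    # Here 'a' -> 0, 'b' -> 1, 'c' -> 0, so the partition sizes are 4 and 2.
    return toy.groupby('partition_key').size()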
def fit_selector_in_parallel(
selector_instance: Any,
df: pd.DataFrame,
name_of_target: str,
series_keys: List[str],
scoring_keys: Optional[List[str]] = None,
n_processes: int = 1
) -> 'type(selector_instance)':
"""
Create a new selector with the specified parameters and fit it in
parallel, partitioning the work by unique time series.
:param selector_instance:
instance that specifies class of resulting selector
and its initial parameters
:param df:
DataFrame in long format that contains time series
:param name_of_target:
name of target column
:param series_keys:
columns that are identifiers of unique time series
:param scoring_keys:
identifiers of groups such that best forecasters are
selected per group, not per individual time series;
see the documentation of the selector's `fit` method for details
:param n_processes:
number of parallel processes, default is 1
:return:
new fitted instance of selector
"""
fit_kwargs = {
'name_of_target': name_of_target,
'series_keys': series_keys,
'scoring_keys': scoring_keys or series_keys
}
try:
df = add_partition_key(df, series_keys, n_processes)
selectors = mp.Pool(n_processes).map(
lambda x: clone(selector_instance).fit(x, **fit_kwargs),
[group for _, group in df.groupby('partition_key', as_index=False)]
) # pragma: no cover (`coverage` has issues with multiprocessing)
results_tables = [
selector.best_scores_ for selector in selectors
]
best_scores = pd.concat(results_tables)
from PyQt5 import QtWidgets, uic, QtCore, Qt
from PyQt5.QtWidgets import QAction, QMessageBox, QFileDialog, QDesktopWidget, QColorDialog, QFontDialog, QDialog, QTableWidgetItem, QVBoxLayout, QSplashScreen, QProgressBar
from PyQt5.QtGui import QIcon, QPixmap
import sys, os, time
from webbrowser import open_new_tab
import xlwt
import subprocess as sp
from plotting import *
from mode import *
from recorder import *
from read_outfiles import *
from utilities import *
import matplotlib as mpl
from RS import*
import numpy as np
import pandas as pd
from pyvistaqt import QtInteractor
main=None
class FeViewMain(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(FeViewMain, self).__init__(parent)
# load MainWindows.ui from Qt Designer
uic.loadUi('UI\MainWindows.ui', self)
# add the pyvista interactor object
vlayout = QVBoxLayout()
self.p=self.plot_widget = QtInteractor(self.frame)
self.p.show_axes()
vlayout.addWidget(self.plot_widget.interactor)
self.frame.setLayout(vlayout)
self.setCentralWidget(self.frame)
vlayout.setContentsMargins(0, 0, 0, 0)
# add some tool bar
self.btn_tool_openTCL = QAction(QIcon('UI/icon/Open.png'),'Open TCL File', self)
self.btn_tool_editTCL = QAction(QIcon('UI/icon/edit.png'),'Edit TCL File with CypressEditor', self)
self.btn_tool_run_OS = QAction(QIcon('UI/icon/run.png'),'run TCL file with OpenSees', self)
self.btn_iso = QAction(QIcon('UI/icon/iso.png'),'View isometric', self)
self.btn_iso.setCheckable(True) # toolbar button checkable
self.btn_xy_zpluss = QAction(QIcon('UI/icon/xy_zpluss.png'), 'View xy_zpluss', self)
self.btn_xy_zpluss.setCheckable(True)
self.btn_xy_zminus = QAction(QIcon('UI/icon/xy_zminus.png'), 'View xy_zminus', self)
self.btn_xy_zminus.setCheckable(True)
self.btn_xz_ypluss = QAction(QIcon('UI/icon/xz_ypluss.png'), 'View xz_ypluss', self)
self.btn_xz_ypluss.setCheckable(True)
self.btn_xz_yminus = QAction(QIcon('UI/icon/xz_yminus.png'), 'View xz_yminus', self)
self.btn_xz_yminus.setCheckable(True)
self.btn_yz_xpluss = QAction(QIcon('UI/icon/yz_xpluss.png'), 'View yz_xpluss', self)
self.btn_yz_xpluss.setCheckable(True)
self.btn_yz_xminus = QAction(QIcon('UI/icon/yz_xminus.png'), 'View yz_xminus', self)
self.btn_yz_xminus.setCheckable(True)
self.btn_node_label = QAction(QIcon('UI/icon/nl.png'), 'View Node Label', self)
self.btn_node_label.setCheckable(True)
self.btn_node_cord = QAction(QIcon('UI/icon/nc.png'), 'View Node Co-ordinate', self)
self.btn_node_cord.setCheckable(True)
self.btn_load = QAction(QIcon('UI/icon/load.png'), 'View Point Load', self)
self.btn_load.setCheckable(True)
self.btn_color_plot_background= QAction(QIcon('UI/icon/color_plot_background.png'), 'Change Plot Background Color', self)
self.btn_color_gui = QAction(QIcon('UI/icon/color_gui.png'), 'Change Theme Color', self)
self.btn_font = QAction(QIcon('UI/icon/font.png'), 'Change Font Style', self)
self.btn_plot_image = QAction(QIcon('UI/icon/plot_image.png'), 'Save Plot as Image', self)
self.btn_plot_image_wb = QAction(QIcon('UI/icon/plot_image_wb.png'), 'Save Plot as Image with White Background', self)
self.btn_calc = QAction(QIcon('UI/icon/calculator.png'), 'Calculator', self)
self.btn_minumize = QAction(QIcon('UI/icon/minimize.png'), 'Mimimize the Window', self)
self.btn_maximize = QAction(QIcon('UI/icon/maximize.png'), 'Maximize the Window', self)
self.btn_full_s = QAction(QIcon('UI/icon/full_s.png'), 'Fullscreen', self)
self.btn_center = QAction(QIcon('UI/icon/center.png'), 'Center', self)
self.btn_min_s = QAction(QIcon('UI/icon/min.png'), 'Minimum Window Size', self)
self.btn_max_s = QAction(QIcon('UI/icon/max.png'), 'Maximum Window Size', self)
self.btn_restore = QAction(QIcon('UI/icon/rest_w.png'), 'Restore Window', self)
self.btn_help = QAction(QIcon('UI/icon/help.png'), 'Help', self)
self.btn_about = QAction(QIcon('UI/icon/info.png'), 'Info', self)
self.btn_close = QAction(QIcon('UI/icon/close.png'), 'Exit', self)
toolbar = self.addToolBar('Exit')
toolbar.addAction(self.btn_tool_openTCL)
toolbar.addAction(self.btn_tool_editTCL)
toolbar.addAction(self.btn_tool_run_OS)
toolbar.addSeparator()
toolbar.addAction(self.btn_iso)
toolbar.addAction(self.btn_xy_zpluss)
toolbar.addAction(self.btn_xy_zminus)
toolbar.addAction(self.btn_xz_ypluss)
toolbar.addAction(self.btn_xz_yminus)
toolbar.addAction(self.btn_yz_xpluss)
toolbar.addAction(self.btn_yz_xminus)
toolbar.addSeparator()# add separator
toolbar.addAction(self.btn_node_label)
toolbar.addAction(self.btn_node_cord)
toolbar.addAction(self.btn_load)
toolbar.addSeparator()
toolbar.addAction(self.btn_color_plot_background)
toolbar.addAction(self.btn_color_gui)
toolbar.addAction(self.btn_font)
toolbar.addSeparator()
toolbar.addAction(self.btn_plot_image)
toolbar.addAction(self.btn_plot_image_wb)
toolbar.addAction(self.btn_calc)
toolbar.addSeparator()
toolbar.addAction(self.btn_minumize)
toolbar.addAction(self.btn_maximize)
toolbar.addAction(self.btn_full_s)
toolbar.addAction(self.btn_center)
toolbar.addAction(self.btn_min_s)
toolbar.addAction(self.btn_max_s)
toolbar.addAction(self.btn_restore)
toolbar.addSeparator()
toolbar.addAction(self.btn_help)
toolbar.addAction(self.btn_about)
toolbar.addAction(self.btn_close)
toolbar.addSeparator()
# margin & layout setting for toolbar
toolbar.setContentsMargins(0, 0, 0, 0)
toolbar.layout().setSpacing(0)
toolbar.layout().setContentsMargins(0, 0, 0, 0)
self.btn_tool_openTCL.triggered.connect(self.openTCL) # call function for 'Open TCL file' toolbar button
self.actionOpen.triggered.connect(self.openTCL) # call function for 'Open TCL file' main manu button
self.btn_apply_static.clicked.connect(self.DispStatic)
self.actionApply_Static.triggered.connect(self.DispStatic)
self.btn_apply_modal.clicked.connect(self.DispModal)
self.actionApply_Modal.triggered.connect(self.DispModal)
self.btn_apply_dynamic.clicked.connect(self.DispDynamic)
self.Apply_Dyanamic.triggered.connect(self.DispDynamic)
self.btn_response_static.clicked.connect(self.res_static)
self.actionShow_Response.triggered.connect(self.res_static)
self.btn_response_dynamic.clicked.connect(self.res_dynamic)
self.actionShow_Response_dynamic.triggered.connect(self.res_dynamic)
self.btn_tool_editTCL.triggered.connect(self.edit_TCL)
self.actionEdit.triggered.connect(self.edit_TCL)
self.btn_tool_run_OS.triggered.connect(self.runOS)
self.actionRun_OpenSees.triggered.connect(self.runOS)
self.btn_iso.triggered.connect(self.iso)
self.btn_xy_zpluss.triggered.connect(self.xy_zpluss)
self.btn_xy_zminus.triggered.connect(self.xy_zminus)
self.btn_xz_ypluss.triggered.connect(self.xz_ypluss)
self.btn_xz_yminus.triggered.connect(self.xz_yminus)
self.btn_yz_xpluss.triggered.connect(self.yz_xpluss)
self.btn_yz_xminus.triggered.connect(self.yz_xminus)
self.actionFeView.triggered.connect(self.about_feview)
self.btn_about.triggered.connect(self.about_feview)
self.actionPlot_Background_Color.triggered.connect(self.Plot_Background_Color)
self.btn_color_plot_background.triggered.connect(self.Plot_Background_Color)
self.actionGUI_Font.triggered.connect(self.GUI_Font)
self.btn_font.triggered.connect(self.GUI_Font)
self.actionTheme_Color.triggered.connect(self.gui_color)
self.btn_color_gui.triggered.connect(self.gui_color)
self.btn_plot_image.triggered.connect(self.savePlot)
self.actionWith_background.triggered.connect(self.savePlot)
self.btn_plot_image_wb.triggered.connect(self.savePlot_wb)
self.actionWhite_Background.triggered.connect(self.savePlot_wb)
self.btn_calc.triggered.connect(self.calculator)
self.actionMinimize.triggered.connect(lambda: self.showMinimized())
self.btn_minumize.triggered.connect(lambda: self.showMinimized())
self.actionMaximize.triggered.connect(lambda: self.showMaximized())
self.btn_maximize.triggered.connect(lambda: self.showMaximized())
self.actionFull_Screen.triggered.connect(lambda: self.showFullScreen())
self.btn_full_s.triggered.connect(lambda: self.showFullScreen())
self.actionCenter.triggered.connect(lambda: self.center())
self.btn_center.triggered.connect(lambda: self.center())
self.actionMinimum_Size.triggered.connect(lambda: self.resize(self.minimumSize()))
self.btn_min_s.triggered.connect(lambda: self.resize(self.minimumSize()))
self.actionMaximum_Size.triggered.connect(lambda: self.resize(self.maximumSize()))
self.btn_max_s.triggered.connect(lambda: self.resize(self.maximumSize()))
self.actionRestore.triggered.connect(lambda: self.showNormal())
self.btn_restore.triggered.connect(lambda: self.showNormal())
self.actionSSL.triggered.connect(lambda: open_new_tab('Help\FeView_Help.chm'))
self.btn_help.triggered.connect(lambda: open_new_tab('Help\FeView_Help.chm'))
self.actionOpenSees.triggered.connect(lambda: open_new_tab('https://opensees.berkeley.edu'))
self.actionSSL_Website.triggered.connect(lambda: open_new_tab('http://www.kim2kie.com/3_ach/SSL_Software.php'))
self.actionFeView_Website.triggered.connect(lambda: open_new_tab('http://www.kim2kie.com/3_ach/FeView/FeView.php'))
self.btn_node_label.triggered.connect(self.nodelebels)
self.actionNode_Label.triggered.connect(self.nodelebels)
self.btn_node_cord.triggered.connect(self.nodecoordinates)
self.actionNode_Coordinate.triggered.connect(self.nodecoordinates)
self.btn_load.triggered.connect(self.pointload_show)
self.actionLoad.triggered.connect(self.pointload_show)
self.actionExit.triggered.connect(self.close)
self.btn_close.triggered.connect(self.close)
self.actionMesh_Fiew.triggered.connect(self.mesh_view_model)
self.actionSmooth_View.triggered.connect(self.smoth_view_model)
self.actionWireframe.triggered.connect(self.wiremesh_model)
self.actionMesh_View_2.triggered.connect(self.mesh_view_model_deform)
self.actionSmooth_View_2.triggered.connect(self.smoth_view_model_deform)
self.actionMesh_View_Wiremesh_undeform.triggered.connect(self.mesh_wiremesh_model_deform)
self.actionSmooth_View_Wiremesh_undeform.triggered.connect(self.smooth_wiremesh_model_deform)
self.btn_datatable_static.clicked.connect(self.data_table_static)
self.actionData_Table.triggered.connect(self.data_table_static)
self.btn_datatable_modal.clicked.connect(self.data_table_modal)
self.actionData_Table_modal.triggered.connect(self.data_table_modal)
self.btn_datatable_dynamic.clicked.connect(self.data_table_dynamic)
self.actionData_Table_dynamic.triggered.connect(self.data_table_dynamic)
self.actionView_load.triggered.connect(self.load_setting_arrow)
self.reportEdit.keyReleaseEvent = self.handleKeyRelease
self.addInfoText("Opened TCL file")
self.prgb = QProgressBar(self)
self.statusBar().addPermanentWidget(self.prgb)
self.dialogs = list()
def progress(self, value, newLines):
#self.te.append('\n'.join(newLines))
self.prgb.setValue(value)
def addInfoText(self, text):
"""Adds info text"""
return self.reportEdit.insertPlainText("\n >>"+str(text))
def handleKeyRelease(self, event):
"""Handles key inputs to report box"""
if(event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter):
self.interpretUserInput(self.reportEdit.toPlainText())
# function to uncheck the other model display style settings except 'mesh view'
def mesh_view_model(self):
self.actionSmooth_View.setChecked(False)
self.actionWireframe.setChecked(False)
# function to uncheck the other model display style settings except 'smooth view'
def smoth_view_model(self):
self.actionMesh_Fiew.setChecked(False)
self.actionWireframe.setChecked(False)
# function to uncheck the other model display style settings except 'wireframe view'
def wiremesh_model(self):
self.actionMesh_Fiew.setChecked(False)
self.actionSmooth_View.setChecked(False)
# function to uncheck the other deformed-model display style settings except 'mesh view'
def mesh_view_model_deform(self):
self.actionSmooth_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to uncheck the other deformed-model display style settings except 'smooth view'
def smoth_view_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to uncheck the other deformed-model display style settings except 'mesh view + wiremesh'
def mesh_wiremesh_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionSmooth_View_2.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to uncheck the other deformed-model display style settings except 'smooth view + wiremesh'
def smooth_wiremesh_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionSmooth_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
def openTCL(self):
try:
global numModes #set numModes as global variable
# create file dialog function to browse file path
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
self.fileName, _ = QFileDialog.getOpenFileName(self, "OpenSees File", "","OpenSees File (*.tcl)", options=options)
self.file_path, self.file_name = os.path.split(self.fileName)
[filename0, sep, ext] = self.file_name.partition('.')
# make path for output files
self.result_directory = os.path.join(self.file_path, r'out_files_%s' % filename0)
if not os.path.exists(self.result_directory):
# create directory for output files
os.mkdir(self.result_directory)
# clear all actors from plot interface
self.prgb.setMaximum(len(node(self.fileName)))
self.p.clear()
if self.actionSmooth_View.isChecked() == True:
# call plotter considering smooth view
plotter(self.p, self.fileName, 'smooth_view',NodeCoords(self.fileName), None, None)
elif self.actionWireframe.isChecked() == True:
# call plotter considering wiremesh view
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName),None, None)
elif self.actionMesh_Fiew.isChecked() == True:
# call plotter considering mesh view
plotter(self.p, self.fileName, 'mesh_view',NodeCoords(self.fileName), None, None)
#plotter_rigiddiaphram(self.p, self.fileName, NodeCoords(self.fileName))
if (ndm_v(self.fileName))==2:
self.p.view_xy() # initial setting for 2d interface considering x-y axis view
else:
self.p.view_isometric() # initial setting for 3d interface considering isometric view
# read number of modes as "numModes"
numModes=modeNumber(self.fileName)
# clear previous item from "Mode Num." Combobox
self.cb_numNodes.clear()
if numModes.size>0:
for i in range(int(numModes)):
# add item to "Mode Num." combobox as Mode_1...
self.cb_numNodes.addItem('Mode_'+str(i+1))
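# detect which response recorders (displacement, rotation, force, moment, acceleration, velocity) are defined in the TCL file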
self.recorder_disp, self.recorder_rot, self.recorder_force, self.recorder_moment, self.recorder_accel, self.recorder_vel = recorder_types(
self.fileName)
if self.recorder_disp==1:
# add item to "Component" combobox for displacement in static analysis result
self.cb_node_contour_static.addItem('Displacement, Ux',)
self.cb_node_contour_static.addItem('Displacement, Uy')
self.cb_node_contour_static.addItem('Displacement, Uz')
self.cb_node_contour_static.addItem('Displacement, Uxyz')
if self.recorder_rot==1:
# add item to "Component" combobox for rotation in static analysis result
self.cb_node_contour_static.addItem('Rotation, Rx')
self.cb_node_contour_static.addItem('Rotation, Ry')
self.cb_node_contour_static.addItem('Rotation, Rz')
self.cb_node_contour_static.addItem('Rotation, Rxyz')
if self.recorder_force==1:
# add item to "Component" combobox for force reaction in static analysis result
self.cb_node_contour_static.addItem('Force Reaction, RFx')
self.cb_node_contour_static.addItem('Force Reaction, RFy')
self.cb_node_contour_static.addItem('Force Reaction, RFz')
self.cb_node_contour_static.addItem('Force Reaction, RFxyz')
if self.recorder_moment==1:
# add item to "Component" combobox for moment reaction in static analysis result
self.cb_node_contour_static.addItem('Moment Reaction, RMx')
self.cb_node_contour_static.addItem('Moment Reaction, RMy')
self.cb_node_contour_static.addItem('Moment Reaction, RMz')
self.cb_node_contour_static.addItem('Moment Reaction, RMxyz')
if self.recorder_disp == 1:
# add item to "Component" combobox for displacement in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Displacement, Ux')
self.cb_node_contour_dynamic.addItem('Displacement, Uy')
self.cb_node_contour_dynamic.addItem('Displacement, Uz')
self.cb_node_contour_dynamic.addItem('Displacement, Uxyz')
if self.recorder_rot == 1:
# add item to "Component" combobox for rotation in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Rotation, Rx')
self.cb_node_contour_dynamic.addItem('Rotation, Ry')
self.cb_node_contour_dynamic.addItem('Rotation, Rz')
self.cb_node_contour_dynamic.addItem('Rotation, Rxyz')
if self.recorder_force == 1:
# add item to "Component" combobox for force reaction in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Force Reaction, RFx')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFy')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFz')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFxyz')
if self.recorder_moment == 1:
# add item to "Component" combobox for moment reaction in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMx')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMy')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMz')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMxyz')
if self.recorder_accel == 1:
# add item to "Component" combobox for acceleration in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Acceleration, Ax')
self.cb_node_contour_dynamic.addItem('Acceleration, Ay')
self.cb_node_contour_dynamic.addItem('Acceleration, Az')
self.cb_node_contour_dynamic.addItem('Acceleration, Axyz')
if self.recorder_vel == 1:
# add item to "Component" combobox for velocity in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Velocity, Vx')
self.cb_node_contour_dynamic.addItem('Velocity, Vy')
self.cb_node_contour_dynamic.addItem('Velocity, Vz')
self.cb_node_contour_dynamic.addItem('Velocity, Vxyz')
self.setWindowTitle(
# windows title to show file path and filename
"{}[*] - {}".format((self.fileName + ' ['+filename0)+']', 'FeView'))
try:
# show total node and element in status bar
self.statusBar().showMessage('Total Node : '+str(len(node(self.fileName)))+'; Total Element :'+total_element(self.fileName))
except:
QMessageBox.critical(self, "Error", "No node or element found")
if self.actionView_load.isChecked()==True:
# show point load
point_load(self.fileName,self.p,load_arrow_size, load_font_size,load_arrow_color,load_font_color)
if self.actionView_Support.isChecked() == True:
# show support
support(self.fileName,self.p)
self.addInfoText("Successfully loaded file \n" + self.fileName)
except:
QMessageBox.critical(self, "Error", "Please check TCL file")
def DispStatic(self):
try:
self.btn_apply_modal.setChecked(False)
self.btn_apply_dynamic.setChecked(False)
scalefactor = float(self.tb_sef_scale_factor.text()) # scale factor for deformation (static, modal and dynamic analysis)
if self.recorder_disp==1:
# read output files for displacement
self.outdispFile = OpenSeesOutputRead(os.path.join(self.result_directory,'Node_displacements.out'))
if step_static(self.fileName).size>0:
# number of steps for static (if dynamic/transient analysis also included)
self.step_statics=int(step_static(self.fileName))
else:
# number of steps for only static analysis
self.step_statics = len(self.outdispFile[:, 1])
self.step_dynamic = len(self.outdispFile[:, 1]) - self.step_statics # steps for transient analysis
if self.recorder_disp == 1:
# read output files for displacement
self.deformation=(out_response((os.path.join(self.result_directory,'Node_displacements.out')), self.step_statics, ndm_v(self.fileName),'all'))
self.dispNodeCoords = NodeCoords(self.fileName) + (scalefactor * self.deformation)
if self.recorder_rot == 1:
# read output files for rotation
self.rotation=(out_response((os.path.join(self.result_directory,'Node_rotations.out')), self.step_statics, ndm_v(self.fileName),'rotation_moment'))
self.outrotFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_rotations.out'))
if self.recorder_force == 1:
# read output files for force reaction
self.forcereaction=(out_response((os.path.join(self.result_directory,'Node_forceReactions.out')), self.step_statics, ndm_v(self.fileName),'all'))
self.outfreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_forceReactions.out'))
if self.recorder_moment == 1:
# read output files for moment reaction
self.momentreaction = (out_response((os.path.join(self.result_directory, 'Node_momentReactions.out')), self.step_statics,ndm_v(self.fileName),'rotation_moment'))
self.outmreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_momentReactions.out'))
self.p.clear()
node_contour_type = (self.cb_node_contour_static.currentText()) # get current text from "Component" combobox (Static result)
if self.actionMesh_View_2.isChecked() == True:
if node_contour_type=='Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x= np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = '+str(d_max_x)+')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uxyz':
scalars = self.deformation[:, :3]
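# resultant magnitude per node: sqrt(Ux^2 + Uy^2 + Uz^2)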
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rx':
scalars = self.rotation[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rz':
scalars = self.rotation[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFy':
scalars = self.forcereaction[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFz':
scalars = self.forcereaction[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFxyz':
scalars = self.forcereaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMx':
scalars = self.momentreaction[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMy':
scalars = self.momentreaction[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMx\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMz':
scalars = self.momentreaction[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMz\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMxyz':
scalars = self.momentreaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif self.actionSmooth_View_2.isChecked() == True:
if node_contour_type == 'Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x = np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = ' + str(d_max_x) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Displacement, Uxyz':
scalars = self.deformation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Rx':
scalars = self.rotation[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Rz':
scalars = self.rotation[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Force Reaction, RFy':
scalars = self.forcereaction[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
# stitle = 'Force Reaction, RFy\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Force Reaction, RFz':
scalars = self.forcereaction[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Force Reaction, RFxyz':
scalars = self.forcereaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Moment Reaction, RMx':
scalars = self.momentreaction[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Moment Reaction, RMy':
scalars = self.momentreaction[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
# stitle = 'Moment Reaction, RMx\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Moment Reaction, RMz':
scalars = self.momentreaction[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
# stitle = 'Moment Reaction, RMz\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Moment Reaction, RMxyz':
scalars = self.momentreaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif self.actionMesh_View_Wiremesh_undeform.isChecked() == True:
if node_contour_type=='Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x= np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = '+str(d_max_x)+')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Displacement, Uxyz':
scalars = self.deformation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Rx':
scalars = self.rotation[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Rz':
scalars = self.rotation[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFy':
scalars = self.forcereaction[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Force Reaction, RFy\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFz':
scalars = self.forcereaction[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFxyz':
scalars = self.forcereaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMx':
scalars = self.momentreaction[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMy':
scalars = self.momentreaction[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMx\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMz':
scalars = self.momentreaction[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMz\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMxyz':
scalars = self.momentreaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif self.actionSmooth_View_Wiremesh_undeform.isChecked() == True:
if node_contour_type=='Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x= np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = '+str(d_max_x)+')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Displacement, Uxyz':
scalars = self.deformation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Rx':
scalars = self.rotation[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Rz':
scalars = self.rotation[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFy':
scalars = self.forcereaction[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Force Reaction, RFy\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFz':
scalars = self.forcereaction[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Force Reaction, RFxyz':
scalars = self.forcereaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMx':
scalars = self.momentreaction[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMy':
scalars = self.momentreaction[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMx\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMz':
scalars = self.momentreaction[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMz\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif node_contour_type=='Moment Reaction, RMxyz':
scalars = self.momentreaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
if self.step_dynamic>0:
for i in range(self.step_dynamic):
# add items to as dynamic steps in Step Num. Combobox
self.cb_steps_dynamic.addItem(str(i))
if self.actionView_Support.isChecked() == True:
support_disp(self.fileName, self.p, self.dispNodeCoords)
except:
QMessageBox.critical(self, "Error", "Check the recorder options")
def DispModal(self):
try:
self.dispNodeCoords=None
self.btn_apply_static.setChecked(False)
self.btn_apply_dynamic.setChecked(False)
mode_file = (self.cb_numNodes.currentText())
scalefactor = float(self.tb_sef_scale_factor.text())
# read period.out file
period_file = os.path.join(self.result_directory, 'Periods.out')
outperiodFile = OpenSeesOutputRead(period_file)
nodes = node(self.fileName)[:, 1].astype(int)
#all_nodes = nodes.reshape((-1, 1))
all_nodes=np.sort(nodes).reshape((-1, 1))
nodes_inex=[]
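# map each node tag (TCL file order) to its index in the sorted tag list, so rows of the mode-shape output (assumed to be written in ascending tag order) can be reordered to match NodeCoords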
for i in range (len(nodes)):
nodes_in=list(all_nodes).index(nodes[i])
nodes_inex.append(nodes_in)
for i in range (int(numModes)):
if mode_file == 'Mode_' + str(i + 1):
mode_filename = os.path.join(self.result_directory, 'Mode_' + str(i + 1) + '.out')
outdispModalFile = OpenSeesOutputRead(mode_filename)
step = len(outdispModalFile[:, 1])
if ndm_v(self.fileName)==3:
self.deformation_modal = outdispModalFile[int(step) - 1, 0:].reshape(-1, 3)
self.deformation_modal_s=[]
for j in range(len(nodes)):
self.deformation_modal_1=self.deformation_modal[nodes_inex[j]]
self.deformation_modal_s.append(self.deformation_modal_1)
self.deformation_modal_f=np.vstack(self.deformation_modal_s)
if ndm_v(self.fileName)==2:
deformation_modal_xy = outdispModalFile[int(step) - 1, 0:].reshape(-1, 2)
z_def = np.repeat(0, len(deformation_modal_xy[:, 0]))
self.deformation_modal_f = np.column_stack([deformation_modal_xy, z_def])
self.dispNodeCoords_Modal = NodeCoords(self.fileName) + (scalefactor * self.deformation_modal_f)
self.p.clear()
node_contour_type = (self.cb_node_contour_modal.currentText())
if self.actionMesh_View_2.isChecked() == True:
if node_contour_type == 'Displacement, Ux':
scalars = self.deformation_modal_f[:, 0]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Ux\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uy':
scalars = self.deformation_modal_f[:, 1]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uy\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uz':
scalars = self.deformation_modal_f[:, 2]
stitle = 'Mode_'+str(i+1)+'[Freq. = '+str(float(np.round(1/outperiodFile[i],3)))+' Hz,'+' T. = '+str(float(np.round(outperiodFile[i],5)))+' s]'+'; '+'Disp., Uz\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uxyz':
scalars = self.deformation_modal_f[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Mode_'+str(i+1)+'[Freq. = '+str(float(np.round(1/outperiodFile[i],3)))+' Hz,'+' T. = '+str(float(np.round(outperiodFile[i],5)))+' s]'+'; '+'Disp., Uxyz\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
if self.actionSmooth_View_2.isChecked() == True:
if node_contour_type == 'Displacement, Ux':
scalars = self.deformation_modal_f[:, 0]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Ux\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uy':
scalars = self.deformation_modal_f[:, 1]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uy\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uz':
scalars = self.deformation_modal_f[:, 2]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uz\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uxyz':
scalars = self.deformation_modal_f[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Mode_'+str(i+1)+'[Freq. = '+str(float(np.round(1/outperiodFile[i],3)))+' Hz,'+' T. = '+str(float(np.round(outperiodFile[i],5)))+' s]'+'; '+'Disp., Uxyz\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if self.actionMesh_View_Wiremesh_undeform.isChecked() == True:
if node_contour_type == 'Displacement, Ux':
scalars = self.deformation_modal_f[:, 0]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Ux\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uy':
scalars = self.deformation_modal_f[:, 1]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uy\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uz':
scalars = self.deformation_modal_f[:, 2]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uz\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uxyz':
scalars = self.deformation_modal_f[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uxyz\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_Modal, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
if self.actionSmooth_View_Wiremesh_undeform.isChecked() == True:
if node_contour_type == 'Displacement, Ux':
scalars = self.deformation_modal_f[:, 0]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Ux\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uy':
scalars = self.deformation_modal_f[:, 1]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uy\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uz':
scalars = self.deformation_modal_f[:, 2]
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uz\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
if node_contour_type == 'Displacement, Uxyz':
scalars = self.deformation_modal_f[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Mode_' + str(i + 1) + '[Freq. = ' + str(
float(np.round(1 / outperiodFile[i], 3))) + ' Hz,' + ' T. = ' + str(
float(np.round(outperiodFile[i], 5))) + ' s]' + '; ' + 'Disp., Uxyz\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_Modal, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
if self.actionView_Support.isChecked() == True:
# show support
support_disp(self.fileName, self.p, self.dispNodeCoords_Modal)
except:
QMessageBox.critical(self, "Error", "Check the recorder options")
# function to open help file
def FeView_Help(self):
try:
programName = "Help\FeView_Help.chm"
sp.Popen([programName])
except:
QMessageBox.critical(self, "Error", "Please check whether FeView_Help.chm exists in the Help folder of the installation directory")
# function to create the close event for the GUI
def closeEvent(self, event):
return event.accept() if QMessageBox.question(
self, "Close", "Want to Exit ?", QMessageBox.Yes | QMessageBox.No,
QMessageBox.No) == QMessageBox.Yes else event.ignore()
# function to save the model as a picture with current background-color
def savePlot(self):
try:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName_p, _ = QFileDialog.getSaveFileName(self, "Save Model as Picture", "","PNG (*.png)")
if fileName_p:
self.currentdir = os.path.dirname(os.path.abspath(fileName_p))
self.p.screenshot(fileName_p)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
# function to save the model as a picture with white background
def savePlot_wb(self):
try:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName_p_wb, _ = QFileDialog.getSaveFileName(self, "Save Plot as Picture", "","PNG (*.png)")
if fileName_p_wb:
self.currentdir = os.path.dirname(os.path.abspath(fileName_p_wb))
self.p.screenshot(fileName_p_wb,transparent_background=True,return_img=10)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
def calculator(self):
try:
programName = "calc.exe"
sp.Popen([programName])
except:
QMessageBox.critical(self, "Error", "calc.exe should be available in your system")
def center(self):
try:
"""Center and resize the window."""
self.showNormal()
self.resize(QDesktopWidget().screenGeometry().width() // 1.25,
QDesktopWidget().screenGeometry().height() // 1.25)
qr = self.frameGeometry()
qr.moveCenter(QDesktopWidget().availableGeometry().center())
return self.move(qr.topLeft())
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
def gui_color(self):
try:
GUI_color = QColorDialog.getColor()
if GUI_color.isValid():
#self.p.set_background(str(GUI_color.name()))
main.setStyleSheet("background-color: white;")
main.setStyleSheet("background-color:"+ str(GUI_color.name())+';')
main.dockWidget.setStyleSheet("background-color:"+ str(GUI_color.name())+';')
#main.dockWidget1.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.groupBox.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.groupBox_10.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.groupBox_2.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.groupBox_5.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.groupBox_7.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.btn_openTCL.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.label_3.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.sldr_deform_scale.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.btn_apply_static.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.label.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.sldr_deform_scale_modal.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.btn_apply_modal.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.sldr_deform_scale_dynamic.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.btn_apply_dynamic.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
main.label_5.setStyleSheet("background-color:" + str(GUI_color.name()) + ';')
except:
QMessageBox.critical(self, "Error", "Please provide python supported color")
def Plot_Background_Color(self):
try:
plot_background_color = QColorDialog.getColor()
if plot_background_color.isValid():
self.p.set_background(str(plot_background_color.name()))
except:
QMessageBox.critical(self, "Error", "Please provide python supported color")
def GUI_Font(self):
try:
self.setFont(QFontDialog.getFont(self)[0])
except:
QMessageBox.critical(self, "Error", "Font style should be supported by PyQt5")
def DispDynamic(self):
try:
self.dispNodeCoords = None
self.dispNodeCoords_Modal=None
self.btn_apply_static.setChecked(False)
self.btn_apply_modal.setChecked(False)
#global deformation
scalefactor = float(self.tb_sef_scale_factor.text())
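# recorder files store the static steps first, followed by the dynamic steps, so the selected dynamic step is offset by self.step_statics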
if self.recorder_disp==1:
self.outdispFile = OpenSeesOutputRead(os.path.join(self.result_directory,'Node_displacements.out'))
self.deformation_dynamic = (out_response((os.path.join(self.result_directory, 'Node_displacements.out')), ((self.step_statics) + int(self.cb_steps_dynamic.currentText())+1),ndm_v(self.fileName), 'all'))
self.dispNodeCoords_dynamic = NodeCoords(self.fileName) + (scalefactor * self.deformation_dynamic)
if self.recorder_rot == 1:
self.rotation_dyanamic = (out_response((os.path.join(self.result_directory, 'Node_rotations.out')), ((self.step_statics) + int(self.cb_steps_dynamic.currentText())+1), ndm_v(self.fileName), 'rotation_moment'))
self.outrotFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_rotations.out'))
if self.recorder_force == 1:
self.forcereaction_dynamic=(out_response((os.path.join(self.result_directory,'Node_forceReactions.out')), ((self.step_statics) + int(self.cb_steps_dynamic.currentText())+1), ndm_v(self.fileName),'all'))
self.outfreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_forceReactions.out'))
if self.recorder_moment == 1:
self.momentreaction_dynamic = (out_response((os.path.join(self.result_directory, 'Node_momentReactions.out')), ((self.step_statics) + int(self.cb_steps_dynamic.currentText())+1),ndm_v(self.fileName),'rotation_moment'))
self.outmreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_momentReactions.out'))
if self.recorder_accel == 1:
self.acc_dynamic = (
out_response((os.path.join(self.result_directory, 'Node_accelerations.out')),
((self.step_statics) + int(self.cb_steps_dynamic.currentText())+1), ndm_v(self.fileName),
'all'))
self.outaccFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_accelerations.out'))
if self.recorder_vel == 1:
self.vel_dynamic = (
out_response((os.path.join(self.result_directory, 'Node_velocities.out')),
((self.step_statics) + int(self.cb_steps_dynamic.currentText())+1), ndm_v(self.fileName),
'all'))
self.outvelFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_velocities.out'))
node_contour_type_dynamic = (self.cb_node_contour_dynamic.currentText())
self.p.clear()
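            # The four branches below differ only in the render mode passed to plotter()
            # ('mesh_view' or 'smooth_view', optionally with an undeformed wireframe overlay);
            # each branch selects the scalar field matching the chosen contour type.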
if self.actionMesh_View_2.isChecked() == True:
if node_contour_type_dynamic == 'Displacement, Ux':
scalars = self.deformation_dynamic[:, 0]
stitle = 'Displacement, Ux (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uy':
scalars = self.deformation_dynamic[:, 1]
stitle = 'Displacement, Uy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uz':
scalars = self.deformation_dynamic[:, 2]
stitle = 'Displacement, Uz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uxyz':
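                    # resultant magnitude sqrt(Ux**2 + Uy**2 + Uz**2); the same pattern
                    # is used for the other 'xyz' resultant contours below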
scalars = self.deformation_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Displacement, Uxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rx':
scalars = self.rotation_dyanamic[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Ry':
scalars = self.rotation_dyanamic[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rz':
scalars = self.rotation_dyanamic[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rxyz':
scalars = self.rotation_dyanamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFx':
scalars = self.forcereaction_dynamic[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFy':
scalars = self.forcereaction_dynamic[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFz':
scalars = self.forcereaction_dynamic[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFxyz':
scalars = self.forcereaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMx':
scalars = self.momentreaction_dynamic[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMy':
scalars = self.momentreaction_dynamic[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMz':
scalars = self.momentreaction_dynamic[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMxyz':
scalars = self.momentreaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ax':
scalars = self.acc_dynamic[:, 0]
stitle = 'Acceleration, Ax (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ay':
scalars = self.acc_dynamic[:, 1]
stitle = 'Acceleration, Ay (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Az':
scalars = self.acc_dynamic[:, 2]
stitle = 'Acceleration, Az (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Axyz':
scalars = self.acc_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Acceleration, Axyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vx':
scalars = self.vel_dynamic[:, 0]
stitle = 'Velocity, Vx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vy':
scalars = self.vel_dynamic[:, 1]
stitle = 'Velocity, Vy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vz':
scalars = self.vel_dynamic[:, 2]
stitle = 'Velocity, Vz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vxyz':
scalars = self.vel_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Velocity, Vxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
elif self.actionSmooth_View_2.isChecked() == True:
if node_contour_type_dynamic == 'Displacement, Ux':
scalars = self.deformation_dynamic[:, 0]
stitle = 'Displacement, Ux (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uy':
scalars = self.deformation_dynamic[:, 1]
stitle = 'Displacement, Uy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uz':
scalars = self.deformation_dynamic[:, 2]
stitle = 'Displacement, Uz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uxyz':
scalars = self.deformation_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Displacement, Uxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rx':
scalars = self.rotation_dyanamic[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Ry':
scalars = self.rotation_dyanamic[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rz':
scalars = self.rotation_dyanamic[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rxyz':
scalars = self.rotation_dyanamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFx':
scalars = self.forcereaction_dynamic[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFy':
scalars = self.forcereaction_dynamic[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFz':
scalars = self.forcereaction_dynamic[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFxyz':
scalars = self.forcereaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMx':
scalars = self.momentreaction_dynamic[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMy':
scalars = self.momentreaction_dynamic[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMz':
scalars = self.momentreaction_dynamic[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMxyz':
scalars = self.momentreaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ax':
scalars = self.acc_dynamic[:, 0]
stitle = 'Acceleration, Ax (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ay':
scalars = self.acc_dynamic[:, 1]
stitle = 'Acceleration, Ay (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Az':
scalars = self.acc_dynamic[:, 2]
stitle = 'Acceleration, Az (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Axyz':
scalars = self.acc_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Acceleration, Axyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vx':
scalars = self.vel_dynamic[:, 0]
stitle = 'Velocity, Vx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vy':
scalars = self.vel_dynamic[:, 1]
stitle = 'Velocity, Vy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vz':
scalars = self.vel_dynamic[:, 2]
stitle = 'Velocity, Vz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vxyz':
scalars = self.vel_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Velocity, Vxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
elif self.actionMesh_View_Wiremesh_undeform.isChecked() == True:
if node_contour_type_dynamic == 'Displacement, Ux':
scalars = self.deformation_dynamic[:, 0]
stitle = 'Displacement, Ux (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uy':
scalars = self.deformation_dynamic[:, 1]
stitle = 'Displacement, Uy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uz':
scalars = self.deformation_dynamic[:, 2]
stitle = 'Displacement, Uz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uxyz':
scalars = self.deformation_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Displacement, Uxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rx':
scalars = self.rotation_dyanamic[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Ry':
scalars = self.rotation_dyanamic[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rz':
scalars = self.rotation_dyanamic[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rxyz':
scalars = self.rotation_dyanamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFx':
scalars = self.forcereaction_dynamic[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFy':
scalars = self.forcereaction_dynamic[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFz':
scalars = self.forcereaction_dynamic[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFxyz':
scalars = self.forcereaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMx':
scalars = self.momentreaction_dynamic[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMy':
scalars = self.momentreaction_dynamic[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMz':
scalars = self.momentreaction_dynamic[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMxyz':
scalars = self.momentreaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ax':
scalars = self.acc_dynamic[:, 0]
stitle = 'Acceleration, Ax (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ay':
scalars = self.acc_dynamic[:, 1]
stitle = 'Acceleration, Ay (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Az':
scalars = self.acc_dynamic[:, 2]
stitle = 'Acceleration, Az (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Axyz':
scalars = self.acc_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Acceleration, Axyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vx':
scalars = self.vel_dynamic[:, 0]
stitle = 'Velocity, Vx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vy':
scalars = self.vel_dynamic[:, 1]
stitle = 'Velocity, Vy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vz':
scalars = self.vel_dynamic[:, 2]
stitle = 'Velocity, Vz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vxyz':
scalars = self.vel_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Velocity, Vxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords_dynamic, scalars, stitle)
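                # overlay the undeformed geometry as a wireframe on top of the contour plot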
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
elif self.actionSmooth_View_Wiremesh_undeform.isChecked() == True:
if node_contour_type_dynamic == 'Displacement, Ux':
scalars = self.deformation_dynamic[:, 0]
stitle = 'Displacement, Ux (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view',self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uy':
scalars = self.deformation_dynamic[:, 1]
stitle = 'Displacement, Uy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uz':
scalars = self.deformation_dynamic[:, 2]
stitle = 'Displacement, Uz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Displacement, Uxyz':
scalars = self.deformation_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Displacement, Uxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rx':
scalars = self.rotation_dyanamic[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Ry':
scalars = self.rotation_dyanamic[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rz':
scalars = self.rotation_dyanamic[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Rotation, Rxyz':
scalars = self.rotation_dyanamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFx':
scalars = self.forcereaction_dynamic[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFy':
scalars = self.forcereaction_dynamic[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFz':
scalars = self.forcereaction_dynamic[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Force Reaction, RFxyz':
scalars = self.forcereaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMx':
scalars = self.momentreaction_dynamic[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMy':
scalars = self.momentreaction_dynamic[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMz':
scalars = self.momentreaction_dynamic[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Moment Reaction, RMxyz':
scalars = self.momentreaction_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ax':
scalars = self.acc_dynamic[:, 0]
stitle = 'Acceleration, Ax (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Ay':
scalars = self.acc_dynamic[:, 1]
stitle = 'Acceleration, Ay (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Az':
scalars = self.acc_dynamic[:, 2]
stitle = 'Acceleration, Az (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Acceleration, Axyz':
scalars = self.acc_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Acceleration, Axyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vx':
scalars = self.vel_dynamic[:, 0]
stitle = 'Velocity, Vx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vy':
scalars = self.vel_dynamic[:, 1]
stitle = 'Velocity, Vy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vz':
scalars = self.vel_dynamic[:, 2]
stitle = 'Velocity, Vz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
if node_contour_type_dynamic == 'Velocity, Vxyz':
scalars = self.vel_dynamic[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Velocity, Vxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords_dynamic, scalars, stitle)
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName), None, None)
if self.actionView_Support.isChecked() == True:
# show support
support_disp(self.fileName, self.p, self.dispNodeCoords_dynamic)
except:
QMessageBox.critical(self, "Error", "Check the recorders options")
# function to open input file with editor
def edit_TCL(self):
        try:
            import subprocess as sp
            programName = "CypressEditor/CypressEditor.exe"
            sp.Popen([programName, self.fileName], cwd=self.file_path)
except:
QMessageBox.critical(self, "Error", "Please check OpenSees TCL input file")
    # function to run the OpenSees analysis on the TCL input file
def runOS(self):
try:
import subprocess as sp
            programName = "OpenSees/OpenSees.exe"
            sp.Popen([programName, self.fileName], cwd=self.result_directory)
except:
QMessageBox.critical(self, "Error", "Please check TCL input file and also OpenSees.exe (you should have installed 'Active tcl'")
    # function to uncheck the other view buttons and switch to the isometric view
def iso(self):
try:
self.p.view_isometric()
self.btn_xy_zpluss.setChecked(False)
self.btn_xy_zminus.setChecked(False)
self.btn_xz_ypluss.setChecked(False)
self.btn_xz_yminus.setChecked(False)
self.btn_yz_xpluss.setChecked(False)
self.btn_yz_xminus.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to uncheck the other view buttons and switch to the xy_zpluss view
def xy_zpluss(self):
try:
self.p.view_xy()
self.btn_iso.setChecked(False)
self.btn_xy_zminus.setChecked(False)
self.btn_xz_ypluss.setChecked(False)
self.btn_xz_yminus.setChecked(False)
self.btn_yz_xpluss.setChecked(False)
self.btn_yz_xminus.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to uncheck the other view buttons and switch to the xy_zminus view
def xy_zminus(self):
try:
self.p.view_xy(negative=True)
self.btn_iso.setChecked(False)
self.btn_xy_zpluss.setChecked(False)
self.btn_xz_ypluss.setChecked(False)
self.btn_xz_yminus.setChecked(False)
self.btn_yz_xpluss.setChecked(False)
self.btn_yz_xminus.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to uncheck the other view buttons and switch to the xz_ypluss view
def xz_ypluss(self):
try:
self.p.view_xz()
self.btn_iso.setChecked(False)
self.btn_xy_zminus.setChecked(False)
self.btn_xy_zpluss.setChecked(False)
self.btn_xz_yminus.setChecked(False)
self.btn_yz_xpluss.setChecked(False)
self.btn_yz_xminus.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to uncheck the other view buttons and switch to the xz_yminus view
def xz_yminus(self):
try:
self.p.view_xz(negative=True)
self.btn_iso.setChecked(False)
self.btn_xy_zminus.setChecked(False)
self.btn_xy_zpluss.setChecked(False)
self.btn_xz_ypluss.setChecked(False)
self.btn_yz_xpluss.setChecked(False)
self.btn_yz_xminus.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to uncheck the other view buttons and switch to the yz_xpluss view
def yz_xpluss(self):
try:
self.p.view_yz()
self.btn_iso.setChecked(False)
self.btn_xy_zminus.setChecked(False)
self.btn_xy_zpluss.setChecked(False)
self.btn_xz_ypluss.setChecked(False)
self.btn_xz_yminus.setChecked(False)
self.btn_yz_xminus.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to uncheck the other view buttons and switch to the yz_xminus view
def yz_xminus(self):
try:
self.p.view_yz(negative=True)
self.btn_iso.setChecked(False)
self.btn_xy_zminus.setChecked(False)
self.btn_xy_zpluss.setChecked(False)
self.btn_xz_ypluss.setChecked(False)
self.btn_xz_yminus.setChecked(False)
self.btn_yz_xpluss.setChecked(False)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
# function to show node labels
def nodelebels(self):
try:
self.btn_node_cord.setChecked(False)
self.btn_load.setChecked(False)
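            # Label the nodes on whichever deformed coordinate set is currently available
            # (static, modal, then dynamic); fall back to the undeformed coordinates.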
if self.btn_node_label.isChecked() == True:
try:
# NodesLabel considering static displacement coordinates
NodesLabel(self.p,self.fileName,self.dispNodeCoords, 1)
except:
pass
try:
# NodesLabel considering modal displacement coordinates
NodesLabel(self.p,self.fileName,self.dispNodeCoords_Modal, 1)
except:
pass
try:
# NodesLabel considering dynamic displacement coordinates
NodesLabel(self.p,self.fileName,self.dispNodeCoords_dynamic, 1)
except:
NodesLabel(self.p, self.fileName, NodeCoords(self.fileName), 1)
if self.btn_node_label.isChecked() == False:
NodesLabel(self.p, self.fileName, NodeCoords(self.fileName), 0)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to show node coordinates
def nodecoordinates(self):
try:
self.btn_node_label.setChecked(False)
self.btn_load.setChecked(False)
if self.btn_node_cord.isChecked() == True:
try:
dispNodeCoords = NodeCoords(self.fileName) + (self.deformation)
NodesCoordinate(self.p,self.dispNodeCoords,np.round(dispNodeCoords,3), 1)
except:
pass
try:
dispNodeCoords_Modal = NodeCoords(self.fileName) + (self.deformation_modal)
NodesCoordinate(self.p,self.dispNodeCoords_Modal,np.round(dispNodeCoords_Modal,3), 1)
except:
pass
try:
dispNodeCoords_dynamic = NodeCoords(self.fileName) + (self.deformation_dynamic)
NodesCoordinate(self.p,self.dispNodeCoords_dynamic,np.round(dispNodeCoords_dynamic,3), 1)
except:
NodesCoordinate(self.p, NodeCoords(self.fileName),NodeCoords(self.fileName), 1)
#if self.btn_node_cord.isChecked() == False:
# NodesCoordinate(self.p, self.fileName, NodeCoords(self.fileName), 0)
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
    # function to update the scale factor labels from the slider values
def sfvalue(self):
try:
self.lbl_scale_cactor.setText(str(self.sldr_deform_scale.value()))
self.lbl_scale_cactor_modal.setText(str(self.sldr_deform_scale_modal.value()))
self.lbl_scale_cactor_dynamic.setText(str(self.sldr_deform_scale_dynamic.value()))
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
#
def res_static(self):
try:
dialog = response_static(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Please apply the satatic analysis first")
def res_dynamic(self):
try:
dialog = response_dynamic(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Please apply the satatic analysis first")
def about_feview(self):
try:
dialog = about(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Unknown error")
def data_table_static(self):
try:
dialog = table_static(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Unknown error")
def data_table_modal(self):
try:
dialog = table_modal(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Unknown error")
def load_setting_arrow(self):
try:
if main.actionView_load.isChecked() == True:
dialog = load_setting(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Unknown error")
def data_table_dynamic(self):
try:
dialog = table_dynamic(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Unknown error")
def pointload_show(self):
try:
self.btn_node_label.setChecked(False)
self.btn_node_cord.setChecked(False)
if self.btn_load.isChecked() == True:
dialog = load_setting_btn(self)
self.dialogs.append(dialog)
dialog.show()
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
class response_static(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(response_static, self).__init__(parent)
uic.loadUi('UI\StaticResponseWindows.ui', self)
self.btn_insert_static.clicked.connect(self.insert_row_static)
self.btn_add_roe_static.clicked.connect(self.add_row_static)
self.btn_delet_row_static.clicked.connect(self.delet_row_static)
self.cb_dt_static.stateChanged.connect(self.dt_static)
self.cb_multiStatic.stateChanged.connect(self.multi_static)
self.actionChange_Background_Color.triggered.connect(self.plotresponse_static_bg_color)
self.actionLine_Color_g.triggered.connect(self.plotresponse_static_mg_linecolor)
self.actionSolid.triggered.connect(self.mg_ls_solid)
self.actionDotted.triggered.connect(self.mg_ls_dotted)
self.actionDashed.triggered.connect(self.mg_ls_dashed)
self.actionDashdot.triggered.connect(self.mg_ls_dashdot)
self.action0_2.triggered.connect(self.mg_lw_0_2)
self.action0_4.triggered.connect(self.mg_lw_0_4)
self.action0_6.triggered.connect(self.mg_lw_0_6)
self.action0_8.triggered.connect(self.mg_lw_0_8)
self.action1.triggered.connect(self.mg_lw_1)
self.mb_savedata.triggered.connect(self.savedata_static)
self.btn_insert_supportnode.clicked.connect(self.insert_row_pushover)
self.btn_add_roe_pushover.clicked.connect(self.add_row_pushover)
self.btn_delete_roe_pushover.clicked.connect(self.delet_row_pushover)
self.apply_pushover.clicked.connect(self.plotresponse_pushover)
self.actionSave_Pushover_Data.triggered.connect(self.savedata_pushover)
nodes=node(main.fileName)[:, 1].astype(int)
if nodes.size>0:
for i in range(len(nodes)):
self.cb_resp_nodenumber.addItem(str(nodes[i]))
self.cb_pushover_nodenumber.addItem(str(nodes[i]))
self.apply_repnse.clicked.connect(self.plotresponse_static)
def savedata_pushover(self):
try:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
datafilename, _ = QFileDialog.getSaveFileName(self, "Save Data as Text", "", "Text File (*.txt)")
if datafilename:
self.currentdir = os.path.dirname(os.path.abspath(datafilename))
with open(datafilename, 'w') as table:
                    table.write('Result data from pushover analysis: \n\n')
table.write('Node Number: ' + str(index_cb + 1) + '\n')
if node_component_pushover == 'Ux':
table.write('Displacement_Ux' + '\t'+'Base Shear_Vx\n')
elif node_component_pushover == 'Uy':
table.write('Displacement_Uy' + '\t'+'Base Shear_Vy\n')
                    elif node_component_pushover == 'Uz':
                        table.write('Displacement_Uz' + '\t'+'Base Shear_Vz\n')
for row in zip(np.round(res_disp,6),np.round(base_shear_total,6)):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
except:
QMessageBox.critical(self, "Error", "Data not found, please apply for pushover")
def plotresponse_pushover(self):
global index_cb,res_disp,base_shear_total, node_component_pushover
try:
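            # Pushover curve: |displacement| of the selected control node on the x-axis
            # versus the total base shear (summed support reactions) on the y-axis.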
index_cb = self.cb_pushover_nodenumber.currentIndex()
intrl_disp_static = (np.linspace(0, 1, len((main.outdispFile[0:main.step_statics, 0]))))
node_component_pushover = self.cb_pushover_component.currentText()
#time_static = (float(self.tb_dt_static.text()))
#time_series_static = (np.linspace(0, time_static * (len(intrl_disp_static) - 1), len(intrl_disp_static)))
            zero_2d_static = np.repeat(0, len(intrl_disp_static))
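            # Read the user-entered support node numbers from the table widget
            # into an integer array (via a pandas DataFrame).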
col_count = self.tbl_pushover_supnode.columnCount()
row_count = self.tbl_pushover_supnode.rowCount()
headers = [str(self.tbl_pushover_supnode.horizontalHeaderItem(i).text()) for i in range(col_count)]
df_list = []
for row in range(row_count):
df_list2 = []
for col in range(col_count):
table_item = self.tbl_pushover_supnode.item(row, col)
df_list2.append('' if table_item is None else str(table_item.text()))
df_list.append(df_list2)
df = pd.DataFrame(df_list, columns=headers)
sup_node_pushover = df.values.astype(int)
base_shear = []
self.PushoverWidget.canvas_pushover.axes_pushover.clear()
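            # Apply the user-selected plot styling if it has been set; otherwise each
            # try/except below falls back to a default value.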
try:
self.PushoverWidget.canvas_pushover.axes_pushover.set_facecolor(str(plotresponse_static_bg_color.name()))
except:
self.PushoverWidget.canvas_pushover.axes_pushover.set_facecolor('#000000')
try:
self.PushoverWidget.canvas_pushover.axes_pushover.grid(True, which="both", color=str(plotresponse_static_mg_color.name()))
except:
self.PushoverWidget.canvas_pushover.axes_pushover.grid(True, which="both", color='#ffffff')
try:
self.PushoverWidget.canvas_pushover.axes_pushover.grid(True, which="both", linestyle=lstyle)
except:
self.PushoverWidget.canvas_pushover.axes_pushover.grid(True, which="both", linestyle='--')
try:
self.PushoverWidget.canvas_pushover.axes_pushover.grid(True, which="both", linewidth=lwidth)
except:
self.PushoverWidget.canvas_pushover.axes_pushover.grid(True, which="both", linewidth=0.4)
# #
if node_component_pushover == 'Ux':
if ndm_v(main.fileName)==3:
res_disp=np.abs((main.outdispFile[0:main.step_statics, (3*index_cb+1)]))
elif ndm_v(main.fileName) == 2:
res_disp=np.abs((main.outdispFile[0:main.step_statics, (2*index_cb+1)]))
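                # collect the reaction-force history of every listed support node
                # (repeated for the other components below)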
for i in range(len(sup_node_pushover)):
if ndm_v(main.fileName) == 3:
res_baseshear = (main.outfreactFile[0:main.step_statics, (3 * sup_node_pushover[i] - 2)])
elif ndm_v(main.fileName) == 2:
res_baseshear = (main.outfreactFile[0:main.step_statics, (2 * sup_node_pushover[i] - 1)])
base_shear.append(res_baseshear)
self.PushoverWidget.canvas_pushover.axes_pushover.set_ylabel('Base Shear, Vx',
fontname="Times New Roman", size=10)
self.PushoverWidget.canvas_pushover.axes_pushover.set_xlabel('Displacement, Ux',
fontname="Times New Roman", size=10)
self.PushoverWidget.canvas_pushover.axes_pushover.set_xlim(xmin=np.min(res_disp), xmax=np.max(res_disp))
elif node_component_pushover == 'Uy':
if ndm_v(main.fileName)==3:
res_disp=np.abs((main.outdispFile[0:main.step_statics, (3*index_cb+2)]))
elif ndm_v(main.fileName) == 2:
res_disp=np.abs((main.outdispFile[0:main.step_statics, (2*index_cb+2)]))
for i in range(len(sup_node_pushover)):
if ndm_v(main.fileName) == 3:
res_baseshear = np.abs((main.outfreactFile[0:main.step_statics, (3 * sup_node_pushover[i] - 1)]))
elif ndm_v(main.fileName) == 2:
res_baseshear = np.abs((main.outfreactFile[0:main.step_statics, (2 * sup_node_pushover[i])]))
base_shear.append(res_baseshear)
self.PushoverWidget.canvas_pushover.axes_pushover.set_ylabel('Base Shear, Vy',
fontname="Times New Roman", size=10)
self.PushoverWidget.canvas_pushover.axes_pushover.set_xlabel('Displacement, Uy',
fontname="Times New Roman", size=10)
self.PushoverWidget.canvas_pushover.axes_pushover.set_xlim(xmin=np.min(res_disp), xmax=np.max(res_disp))
elif node_component_pushover == 'Uz':
if ndm_v(main.fileName)==3:
res_disp=np.abs((main.outdispFile[0:main.step_statics, (3*index_cb+3)]))
self.PushoverWidget.canvas_pushover.axes_pushover.set_xlim(xmin=np.min(res_disp),
xmax=np.max(res_disp))
elif ndm_v(main.fileName) == 2:
res_disp=zero_2d_static
for i in range(len(sup_node_pushover)):
if ndm_v(main.fileName) == 3:
                        res_baseshear = np.abs((main.outfreactFile[0:main.step_statics, (3 * sup_node_pushover[i])]))
elif ndm_v(main.fileName) == 2:
res_baseshear = zero_2d_static
base_shear.append(res_baseshear)
self.PushoverWidget.canvas_pushover.axes_pushover.set_ylabel('Base Shear, Vz',
fontname="Times New Roman", size=10)
self.PushoverWidget.canvas_pushover.axes_pushover.set_xlabel('Displacement, Uz',
fontname="Times New Roman", size=10)
self.PushoverWidget.canvas_pushover.axes_pushover.yaxis.offsetText.set_fontsize(9)
self.PushoverWidget.canvas_pushover.axes_pushover.yaxis.offsetText.set_fontname("Times New Roman")
for tick in self.PushoverWidget.canvas_pushover.axes_pushover.get_xticklabels():
tick.set_fontname("Times New Roman")
tick.set_fontsize(9)
for tick in self.PushoverWidget.canvas_pushover.axes_pushover.get_yticklabels():
tick.set_fontname("Times New Roman")
tick.set_fontsize(9)
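            # Total base shear at each step: absolute value of the sum of the
            # support-node reactions (rows = steps after the transpose).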
base_shear_t = (np.transpose(
np.reshape(np.ravel(base_shear), (len(sup_node_pushover), len(intrl_disp_static)))))
base_shear_total=[]
for i in range (len(base_shear_t[:,0])):
base_shear_total.append(np.abs(np.sum(base_shear_t[i,:])))
self.PushoverWidget.canvas_pushover.axes_pushover.plot(res_disp,base_shear_total,color='r',linewidth=1)
self.PushoverWidget.canvas_pushover.axes_pushover.set_ylim(ymin=np.min(base_shear_total), ymax=np.max(base_shear_total))
mpl.rcParams['savefig.dpi'] = 1000
self.PushoverWidget.canvas_pushover.draw()
except:
QMessageBox.critical(self, "Error", "Please ckeck support node")
def insert_row_pushover(self):
for i in range (int(self.tb_supportnode.text())):
self.tbl_pushover_supnode.insertRow(i)
def add_row_pushover(self):
row = self.tbl_pushover_supnode.rowCount()
self.tbl_pushover_supnode.insertRow(row)
def delet_row_pushover(self):
index = self.tbl_pushover_supnode.currentIndex()
self.tbl_pushover_supnode.removeRow(index.row())
def insert_row_static(self):
for i in range (int(self.tb_mnode_static.text())):
self.tbl_static_mtp.insertRow(i)
def add_row_static(self):
row = self.tbl_static_mtp.rowCount()
self.tbl_static_mtp.insertRow(row)
def delet_row_static(self):
index = self.tbl_static_mtp.currentIndex()
self.tbl_static_mtp.removeRow(index.row())
def multi_static(self,state):
if state > 0:
self.gb_multi_static.setEnabled(True)
self.gb_sigle_static.setEnabled(False)
else:
self.gb_multi_static.setEnabled(False)
self.gb_sigle_static.setEnabled(True)
def dt_static(self,state):
if state > 0:
self.tb_dt_static.setEnabled(True)
else:
self.tb_dt_static.setEnabled(False)
def plotresponse_static_bg_color(self):
global plotresponse_static_bg_color
try:
plotresponse_static_bg_color = QColorDialog.getColor()
except:
QMessageBox.critical(self, "Error", "Please provide matplotlib supported color")
def plotresponse_static_mg_linecolor(self):
global plotresponse_static_mg_color
try:
plotresponse_static_mg_color = QColorDialog.getColor()
except:
QMessageBox.critical(self, "Error", "Please provide matplotlib supported color")
    def mg_ls_solid(self):
        global lstyle
        lstyle = '-'
    def mg_ls_dotted(self):
        global lstyle
        lstyle = ':'
    def mg_ls_dashed(self):
        global lstyle
        lstyle = '--'
    def mg_ls_dashdot(self):
        global lstyle
        lstyle = '-.'
    def mg_lw_0_2(self):
        global lwidth
        lwidth = 0.2
    def mg_lw_0_4(self):
        global lwidth
        lwidth = 0.4
    def mg_lw_0_6(self):
        global lwidth
        lwidth = 0.6
    def mg_lw_0_8(self):
        global lwidth
        lwidth = 0.8
    def mg_lw_1(self):
        global lwidth
        lwidth = 1
def savedata_static(self):
try:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
datafilename, _ = QFileDialog.getSaveFileName(self, "Save Data as Text", "", "Text File (*.txt)")
if datafilename:
self.currentdir = os.path.dirname(os.path.abspath(datafilename))
with open(datafilename, 'w') as table:
table.write('Result data from static analysis: \n\n')
if self.cb_multiStatic.isChecked() == False:
table.write('Node Number: ' + str(index_cb + 1) + '\n')
if self.cb_dt_static.isChecked() == False:
if node_component_static == 'Ux':
table.write('Interval' + '\t'+'Displacement_Ux\n')
elif node_component_static == 'Uy':
table.write('Interval' + '\t'+'Displacement_Uy\n')
elif node_component_static == 'Uz':
table.write('Interval' + '\t'+'Displacement_Uz\n')
elif node_component_static == 'Rx':
table.write('Interval' + '\t'+'Rotation_Rx\n')
elif node_component_static == 'Ry':
table.write('Interval' + '\t'+'Rotation_Ry\n')
elif node_component_static == 'Rz':
table.write('Interval' + '\t'+'Rotation_Rz\n')
elif node_component_static == 'RFx':
table.write('Interval' + '\t'+'Reaction_Force_RFx\n')
elif node_component_static == 'RFy':
table.write('Interval' + '\t'+'Reaction_Force_RFy\n')
elif node_component_static == 'RFz':
table.write('Interval' + '\t'+'Reaction_Force_RFz\n')
elif node_component_static == 'RMx':
table.write('Interval' + '\t'+'Moment_Reaction_RMx\n')
elif node_component_static == 'RMy':
table.write('Interval' + '\t'+'Moment_Reaction_RMy\n')
elif node_component_static == 'RMz':
table.write('Interval' + '\t'+'Moment_Reaction_RMz\n')
for row in zip(np.round(intrl_disp_static,4), res_static):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_dt_static.isChecked() == True:
if node_component_static == 'Ux':
table.write('Time(s)' + '\t' + 'Displacement_Ux\n')
elif node_component_static == 'Uy':
table.write('Time(s)' + '\t' + 'Displacement_Uy\n')
elif node_component_static == 'Uz':
table.write('Time(s)' + '\t' + 'Displacement_Uz\n')
elif node_component_static == 'Rx':
table.write('Time(s)' + '\t' + 'Rotation_Rx\n')
elif node_component_static == 'Ry':
table.write('Time(s)' + '\t' + 'Rotation_Ry\n')
elif node_component_static == 'Rz':
table.write('Time(s)' + '\t' + 'Rotation_Rz\n')
elif node_component_static == 'RFx':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFx\n')
elif node_component_static == 'RFy':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFy\n')
elif node_component_static == 'RFz':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFz\n')
elif node_component_static == 'RMx':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMx\n')
elif node_component_static == 'RMy':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMy\n')
elif node_component_static == 'RMz':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMz\n')
for row in zip(time_series_static, res_static):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_multiStatic.isChecked() == True:
table.write('Node Number: ' + str(np.transpose(nulti_node_static).ravel()) + '\n')
response_multinode_static_t=(np.transpose(np.reshape(response_multinode_static,(len(nulti_node_static), len(intrl_disp_static)))))
if self.cb_dt_static.isChecked() == False:
if node_component_static == 'Ux':
table.write('Interval' + '\t'+'Displacement_Ux\n')
elif node_component_static == 'Uy':
table.write('Interval' + '\t' + 'Displacement_Uy\n')
elif node_component_static == 'Uz':
table.write('Interval' + '\t'+'Displacement_Uz\n')
elif node_component_static == 'Rx':
table.write('Interval' + '\t'+'Rotation_Rx\n')
elif node_component_static == 'Ry':
table.write('Interval' + '\t'+'Rotation_Ry\n')
elif node_component_static == 'Rz':
table.write('Interval' + '\t'+'Rotation_Rz\n')
elif node_component_static == 'RFx':
table.write('Interval' + '\t'+'Reaction_Force_RFx\n')
elif node_component_static == 'RFy':
table.write('Interval' + '\t'+'Reaction_Force_RFy\n')
elif node_component_static == 'RFz':
table.write('Interval' + '\t'+'Reaction_Force_RFz\n')
elif node_component_static == 'RMx':
table.write('Interval' + '\t'+'Moment_Reaction_RMx\n')
elif node_component_static == 'RMy':
table.write('Interval' + '\t'+'Moment_Reaction_RMy\n')
elif node_component_static == 'RMz':
table.write('Interval' + '\t'+'Moment_Reaction_RMz\n')
for row in zip(np.round(intrl_disp_static, 4), response_multinode_static_t):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_dt_static.isChecked() == True:
if node_component_static == 'Ux':
table.write('Time(s)' + '\t' + 'Displacement_Ux\n')
elif node_component_static == 'Uy':
table.write('Time(s)' + '\t' + 'Displacement_Uy\n')
elif node_component_static == 'Uz':
table.write('Time(s)' + '\t' + 'Displacement_Uz\n')
elif node_component_static == 'Rx':
table.write('Time(s)' + '\t' + 'Rotation_Rx\n')
elif node_component_static == 'Ry':
table.write('Time(s)' + '\t' + 'Rotation_Ry\n')
elif node_component_static == 'Rz':
table.write('Time(s)' + '\t' + 'Rotation_Rz\n')
elif node_component_static == 'RFx':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFx\n')
elif node_component_static == 'RFy':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFy\n')
elif node_component_static == 'RFz':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFz\n')
elif node_component_static == 'RMx':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMx\n')
elif node_component_static == 'RMy':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMy\n')
elif node_component_static == 'RMz':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMz\n')
for row in zip(time_series_static, response_multinode_static_t):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
except:
QMessageBox.critical(self, "Error", "Data not found, please apply for response")
def plotresponse_static(self):
global intrl_disp_static,time_series_static,res_static, node_component_static,index_cb, response_multinode_static, nulti_node_static
try:
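            # Single-node mode plots one response component for the selected node;
            # multi-node mode overlays the same component for every node in the table.
            # The x-axis is a normalised interval, or a time series when a dt is given.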
index_cb = self.cb_resp_nodenumber.currentIndex()
intrl_disp_static = (np.linspace(0, 1, len((main.outdispFile[0:main.step_statics, 0]))))
node_component_static = self.cb_resp_component.currentText()
time_static = (float(self.tb_dt_static.text()))
time_series_static = (np.linspace(0, time_static * (len(intrl_disp_static) - 1), len(intrl_disp_static)))
            zero_2d_static = np.repeat(0, len(intrl_disp_static))
self.PstaticWidget.canvas_pstatic.axes_pstatic.clear()
try:
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_facecolor(str(plotresponse_static_bg_color.name()))
except:
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_facecolor('#000000')
try:
self.PstaticWidget.canvas_pstatic.axes_pstatic.grid(True, which="both", color=str(plotresponse_static_mg_color.name()))
except:
self.PstaticWidget.canvas_pstatic.axes_pstatic.grid(True, which="both", color='#ffffff')
try:
self.PstaticWidget.canvas_pstatic.axes_pstatic.grid(True, which="both", linestyle=lstyle)
except:
self.PstaticWidget.canvas_pstatic.axes_pstatic.grid(True, which="both", linestyle='--')
try:
self.PstaticWidget.canvas_pstatic.axes_pstatic.grid(True, which="both", linewidth=lwidth)
except:
self.PstaticWidget.canvas_pstatic.axes_pstatic.grid(True, which="both", linewidth=0.4)
if self.cb_multiStatic.isChecked() == False: #
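                # Recorder columns are grouped per node after the leading time/step
                # column: node index_cb (0-based) occupies columns ndm*index_cb+1 to
                # ndm*index_cb+ndm (ndm = 3 in 3D, 2 in 2D).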
if node_component_static == 'Ux':
if ndm_v(main.fileName)==3:
res_static=(main.outdispFile[0:main.step_statics, (3*index_cb+1)])
elif ndm_v(main.fileName) == 2:
res_static=(main.outdispFile[0:main.step_statics, (2*index_cb+1)])
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Displacement, Ux', fontname="Times New Roman", size=10)
elif node_component_static == 'Uy':
if ndm_v(main.fileName)==3:
res_static=(main.outdispFile[0:main.step_statics, (3*index_cb+2)])
elif ndm_v(main.fileName) == 2:
res_static=(main.outdispFile[0:main.step_statics, (2*index_cb+2)])
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Displacement, Uy', fontname="Times New Roman", size=10)
elif node_component_static == 'Uz':
if ndm_v(main.fileName)==3:
res_static=(main.outdispFile[0:main.step_statics, (3*index_cb+3)])
elif ndm_v(main.fileName) == 2:
QMessageBox.critical(self, "Error", "This DOF is not available for 2D problem")
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Displacement, Uz', fontname="Times New Roman", size=10)
elif node_component_static == 'Rx':
if ndm_v(main.fileName) == 3:
res_static = (main.outrotFile[0:main.step_statics, (3*index_cb+1)])
elif ndm_v(main.fileName) == 2:
res_static = zero_2d_static
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Rotation, Rx', fontname="Times New Roman", size=10)
elif node_component_static == 'Ry':
if ndm_v(main.fileName) == 3:
res_static = (main.outrotFile[0:main.step_statics, (3 * index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_static = zero_2d_static
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Rotation, Ry', fontname="Times New Roman", size=10)
elif node_component_static == 'Rz':
if ndm_v(main.fileName) == 3:
res_static = (main.outrotFile[0:main.step_statics, (3 * index_cb + 3)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outrotFile[0:main.step_statics, (index_cb+1 )])
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Rotation, Rz', fontname="Times New Roman", size=10)
elif node_component_static == 'RFx':
if ndm_v(main.fileName) == 3:
res_static = (main.outfreactFile[0:main.step_statics, (3*index_cb+1)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outfreactFile[0:main.step_statics, (2*index_cb+1)])
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Force Reaction, RFx', fontname="Times New Roman", size=10)
elif node_component_static == 'RFy':
if ndm_v(main.fileName) == 3:
res_static = (main.outfreactFile[0:main.step_statics, (3 * index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outfreactFile[0:main.step_statics, (2 * index_cb + 2)])
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Force Reaction, RFy', fontname="Times New Roman", size=10)
elif node_component_static == 'RFz':
if ndm_v(main.fileName) == 3:
res_static = (main.outfreactFile[0:main.step_statics, (3 * index_cb + 3)])
elif ndm_v(main.fileName) == 2:
QMessageBox.critical(self, "Error", "This DOF is not available for 2D problem")
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Force Reaction, RFz', fontname="Times New Roman", size=10)
elif node_component_static == 'RMx':
if ndm_v(main.fileName) == 3:
res_static = (main.outmreactFile[0:main.step_statics, (3 * index_cb + 1)])
elif ndm_v(main.fileName) == 2:
res_static=zero_2d_static
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Moment Reaction, RMx', fontname="Times New Roman", size=10)
elif node_component_static == 'RMy':
if ndm_v(main.fileName) == 3:
res_static = (main.outmreactFile[0:main.step_statics, (3 * index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_static=zero_2d_static
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Moment Reaction, RMy', fontname="Times New Roman", size=10)
elif node_component_static == 'RMz':
if ndm_v(main.fileName) == 3:
res_static = (main.outmreactFile[0:main.step_statics, (3 * index_cb + 3)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outmreactFile[0:main.step_statics, (index_cb + 1)])
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Moment Reaction, RMz', fontname="Times New Roman", size=10)
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlabel('Interval',
fontname="Times New Roman", size=10)
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static, color='r', linewidth=1)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlim(xmin=0, xmax=np.max(intrl_disp_static))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlabel('Time (s)', fontname="Times New Roman", size=10)
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static, color='r', linewidth=1)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlim(xmin=0, xmax=np.max(time_series_static))
elif self.cb_multiStatic.isChecked() == True:
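                # Multi-node mode: read the node numbers from the table widget and
                # overlay one labelled curve per node.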
col_count = self.tbl_static_mtp.columnCount()
row_count = self.tbl_static_mtp.rowCount()
headers = [str(self.tbl_static_mtp.horizontalHeaderItem(i).text()) for i in range(col_count)]
df_list = []
for row in range(row_count):
df_list2 = []
for col in range(col_count):
table_item = self.tbl_static_mtp.item(row, col)
df_list2.append('' if table_item is None else str(table_item.text()))
df_list.append(df_list2)
df = pd.DataFrame(df_list, columns=headers)
nulti_node_static = df.values.astype(int)
response_multinode_static = []
if node_component_static == 'Ux':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outdispFile[0:main.step_statics, (3 * nulti_node_static[i]-2)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outdispFile[0:main.step_statics, (2 * nulti_node_static[i]-1)])
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Displacement, Ux', fontname="Times New Roman", size=10)
elif node_component_static == 'Uy':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outdispFile[0:main.step_statics, (3 * nulti_node_static[i]-1)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outdispFile[0:main.step_statics, (2 * nulti_node_static[i])])
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Displacement, Uy', fontname="Times New Roman", size=10)
elif node_component_static == 'Uz':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outdispFile[0:main.step_statics, (3 * nulti_node_static[i])])
elif ndm_v(main.fileName) == 2:
QMessageBox.critical(self, "Error", "This DOF is not available for 2D problem")
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Displacement, Uz', fontname="Times New Roman", size=10)
elif node_component_static == 'Rx':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outrotFile[0:main.step_statics, (3 * nulti_node_static[i]-2)])
elif ndm_v(main.fileName) == 2:
res_static=zero_2d_static
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Rotation, Rx', fontname="Times New Roman", size=10)
elif node_component_static == 'Ry':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outrotFile[0:main.step_statics, (3 * nulti_node_static[i]-1)])
elif ndm_v(main.fileName) == 2:
res_static=zero_2d_static
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Rotation, Ry', fontname="Times New Roman", size=10)
elif node_component_static == 'Rz':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outrotFile[0:main.step_statics, (3 * nulti_node_static[i])])
elif ndm_v(main.fileName) == 2:
res_static = (main.outrotFile[0:main.step_statics, (nulti_node_static[i])])
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Rotation, Rz', fontname="Times New Roman", size=10)
elif node_component_static == 'RFx':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outfreactFile[0:main.step_statics, (3 * nulti_node_static[i]-2)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outfreactFile[0:main.step_statics, (2 * nulti_node_static[i]-1)])
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Force Reaction, RFx', fontname="Times New Roman", size=10)
elif node_component_static == 'RFy':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outfreactFile[0:main.step_statics, (3 * nulti_node_static[i]-1)])
elif ndm_v(main.fileName) == 2:
res_static = (main.outfreactFile[0:main.step_statics, (2 * nulti_node_static[i])])
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Force Reaction, RFy', fontname="Times New Roman", size=10)
elif node_component_static == 'RFz':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outfreactFile[0:main.step_statics, (3 * nulti_node_static[i])])
elif ndm_v(main.fileName) == 2:
QMessageBox.critical(self, "Error", "This DOF is not available for 2D problem")
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1,label='Node '+str(nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Force Reaction, RFz', fontname="Times New Roman", size=10)
elif node_component_static == 'RMx':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outmreactFile[0:main.step_statics, (3 * nulti_node_static[i]-2)])
elif ndm_v(main.fileName) == 2:
res_static = zero_2d_static
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1, label='Node ' + str(
nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1, label='Node ' + str(
nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Moment Reaction, RMx',
fontname="Times New Roman", size=10)
elif node_component_static == 'RMy':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outmreactFile[0:main.step_statics, (3 * nulti_node_static[i]-1)])
elif ndm_v(main.fileName) == 2:
res_static = zero_2d_static
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1, label='Node ' + str(
nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1, label='Node ' + str(
nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Moment Reaction, RMy',
fontname="Times New Roman", size=10)
elif node_component_static == 'RMz':
for i in range(len(nulti_node_static)):
if ndm_v(main.fileName) == 3:
res_static = (main.outmreactFile[0:main.step_statics, (3 * nulti_node_static[i])])
elif ndm_v(main.fileName) == 2:
res_static = (main.outmreactFile[0:main.step_statics, (nulti_node_static[i])])
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(intrl_disp_static, res_static,
linewidth=1, label='Node ' + str(
nulti_node_static[i]))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.plot(time_series_static, res_static,
linewidth=1, label='Node ' + str(
nulti_node_static[i]))
response_multinode_static.append(res_static)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_ylabel('Moment Reaction, RMz',
fontname="Times New Roman", size=10)
if self.cb_dt_static.isChecked() == False:
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlabel('Interval',
fontname="Times New Roman",
size=10)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlim(xmin=0,xmax=np.max(intrl_disp_static))
elif self.cb_dt_static.isChecked() == True:
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlabel('Time (s)',
fontname="Times New Roman",
size=10)
self.PstaticWidget.canvas_pstatic.axes_pstatic.set_xlim(xmin=0, xmax=np.max(time_series_static))
legend = self.PstaticWidget.canvas_pstatic.axes_pstatic.legend(fontsize=10, prop={"family": "Times New Roman"})
self.PstaticWidget.canvas_pstatic.axes_pstatic.yaxis.offsetText.set_fontsize(9)
self.PstaticWidget.canvas_pstatic.axes_pstatic.yaxis.offsetText.set_fontname("Times New Roman")
for tick in self.PstaticWidget.canvas_pstatic.axes_pstatic.get_xticklabels():
tick.set_fontname("Times New Roman")
tick.set_fontsize(9)
for tick in self.PstaticWidget.canvas_pstatic.axes_pstatic.get_yticklabels():
tick.set_fontname("Times New Roman")
tick.set_fontsize(9)
mpl.rcParams['savefig.dpi'] = 1000
self.PstaticWidget.canvas_pstatic.draw()
except:
QMessageBox.critical(self, "Error", "Please check, unknown reason")
class response_dynamic(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(response_dynamic, self).__init__(parent)
uic.loadUi(r'UI\dynamicResponseWindows.ui', self)
self.btn_insert_dynamic.clicked.connect(self.insert_row_dynamic)
self.btn_add_roe_dynamic.clicked.connect(self.add_row_dynamic)
self.btn_delet_row_dynamic.clicked.connect(self.delet_row_dynamic)
self.cb_dt_dynamic.stateChanged.connect(self.dt_dynamic)
self.cb_multiDynamic.stateChanged.connect(self.multi_dynamic)
self.actionChange_Background_Color.triggered.connect(self.plotresponse_dynamic_bg_color)
self.actionLine_Color_g.triggered.connect(self.plotresponse_dynamic_mg_linecolor)
self.actionSolid.triggered.connect(self.mg_ls_solid)
self.actionDotted.triggered.connect(self.mg_ls_dotted)
self.actionDashed.triggered.connect(self.mg_ls_dashed)
self.actionDashdot.triggered.connect(self.mg_ls_dashdot)
self.action0_2.triggered.connect(self.mg_lw_0_2)
self.action0_4.triggered.connect(self.mg_lw_0_4)
self.action0_6.triggered.connect(self.mg_lw_0_6)
self.action0_8.triggered.connect(self.mg_lw_0_8)
self.action1.triggered.connect(self.mg_lw_1)
self.mb_savedata_timeseries.triggered.connect(self.savedata_timeseries)
self.actionResponse_Spectra.triggered.connect(self.savedata_rs)
nodes=node(main.fileName)[:, 1].astype(int)
if nodes.size>0:
for i in range(len(nodes)):
self.cb_resp_nodenumber.addItem(str(nodes[i]))
self.cb_resp_nodenumber_rs.addItem(str(nodes[i]))
self.apply_repnse_dynamic.clicked.connect(self.plotresponse_dynamic)
self.apply_rs.clicked.connect(self.res_spectra)
def insert_row_dynamic(self):
for i in range (int(self.tb_mnode_dynamic.text())):
self.tbl_dynamic_mtp.insertRow(i)
def add_row_dynamic(self):
row = self.tbl_dynamic_mtp.rowCount()
self.tbl_dynamic_mtp.insertRow(row)
def delet_row_dynamic(self):
index = self.tbl_dynamic_mtp.currentIndex()
self.tbl_dynamic_mtp.removeRow(index.row())
def multi_dynamic(self,state):
if state > 0:
self.gb_multi_dynamic.setEnabled(True)
self.gb_sigle_dynamic.setEnabled(False)
else:
self.gb_multi_dynamic.setEnabled(False)
self.gb_sigle_dynamic.setEnabled(True)
def dt_dynamic(self,state):
if state > 0:
self.tb_dt_dynamic.setEnabled(True)
else:
self.tb_dt_dynamic.setEnabled(False)
def plotresponse_dynamic_bg_color(self):
global plotresponse_dynamic_bg_color
try:
plotresponse_dynamic_bg_color = QColorDialog.getColor()
except:
QMessageBox.critical(self, "Error", "Please provide matplotlib supported color")
def plotresponse_dynamic_mg_linecolor(self):
global plotresponse_dynamic_mg_color
try:
plotresponse_dynamic_mg_color = QColorDialog.getColor()
except:
QMessageBox.critical(self, "Error", "Please provide matplotlib supported color")
def mg_ls_solid(self):
global lstyle
lstyle = '-'
def mg_ls_dotted(self):
global lstyle
lstyle = ':'
def mg_ls_dashed(self):
global lstyle
lstyle = '--'
def mg_ls_dashdot(self):
global lstyle
lstyle = '-.'
def mg_lw_0_2(self):
global lwidth
lwidth = 0.2
def mg_lw_0_4(self):
global lwidth
lwidth = 0.4
def mg_lw_0_6(self):
global lwidth
lwidth = 0.6
def mg_lw_0_8(self):
global lwidth
lwidth = 0.8
def mg_lw_1(self):
global lwidth
lwidth = 1
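# The menu handlers above only record the chosen style in module-level globals
# (plotresponse_dynamic_bg_color, plotresponse_dynamic_mg_color, lstyle, lwidth);
# plotresponse_dynamic() reads them inside try/except blocks and falls back to the
# defaults ('#000000' background, '#ffffff' grid, '--' line style, 0.4 line width)
# when a global has not been set yet.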
def savedata_rs(self):
try:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
datafilename, _ = QFileDialog.getSaveFileName(self, "Save Data as Text", "", "Text File (*.txt)")
if datafilename:
self.currentdir = os.path.dirname(os.path.abspath(datafilename))
with open(datafilename, 'w') as table:
table.write('Result data from Transient analysis: \n\n')
if self.cb_multi_rs.isChecked() == False:
table.write('Node Number: ' + str(index_cb + 1)+ '\n\n')
if self.rbn_Ax.isChecked()==True:
table.write('Component: X' +'\n\n')
elif self.rbn_Ay.isChecked()==True:
table.write('Component: Y' +'\n\n')
elif self.rbn_Az.isChecked()==True:
table.write('Component: Z' +'\n\n')
if self.rbn__T.isChecked()==True:
if self.rbn_PSa.isChecked()==True:
table.write('Period(s)' + '\t' + 'Pseudo Spectral Acceleration_PSa\n')
elif self.rbn_PSv.isChecked() == True:
table.write('Period(s)' + '\t' + 'Pseudo Spectral Velocity_PSv\n')
elif self.rbn_Sd.isChecked() == True:
table.write('Period(s)' + '\t' + 'Spectral Displacement_Sd\n')
if self.rbn_F.isChecked()==True:
if self.rbn_PSa.isChecked()==True:
table.write('Frequency(Hz)' + '\t' + 'Pseudo Spectral Acceleration_PSa\n')
elif self.rbn_PSv.isChecked() == True:
table.write('Frequency(Hz)' + '\t' + 'Pseudo Spectral Velocity_PSv\n')
elif self.rbn_Sd.isChecked() == True:
table.write('Frequency(Hz)' + '\t' + 'Spectral Displacement_Sd\n')
for row in zip(np.round(x_values,4), y_values):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_multi_rs.isChecked() == True:
table.write('Node Number: ' + str(np.transpose(nulti_node_dynamic).ravel()) + '\n')
response_multinode_dynamic_t=(np.transpose(np.reshape(response_multinode_dynamic,(len(nulti_node_dynamic), len(intrl_disp_dynamic)))))
if self.cb_dt_dynamic.isChecked() == False:
if node_component_dynamic == 'Ax':
table.write('Interval' + '\t' + 'Acceleration_Ax\n')
except:
QMessageBox.critical(self, "Error", "Data not found, please apply for response spectra")
def savedata_timeseries(self):
try:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
datafilename, _ = QFileDialog.getSaveFileName(self, "Save Data as Text", "", "Text File (*.txt)")
if datafilename:
self.currentdir = os.path.dirname(os.path.abspath(datafilename))
with open(datafilename, 'w') as table:
table.write('Result data from Transient analysis: \n\n')
if self.cb_multiDynamic.isChecked() == False:
table.write('Node Number: ' + str(index_cb + 1) + '\n')
if self.cb_dt_dynamic.isChecked() == False:
if node_component_dynamic == 'Ax':
table.write('Interval' + '\t' + 'Acceleration_Ax\n')
elif node_component_dynamic == 'Ay':
table.write('Interval' + '\t' + 'Acceleration_Ay\n')
elif node_component_dynamic == 'Az':
table.write('Interval' + '\t' + 'Acceleration_Az\n')
elif node_component_dynamic == 'Vx':
table.write('Interval' + '\t' + 'Velocity_Vx\n')
elif node_component_dynamic == 'Vy':
table.write('Interval' + '\t' + 'Velocity_Vy\n')
elif node_component_dynamic == 'Vz':
table.write('Interval' + '\t' + 'Velocity_Vz\n')
elif node_component_dynamic == 'Ux':
table.write('Interval' + '\t'+'Displacement_Ux\n')
elif node_component_dynamic == 'Uy':
table.write('Interval' + '\t'+'Displacement_Uy\n')
elif node_component_dynamic == 'Uz':
table.write('Interval' + '\t'+'Displacement_Uz\n')
elif node_component_dynamic == 'Rx':
table.write('Interval' + '\t'+'Rotation_Rx\n')
elif node_component_dynamic == 'Ry':
table.write('Interval' + '\t'+'Rotation_Ry\n')
elif node_component_dynamic == 'Rz':
table.write('Interval' + '\t'+'Rotation_Rz\n')
elif node_component_dynamic == 'RFx':
table.write('Interval' + '\t'+'Reaction_Force_RFx\n')
elif node_component_dynamic == 'RFy':
table.write('Interval' + '\t'+'Reaction_Force_RFy\n')
elif node_component_dynamic == 'RFz':
table.write('Interval' + '\t'+'Reaction_Force_RFz\n')
elif node_component_dynamic == 'RMx':
table.write('Interval' + '\t'+'Moment_Reaction_RMx\n')
elif node_component_dynamic == 'RMy':
table.write('Interval' + '\t'+'Moment_Reaction_RMy\n')
elif node_component_dynamic == 'RMz':
table.write('Interval' + '\t'+'Moment_Reaction_RMz\n')
for row in zip(np.round(intrl_disp_dynamic,4), res_dynamic):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_dt_dynamic.isChecked() == True:
if node_component_dynamic == 'Ax':
table.write('Time(s)' + '\t' + 'Acceleration_Ax\n')
elif node_component_dynamic == 'Ay':
table.write('Time(s)' + '\t' + 'Acceleration_Ay\n')
elif node_component_dynamic == 'Az':
table.write('Time(s)' + '\t' + 'Acceleration_Az\n')
elif node_component_dynamic == 'Vx':
table.write('Time(s)' + '\t' + 'Velocity_Vx\n')
elif node_component_dynamic == 'Vy':
table.write('Time(s)' + '\t' + 'Velocity_Vy\n')
elif node_component_dynamic == 'Vz':
table.write('Time(s)' + '\t' + 'Velocity_Vz\n')
elif node_component_dynamic == 'Ux':
table.write('Time(s)' + '\t' + 'Displacement_Ux\n')
elif node_component_dynamic == 'Uy':
table.write('Time(s)' + '\t' + 'Displacement_Uy\n')
elif node_component_dynamic == 'Uz':
table.write('Time(s)' + '\t' + 'Displacement_Uz\n')
elif node_component_dynamic == 'Rx':
table.write('Time(s)' + '\t' + 'Rotation_Rx\n')
elif node_component_dynamic == 'Ry':
table.write('Time(s)' + '\t' + 'Rotation_Ry\n')
elif node_component_dynamic == 'Rz':
table.write('Time(s)' + '\t' + 'Rotation_Rz\n')
elif node_component_dynamic == 'RFx':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFx\n')
elif node_component_dynamic == 'RFy':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFy\n')
elif node_component_dynamic == 'RFz':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFz\n')
elif node_component_dynamic == 'RMx':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMx\n')
elif node_component_dynamic == 'RMy':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMy\n')
elif node_component_dynamic == 'RMz':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMz\n')
for row in zip(np.round(time_series_dynamic,4), res_dynamic):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_multiDynamic.isChecked() == True:
table.write('Node Number: ' + str(np.transpose(nulti_node_dynamic).ravel()) + '\n')
response_multinode_dynamic_t=(np.transpose(np.reshape(response_multinode_dynamic,(len(nulti_node_dynamic), len(intrl_disp_dynamic)))))
if self.cb_dt_dynamic.isChecked() == False:
if node_component_dynamic == 'Ax':
table.write('Interval' + '\t' + 'Acceleration_Ax\n')
elif node_component_dynamic == 'Ay':
table.write('Interval' + '\t' + 'Acceleration_Ay\n')
elif node_component_dynamic == 'Az':
table.write('Interval' + '\t' + 'Acceleration_Az\n')
elif node_component_dynamic == 'Vx':
table.write('Interval' + '\t' + 'Velocity_Vx\n')
elif node_component_dynamic == 'Vy':
table.write('Interval' + '\t' + 'Velocity_Vy\n')
elif node_component_dynamic == 'Vz':
table.write('Interval' + '\t' + 'Velocity_Vz\n')
elif node_component_dynamic == 'Ux':
table.write('Interval' + '\t'+'Displacement_Ux\n')
elif node_component_dynamic == 'Uy':
table.write('Interval' + '\t'+'Displacement_Uy\n')
elif node_component_dynamic == 'Uz':
table.write('Interval' + '\t'+'Displacement_Uz\n')
elif node_component_dynamic == 'Rx':
table.write('Interval' + '\t'+'Rotation_Rx\n')
elif node_component_dynamic == 'Ry':
table.write('Interval' + '\t'+'Rotation_Ry\n')
elif node_component_dynamic == 'Rz':
table.write('Interval' + '\t'+'Rotation_Rz\n')
elif node_component_dynamic == 'RFx':
table.write('Interval' + '\t'+'Reaction_Force_RFx\n')
elif node_component_dynamic == 'RFy':
table.write('Interval' + '\t'+'Reaction_Force_RFy\n')
elif node_component_dynamic == 'RFz':
table.write('Interval' + '\t'+'Reaction_Force_RFz\n')
elif node_component_dynamic == 'RMx':
table.write('Interval' + '\t'+'Moment_Reaction_RMx\n')
elif node_component_dynamic == 'RMy':
table.write('Interval' + '\t'+'Moment_Reaction_RMy\n')
elif node_component_dynamic == 'RMz':
table.write('Interval' + '\t'+'Moment_Reaction_RMz\n')
for row in zip(np.round(intrl_disp_dynamic, 4), response_multinode_dynamic_t):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
elif self.cb_dt_dynamic.isChecked() == True:
if node_component_dynamic == 'Ax':
table.write('Time(s)' + '\t' + 'Acceleration_Ax\n')
elif node_component_dynamic == 'Ay':
table.write('Time(s)' + '\t' + 'Acceleration_Ay\n')
elif node_component_dynamic == 'Az':
table.write('Time(s)' + '\t' + 'Acceleration_Az\n')
elif node_component_dynamic == 'Vx':
table.write('Time(s)' + '\t' + 'Velocity_Vx\n')
elif node_component_dynamic == 'Vy':
table.write('Time(s)' + '\t' + 'Velocity_Vy\n')
elif node_component_dynamic == 'Vz':
table.write('Time(s)' + '\t' + 'Velocity_Vz\n')
elif node_component_dynamic == 'Ux':
table.write('Time(s)' + '\t' + 'Displacement_Ux\n')
elif node_component_dynamic == 'Uy':
table.write('Time(s)' + '\t' + 'Displacement_Uy\n')
elif node_component_dynamic == 'Uz':
table.write('Time(s)' + '\t' + 'Displacement_Uz\n')
elif node_component_dynamic == 'Rx':
table.write('Time(s)' + '\t' + 'Rotation_Rx\n')
elif node_component_dynamic == 'Ry':
table.write('Time(s)' + '\t' + 'Rotation_Ry\n')
elif node_component_dynamic == 'Rz':
table.write('Time(s)' + '\t' + 'Rotation_Rz\n')
elif node_component_dynamic == 'RFx':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFx\n')
elif node_component_dynamic == 'RFy':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFy\n')
elif node_component_dynamic == 'RFz':
table.write('Time(s)' + '\t' + 'Reaction_Force_RFz\n')
elif node_component_dynamic == 'RMx':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMx\n')
elif node_component_dynamic == 'RMy':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMy\n')
elif node_component_dynamic == 'RMz':
table.write('Time(s)' + '\t' + 'Moment_Reaction_RMz\n')
for row in zip(np.round(time_series_dynamic,4), response_multinode_dynamic_t):
for cell in row:
table.write(str(cell) + '\t')
table.write('\n')
except:
QMessageBox.critical(self, "Error", "Data not found, please apply for transient analysis")
#
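# Column-layout assumption used below (mirrors the static response window): column 0 of
# the out*File arrays is taken to hold the step/pseudo-time, and combo-box node index i is
# read from columns 3*i+1..3*i+3 for 3D models (ndm == 3) or 2*i+1..2*i+2 for 2D models;
# out-of-plane components in 2D are plotted as zeros (zero_2d_dynamic).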
def plotresponse_dynamic(self):
global intrl_disp_dynamic,time_series_dynamic,res_dynamic, node_component_dynamic,index_cb, response_multinode_dynamic, nulti_node_dynamic
try:
index_cb = self.cb_resp_nodenumber.currentIndex()
intrl_disp_dynamic =np.linspace(0,(len(main.outdispFile[:, 1])-main.step_statics-1),(len(main.outdispFile[:, 1])-main.step_statics))
node_component_dynamic = self.cb_resp_component.currentText()
time_dynamic = (float(self.tb_dt_dynamic.text()))
time_series_dynamic = (np.linspace(0, time_dynamic * (len(intrl_disp_dynamic) - 1), len(intrl_disp_dynamic)))
zero_2d_dynamic = np.repeat(0, (len(intrl_disp_dynamic)))
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.clear()
try:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_facecolor(str(plotresponse_dynamic_bg_color.name()))
except:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_facecolor('#000000')
try:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.grid(True, which="both", color=str(plotresponse_dynamic_mg_color.name()))
except:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.grid(True, which="both", color='#ffffff')
try:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.grid(True, which="both", linestyle=lstyle)
except:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.grid(True, which="both", linestyle='--')
try:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.grid(True, which="both", linewidth=lwidth)
except:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.grid(True, which="both", linewidth=0.4)
if self.cb_multiDynamic.isChecked() == False: #
if node_component_dynamic == 'Ax':
if ndm_v(main.fileName)==3:
res_dynamic = (main.outaccFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+1)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (main.outaccFile[main.step_statics:len(main.outdispFile[:, 1]), (2*index_cb+1)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Acceleration, Ax', fontname="Times New Roman", size=10)
elif node_component_dynamic == 'Ay':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outaccFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (
main.outaccFile[main.step_statics:len(main.outdispFile[:, 1]), (2 * index_cb + 2)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Acceleration, Ay',
fontname="Times New Roman", size=10)
elif node_component_dynamic == 'Az':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outaccFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 3)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Acceleration, Az',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Vx':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outvelFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 1)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (main.outvelFile[main.step_statics:len(main.outdispFile[:, 1]), (2 * index_cb + 1)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Velocity, Vx',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Vy':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outvelFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (
main.outvelFile[main.step_statics:len(main.outdispFile[:, 1]), (2 * index_cb + 2)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Velocity, Vy',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Vz':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outvelFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 3)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Velocity, Vz',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Ux':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outdispFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 1)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (
main.outdispFile[main.step_statics:len(main.outdispFile[:, 1]), (2 * index_cb + 1)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Displacement, Ux',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Uy':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outdispFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (
main.outdispFile[main.step_statics:len(main.outdispFile[:, 1]), (2 * index_cb + 2)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Displacement, Uy',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Uz':
if ndm_v(main.fileName) == 3:
res_dynamic = (
main.outdispFile[main.step_statics:len(main.outdispFile[:, 1]), (3 * index_cb + 3)])
elif ndm_v(main.fileName) == 2:
res_dynamic =zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Displacement, Uz',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Rx':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outrotFile[main.step_statics:len(main.outdispFile[:, 1]), (3 *index_cb + 1)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Rotation, Rx',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Ry':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outrotFile[main.step_statics:len(main.outdispFile[:, 1]), (3 *index_cb + 2)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Rotation, Ry',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'Rz':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outrotFile[main.step_statics:len(main.outdispFile[:, 1]), (3 *index_cb + 3)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (main.outrotFile[main.step_statics:len(main.outdispFile[:, 1]), (index_cb + 1)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Rotation, Rz',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'RFx':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outfreactFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+1)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (main.outfreactFile[main.step_statics:len(main.outdispFile[:, 1]), (2*index_cb+1)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Force Reaction, RFx',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'RFy':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outfreactFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+2)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (main.outfreactFile[main.step_statics:len(main.outdispFile[:, 1]), (2*index_cb+2)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Force Reaction, RFy',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'RFz':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outfreactFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+3)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Force Reaction, RFz',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'RMx':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outmreactFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+1)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Moment Reaction, RMx',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'RMy':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outmreactFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+2)])
elif ndm_v(main.fileName) == 2:
res_dynamic = zero_2d_dynamic
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Moment Reaction, RMy',
fontname="Times New Roman",
size=10)
elif node_component_dynamic == 'RMz':
if ndm_v(main.fileName) == 3:
res_dynamic = (main.outmreactFile[main.step_statics:len(main.outdispFile[:, 1]), (3*index_cb+3)])
elif ndm_v(main.fileName) == 2:
res_dynamic = (main.outmreactFile[main.step_statics:len(main.outdispFile[:, 1]), (index_cb+1)])
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_ylabel('Moment Reaction, RMz',
fontname="Times New Roman",
size=10)
if self.cb_dt_dynamic.isChecked() == False:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_xlabel('Interval',
fontname="Times New Roman", size=10)
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.plot(intrl_disp_dynamic, res_dynamic, color='r', linewidth=1)
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_xlim(xmin=0, xmax=np.max(intrl_disp_dynamic))
elif self.cb_dt_dynamic.isChecked() == True:
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_xlabel('Time (s)', fontname="Times New Roman", size=10)
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.plot(time_series_dynamic, res_dynamic, color='r', linewidth=1)
self.PdynamicWidget.canvas_pdynamic.axes_pdynamic.set_xlim(xmin=0, xmax=np.max(time_series_dynamic))
elif self.cb_multiDynamic.isChecked() == True:
col_count = self.tbl_dynamic_mtp.columnCount()
row_count = self.tbl_dynamic_mtp.rowCount()
headers = [str(self.tbl_dynamic_mtp.horizontalHeaderItem(i).text()) for i in range(col_count)]
df_list = []
for row in range(row_count):
df_list2 = []
for col in range(col_count):
table_item = self.tbl_dynamic_mtp.item(row, col)
df_list2.append('' if table_item is None else str(table_item.text()))
df_list.append(df_list2)
df = | pd.DataFrame(df_list, columns=headers) | pandas.DataFrame |
import collections
import warnings
from math import sqrt
from typing import Union, Tuple
import nolds
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats
from arch import unitroot
from numpy.linalg import LinAlgError
from pandas.tseries.frequencies import to_offset
from scipy import signal
from sklearn.neighbors import KernelDensity
from tsfresh.feature_extraction import feature_calculators as fc
import periodicity as per
import swinging_door as swd
import util
# noinspection PyMethodMayBeStatic
class TimeSeriesCharacteristics:
"""
The base class for calculating time series characteristics. Each function has
the time series ``data`` as first parameter. The characteristics are designed
to work on standardized (zero mean, unit variance) data, so ensure that all
time series are standardized before calling the functions of this class.
"""
def __init__(self, block_padding_limit: float = None):
"""
Creates a new ``TimeSeriesCharacteristics``.
:param block_padding_limit: If not None, specifies the limit in % of the data
size, which the last block shall not exceed when calculating block-wise
metrics. This takes effect in case the data length is not evenly divisible
by the block size and thus the last block is smaller. If this last block
is larger than this limit, an exception is raised (default: None)
Example:
data length = 100, block size = 60, block_padding_limit = 0.2 = 20%
last block size = 40 = 40% of data length
40% > 20% ---> raise exception
"""
self.block_padding_limit = block_padding_limit
##########################################################################
# helper functions
##########################################################################
@staticmethod
def _ensure_ndarray(data):
if isinstance(data, pd.Series):
return data.values
if isinstance(data, np.ndarray):
return data
return np.array(data, copy=False)
@staticmethod
def _check_block_padding(data_size, block_size, limit):
"""
Block-based metrics usually run into the problem that the last block is smaller, if the data length is
not evenly divisible by the block size. This function raises an exception if the last block size is
larger than x % of the overall data size.
:param data_size: The length of the data
:param block_size: The length of the block
:param limit: The limit in % of the data size, which the last block shall not exceed
:return The padding ratio, i.e. the size of the remaining, last block, in % of the overall data size
"""
padding = (data_size % block_size) / data_size
if padding > limit:
raise Exception("block size {:d} is not evenly divisible and would ignore the last block of size {:d} % {:d} = {:d}, "
"which is too large to be ignored ({:.1%}) - change the block size or increase the limit (current limit: {:.1%})"
.format(block_size, data_size, block_size, data_size % block_size, padding, limit))
return padding
def _block_metrics(self, data, block_size: int):
"""
Computes stability, lumpiness, level shift, and variance change in one pass
:return: A dictionary with keys index and value
"""
if self.block_padding_limit is not None:
TimeSeriesCharacteristics._check_block_padding(len(data), block_size, self.block_padding_limit)
means, variances = [], []
for i in range(0, data.shape[0], block_size):
block = data[i:i + block_size]
means.append(block.mean())
variances.append(block.var(ddof=1))
stability_ = np.var(means)
lumpiness_ = np.var(variances)
level_shift_ = np.nan if len(means) <= 1 else max(np.abs(np.diff(means)))
variance_change_ = np.nan if len(variances) <= 1 else max(np.abs(np.diff(variances)))
return dict(stability=stability_,
lumpiness=lumpiness_,
level_shift=level_shift_,
variance_change=variance_change_)
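# Minimal usage sketch (illustrative only, values depend on the random data):
#   tsc = TimeSeriesCharacteristics(block_padding_limit=0.2)
#   tsc._block_metrics(np.random.randn(600), block_size=60)
#   -> {'stability': ..., 'lumpiness': ..., 'level_shift': ..., 'variance_change': ...}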
def _kullback_leibler_core(self, data: Union[np.ndarray, pd.Series], block_size: int,
interval: Union[str, Tuple[int, int]] = "infer", resolution: int = 100):
"""
Computes the Kullback-Leibler score, which is the difference of
Kullback-Leibler divergences of consecutive blocks.
The distribution within a block is estimated with a Gaussian KDE.
The maximum difference between Kullback-Leibler divergences is returned.
Invented by Hyndman et al. https://doi.org/10.1109/ICDMW.2015.104
:param data: The time series as a one-dimensional set of data points
:param block_size: The number of data points per block
:param interval: The (min, max) interval of the data, on which the distribution shall be estimated.
If you choose "infer", the minimum and maximum of the data is inferred automatically. (default: "infer")
:param resolution: The resolution of the density estimation (default: 100)
:return: A dictionary with keys index and value
"""
# min = -inf, max = inf
if self.block_padding_limit is not None:
TimeSeriesCharacteristics._check_block_padding(len(data), block_size, self.block_padding_limit)
data = TimeSeriesCharacteristics._ensure_ndarray(data)
# the value range, onto which we estimate the distribution
if interval == "infer":
x_space = np.linspace(np.min(data), np.max(data), resolution)
else:
x_space = np.linspace(*interval, resolution)
x_space = x_space.reshape(-1, 1)
# estimate the kde bandwidth parameter with Silverman's rule of thumb over the entire data space
bw = 0.9 * min(np.std(data), sp.stats.iqr(data) / 1.34) * (len(data) ** (- 1 / 5))
bw = max(0.05, bw) # ... avoid too narrow bandwidths
kls, probs_pre = [], None
for i in range(0, data.shape[0], block_size):
block = data[i:i + block_size].reshape(-1, 1)
# ignore the last block if its not a full one any more
if len(block) != block_size:
break
# kde of current block
kde = KernelDensity(bandwidth=bw, kernel="gaussian", metric="euclidean")
kde.fit(block)
probs = np.exp(kde.score_samples(x_space))
probs[probs < 1E-6] = 1E-6 # avoid divisions by zero
# kl divergence between consecutive blocks
if i > 0:
kls.append(sp.stats.entropy(probs_pre, probs))
probs_pre = probs
# the maximum of kl divergence differences
kls_diff = np.diff(kls)
if len(kls_diff) == 0:
return dict(index=np.nan, value=np.nan)
kl_diff_max_index_ = np.argmax(kls_diff)
return dict(index=kl_diff_max_index_ / len(kls_diff),
value=kls_diff[kl_diff_max_index_])
##########################################################################
# 1. Distributional Features
##########################################################################
##########################################################################
# 1.1. Distributional Dispersion Features
##########################################################################
@util.copy_doc_from(fc.kurtosis)
def kurtosis(self, data):
# min = -3, max = inf
return fc.kurtosis(data)
@util.copy_doc_from(fc.skewness)
def skewness(self, data):
# min = -inf, max = inf
return fc.skewness(data)
def shift(self, data):
"""
Returns the mean minus the median of those values that are smaller than the mean.
"""
# min = -inf, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
mean_ = np.mean(data)
subset_ = data[data < mean_]
shift_ = np.mean(subset_) - np.median(subset_)
return shift_
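# Worked example: for data = [-6, -1, -1, 8] the mean is 0, the values below the mean
# are [-6, -1, -1], and shift = mean([-6, -1, -1]) - median([-6, -1, -1]) = -8/3 - (-1)
# ~= -1.67, i.e. the lower tail pulls the sub-mean average below its median.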
##########################################################################
# 1.2. Distributional Dispersion Blockwise Features
##########################################################################
def lumpiness(self, data, block_size: int):
"""
Returns the variance of the variances of all (non-overlapping) blocks of size ``block_size``.
"""
# min = 0, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
lumpiness_ = self._block_metrics(data, block_size)["lumpiness"]
return lumpiness_
def stability(self, data, block_size: int):
"""
Returns the variance of the means of all (non-overlapping) blocks of size ``block_size``.
"""
# min = 0, max = 1 (for z-norm data)
data = TimeSeriesCharacteristics._ensure_ndarray(data)
stability_ = self._block_metrics(data, block_size)["stability"]
return stability_
##########################################################################
# 1.3. Distributional Duplicates Features
##########################################################################
def normalized_duplicates_max(self, data):
"""
Returns ``x / len(data)`` where ``x`` is the number of duplicates that
have the maximum value of ``data``. If there are no duplicates, i.e.,
the maximum value occurs only once, 0 is returned.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
count = np.sum(data == np.max(data))
if count > 1:
denom = len(data)
return count / denom if denom != 0 else np.nan
return 0
def normalized_duplicates_min(self, data):
"""
Returns ``x / len(data)`` where ``x`` is the number of duplicates that
have the minimum value of ``data``. If there are no duplicates, i.e.,
the minimum value occurs only once, 0 is returned.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
count = np.sum(data == np.min(data))
if count > 1:
denom = len(data)
return count / denom if denom != 0 else np.nan
return 0
@util.copy_doc_from(fc.percentage_of_reoccurring_datapoints_to_all_datapoints)
def percentage_of_reoccurring_datapoints(self, data):
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return fc.percentage_of_reoccurring_datapoints_to_all_datapoints(data)
@util.copy_doc_from(fc.percentage_of_reoccurring_values_to_all_values)
def percentage_of_reoccurring_values(self, data):
# min = 0, max = 1
return fc.percentage_of_reoccurring_values_to_all_values(data)
@util.copy_doc_from(fc.ratio_value_number_to_time_series_length)
def percentage_of_unique_values(self, data):
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return fc.ratio_value_number_to_time_series_length(data)
##########################################################################
# 1.4. Distributional Distribution Features
##########################################################################
@util.copy_doc_from(fc.quantile)
def quantile(self, data, q: float):
# min = -inf, max = inf
return fc.quantile(data, q)
@util.copy_doc_from(fc.ratio_beyond_r_sigma)
def ratio_beyond_r_sigma(self, data, r):
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return fc.ratio_beyond_r_sigma(data, r)
def ratio_large_standard_deviation(self, data):
"""
Returns the ratio between the standard deviation and the ``(max โ min)`` range
of the data, based on the range rule of thumb.
"""
# min = 0, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
denom = np.max(data) - np.min(data)
return np.std(data) / denom if denom != 0 else np.nan
##########################################################################
# 2. Temporal Features
##########################################################################
##########################################################################
# 2.1. Temporal Dispersion Features
##########################################################################
@util.copy_doc_from(fc.mean_abs_change)
def mean_abs_change(self, data):
# min = 0, max = 2 * sqrt((m ** 2 + m + 1 / 4) / (m ** 2 + m))
# with m = ceil(N / 2) and with N = data length (for z-norm data)
# range normalized to [0, 1] assuming z-normed data
data = TimeSeriesCharacteristics._ensure_ndarray(data)
mac = fc.mean_abs_change(data)
m = len(data) // 2
denom = 2 * sqrt((m ** 2 + m + 1 / 4) / (m ** 2 + m))
return mac / denom
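# Note: the denominator is the maximum mean absolute change stated above for a
# z-normalized series of this length (an alternating extremal sequence); it tends to 2
# as the series grows, so the returned ratio is normalized to [0, 1].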
@util.copy_doc_from(fc.mean_second_derivative_central)
def mean_second_derivative_central(self, data):
# min = -inf, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return fc.mean_second_derivative_central(data)
##########################################################################
# 2.2. Temporal Dispersion Blockwise Features
##########################################################################
def level_shift(self, data, block_size: int):
"""
Returns the maximum difference in mean between consecutive (non-overlapping) blocks of size ``block_size``.
"""
# min = 0, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
level_shift_ = self._block_metrics(data, block_size)["level_shift"]
return level_shift_
def variance_change(self, data, block_size: int):
"""
Returns the maximum difference in variance between consecutive (non-overlapping) blocks of size ``block_size``.
"""
# min = 0, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
variance_change_ = self._block_metrics(data, block_size)["variance_change"]
return variance_change_
##########################################################################
# 2.3. Temporal Similarity Features
##########################################################################
@util.copy_doc_from(nolds.hurst_rs)
def hurst(self, data, **kwargs):
# min = -inf, max = inf (should be between 0 and 1)
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return nolds.hurst_rs(data, **kwargs)
@util.copy_doc_from(fc.autocorrelation)
def autocorrelation(self, data, lag: int):
# min = -1, max = 1
# range normalized to [0, 1]
return (fc.autocorrelation(data, lag) + 1) / 2
##########################################################################
# 2.4. Temporal Frequency Features
##########################################################################
@util.copy_doc_from(per.periodicity)
def periodicity(self, data, periods=None, dt_min=None, replace_nan=0):
# min = 0, max = inf
if periods is None:
periods = per.PERIODS
result = per.periodicity(data, periods=periods, dt_min=dt_min)
return result if replace_nan is None else result.fillna(replace_nan)
def agg_periodogram(self, data, funcs, lowest_freq=None, highest_freq=None, dt_min=None):
"""
Returns a list of tuples of aggregated periodogram power values. The first entry of
a tuple is the name of applied function, the second is the calculated value.
:param data: The time series.
:param funcs: A list of numpy function strings or tuples. For a tuple, the first entry must be the
numpy function string and the second entry must be a dict containing keyword arguments.
:param lowest_freq: The lowest frequency to consider. Lower frequencies are discarded.
:param highest_freq: The highest frequency to consider. Higher frequencies are discarded.
:param dt_min: The time interval between values of ``data`` in minutes. If None, ``data`` must have a
DateTimeIndex with a set frequency (e.g., via ``data = data.asfreq("1min")``) so the time interval
can be inferred (default: None = infer time interval from ``data``).
:return: A list of tuples of aggregated periodogram power values.
"""
# min = 0, max = inf
# time interval in minutes
if dt_min is None:
dt_min = pd.to_timedelta(to_offset(data.index.freq)).total_seconds() / 60
fxx, pxx = signal.periodogram(data.values, fs=dt_min, return_onesided=True, detrend=None, scaling="spectrum")
# skip the offset
fxx = fxx[1:]
pxx = pxx[1:]
if lowest_freq is not None and highest_freq is not None:
assert lowest_freq < highest_freq
indices = np.argwhere((fxx >= lowest_freq) & (fxx <= highest_freq)).flatten()
pxx = pxx[indices]
elif lowest_freq is not None:
indices = np.argwhere(fxx >= lowest_freq).flatten()
pxx = pxx[indices]
elif highest_freq is not None:
indices = np.argwhere(fxx <= highest_freq).flatten()
pxx = pxx[indices]
result = []
for f in funcs:
if isinstance(f, str):
method = getattr(np, f)
result.append((f, method(pxx)))
else:
f, params = f
method = getattr(np, f)
params_str = "".join([f"__{k}_{v}" for k, v in params.items()])
result.append((f"{f}{params_str}", method(pxx, **params)))
return result
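# Usage sketch (hypothetical series with a DateTimeIndex and a set frequency):
#   tsc.agg_periodogram(series, funcs=["max", "mean", ("percentile", {"q": 90})])
#   -> [("max", ...), ("mean", ...), ("percentile__q_90", ...)]
# Plain strings are looked up as numpy functions; tuples additionally pass keyword
# arguments, which are appended to the reported feature name.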
##########################################################################
# 2.5. Temporal Linearity Features
##########################################################################
@util.copy_doc_from(fc.linear_trend)
def linear_trend_slope(self, data):
"""
This method only returns the slope, i.e., param=[{"attr": "slope"}].
"""
# min = -inf, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
# return value is a list where each list entry corresponds to one attribute; since we only
# have one attribute, this list only has one entry; this entry is a tuple where the first
# part is the attribute name ('attr_"slope"') and the second is the actual value
return fc.linear_trend(data, param=[{"attr": "slope"}])[0][1]
@util.copy_doc_from(fc.linear_trend)
def linear_trend_rvalue2(self, data):
"""
This method only returns the squared rvalue (= coefficient of determination),
i.e., param=[{"attr": "rvalue"}].
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
# return value is a list where each list entry corresponds to one attribute; since we only
# have one attribute, this list only has one entry; this entry is a tuple where the first
# part is the attribute name ('attr_"rvalue"') and the second is the actual value
rvalue = fc.linear_trend(data, param=[{"attr": "rvalue"}])[0][1]
return rvalue ** 2
@util.copy_doc_from(fc.agg_linear_trend)
def agg_linear_trend_slope(self, data, block_sizes):
"""
This method only returns the variance-aggregated slopes,
i.e., param=[{..., "f_agg": "var", "attr": "slope"}].
"""
# min = -inf, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
param = [{"f_agg": "var", "attr": "slope", "chunk_len": b} for b in block_sizes]
return fc.agg_linear_trend(data, param)
@util.copy_doc_from(fc.agg_linear_trend)
def agg_linear_trend_rvalue2(self, data, block_sizes):
"""
This method only returns the mean-aggregated squared rvalues (= coefficient of determination),
i.e., param=[{..., "f_agg": "mean", "attr": "rvalue"}].
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
param = [{"f_agg": "mean", "attr": "rvalue", "chunk_len": b} for b in block_sizes]
result = fc.agg_linear_trend(data, param)
return [(key, val ** 2) for key, val in result]
@util.copy_doc_from(fc.c3)
def c3(self, data, lag):
# min = -inf, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return fc.c3(data, lag)
@util.copy_doc_from(fc.time_reversal_asymmetry_statistic)
def time_reversal_asymmetry_statistic(self, data, lag: int):
# min = -inf, max = inf
return fc.time_reversal_asymmetry_statistic(data, lag)
##########################################################################
# 3. Complexity Features
##########################################################################
##########################################################################
# 3.1. Complexity Entropy Features
##########################################################################
@util.copy_doc_from(fc.binned_entropy)
def binned_entropy(self, data, max_bins: int):
# min = 0, max = np.log(max_bins)
# normalized to [0, 1]
data = TimeSeriesCharacteristics._ensure_ndarray(data)
denom = np.log(max_bins)
return fc.binned_entropy(data, max_bins) / denom
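# The division by log(max_bins) rescales the entropy to [0, 1]: a discrete distribution
# over max_bins bins has entropy at most ln(max_bins) (reached in the uniform case),
# which is the maximum stated in the comment above.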
def kullback_leibler_score(self, data: Union[np.ndarray, pd.Series], block_size: int,
interval: Union[str, Tuple[int, int]] = "infer", resolution: int = 100):
"""
Computes the Kullback-Leibler score, which is the difference of
Kullback-Leibler divergences of consecutive blocks.
The distribution within a block is estimated with a Gaussian KDE.
The maximum difference between Kullback-Leibler divergences is returned.
Invented by Hyndman et al. https://doi.org/10.1109/ICDMW.2015.104
:param data: The time series as a one-dimensional set of data points
:param block_size: The number of data points per block
:param interval: The (min, max) interval of the data, on which the distribution shall be estimated.
If you choose "infer", the minimum and maximum of the data is inferred automatically. (default: "infer")
:param resolution: The resolution of the density estimation (default: 100)
:return: The Kullback-Leibler score
"""
# min = -inf, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return self._kullback_leibler_core(data, block_size, interval, resolution)["value"]
def index_of_kullback_leibler_score(self, data: Union[np.ndarray, pd.Series], block_size: int,
interval: Union[str, Tuple[int, int]] = "infer", resolution: int = 100):
"""
Computes the index of the Kullback-Leibler score, which is the difference of
Kullback-Leibler divergences of consecutive blocks.
The distribution within a block is estimated with a Gaussian KDE.
The maximum difference between Kullback-Leibler divergences is returned.
Invented by Hyndman et al. https://doi.org/10.1109/ICDMW.2015.104
:param data: The time series as a one-dimensional set of data points
:param block_size: The number of data points per block
:param interval: The (min, max) interval of the data, on which the distribution shall be estimated.
If you choose "infer", the minimum and maximum of the data is inferred automatically. (default: "infer")
:param resolution: The resolution of the density estimation (default: 100)
:return: The (normalized) index of the Kullback-Leibler score
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return self._kullback_leibler_core(data, block_size, interval, resolution)["index"]
##########################################################################
# 3.2. Complexity (Miscellaneous) Complexity Features
##########################################################################
@util.copy_doc_from(fc.cid_ce)
def cid_ce(self, data, normalize: bool = False):
# min = 0, max = inf
data = TimeSeriesCharacteristics._ensure_ndarray(data)
return fc.cid_ce(data, normalize)
def permutation_analysis(self, data):
warnings.warn("not implemented due to NDA, returning 0")
return 0
def swinging_door_compression_rate(self, data, eps):
"""
Returns the compression ratio of the data when using the swinging door compression algorithm:
0 = no compression (number of compressed datapoints = number of original datapoints)
1 = total compression (original datapoints can be represented with only 2 points: the start point + the end point)
"""
# min = 0, max = 1
# do not include error measures (such as in swd.compression_quality) because we cannot
# return the error measures together with the compression rate as this would cause
# problems with the distance measure in the clustering step
x = range(len(data))
y = TimeSeriesCharacteristics._ensure_ndarray(data)
xc, yc = swd.swinging_door_sampling(x, y, eps)
# the minimum compressed signal has a length of 2 (start point + end point), so subtract
# 2 to get values in the range [0, 1]
return 1 - ((len(yc) - 2) / (len(y) - 2))
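# Illustrative expectation (assuming the standard swinging-door behaviour of the swd
# helper): a perfectly linear ramp such as np.linspace(-1, 1, 100) with a non-zero eps
# should compress to its two endpoints and return 1.0, while a signal the algorithm
# cannot compress at all keeps every point and returns 0.0.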
##########################################################################
# 3.3. Complexity Flatness Features
##########################################################################
def normalized_crossing_points(self, data):
"""
Returns the (normalized) number of times a time series crosses the mean line.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
# the maximum number of segments above the mean is reached when the time series
# starts above the mean and then continuously crosses the mean every timestamp;
# this means that we get a segment above the mean every second timestamp
# (up - below - up - below - up ...); we thus have at maximum len(data) / 2
# segments above the mean + the additional one at the start of the time series
# this one segment at the start is actually only relevant if the length of the
# time series is odd; therefore, we could also write ceil(len(data) / 2)
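# illustrative example (added for clarity): for data = [3, 0, 3, 0, 3] the mean is 1.8 and
# above = [1, 0, 1, 0, 1], so (np.diff(above) == 1).sum() is 2 and above[0] adds 1, giving
# count = 3; denom = (5 + 1) / 2 = 3.0, hence the normalized value is 1.0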
mean = np.mean(data)
above = (data > mean).astype("int")
count = ((np.diff(above) == 1).sum() + above[0])
denom = (len(data) + 1) / 2 # for odd lengths this equals np.math.ceil(len(data) / 2)
return count / denom if denom != 0 else np.nan
@util.copy_doc_from(fc.count_above_mean)
def normalized_above_mean(self, data):
"""
Returns ``r / (len(data) - 1)`` where ``r`` is the result of ``count_above_mean``.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
denom = len(data) - 1
return fc.count_above_mean(data) / denom if denom != 0 else np.nan
@util.copy_doc_from(fc.count_below_mean)
def normalized_below_mean(self, data):
"""
Returns ``r / (len(data) - 1)`` where ``r`` is the result of ``count_below_mean``.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
denom = len(data) - 1
return fc.count_below_mean(data) / denom if denom != 0 else np.nan
@util.copy_doc_from(fc.longest_strike_above_mean)
def normalized_longest_strike_above_mean(self, data):
"""
Returns ``r / len(data)`` where ``r`` is the result of ``longest_strike_above_mean``.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
denom = len(data)
return fc.longest_strike_above_mean(data) / denom if denom != 0 else np.nan
@util.copy_doc_from(fc.longest_strike_below_mean)
def normalized_longest_strike_below_mean(self, data):
"""
Returns ``r / len(data)`` where ``r`` is the result of ``longest_strike_below_mean``.
"""
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
denom = len(data)
return fc.longest_strike_below_mean(data) / denom if denom != 0 else np.nan
def flat_spots(self, data, n_intervals=10, mode="quantile", lower_bound=None, upper_bound=None):
"""
Flat spots are computed by dividing the sample space of a time series into ten equal-sized intervals,
and computing the maximum run length within any single interval. We can use ``n_intervals`` for the number
of intervals and we can choose whether they should be equal-sized in the sense of equal value range
(mode = "linear") or in the sense of equal number of datapoints in the intervals (mode = "quantile").
We normalize the maximum run length of each interval with the length of the time series, i.e.,
the sum of all interval max run lengths is at most 1
:param data: The time series
:param n_intervals: The number of bins into which the value space is divided
:param mode: "linear" divides the value space equally, while "quantile"
ensures that there is an equal number of data points per interval
:param lower_bound: Enforce a lower bound on the value range
:param upper_bound: Enforce an upper bound on the value range
:return: The (normalized) maximum run length per interval
"""
# min = 0, max = 1
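# illustrative shape of the return value (hypothetical numbers) for mode="quantile" and
# n_intervals=3, with keys built from the np.linspace quantile levels used below:
# {"(-inf,0.333..)": 0.12, "[0.333..,0.666..)": 0.05, "[0.666..,+inf)": 0.31}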
data = TimeSeriesCharacteristics._ensure_ndarray(data)
max_run_lengths = dict()
if mode == "quantile":
bound_ids = np.linspace(0, 1, n_intervals + 1)
intervals = np.quantile(data, q=bound_ids)
elif mode == "linear":
if lower_bound is None:
lower_bound = np.min(data)
if upper_bound is None:
upper_bound = np.max(data)
bound_ids = [i for i in range(n_intervals + 1)]
intervals = np.linspace(lower_bound, upper_bound, n_intervals + 1)
else:
raise ValueError(f"unknown 'mode': {mode}")
for j, (lower, upper, id_lower, id_upper) in enumerate(zip(intervals[:-1], intervals[1:], bound_ids[:-1], bound_ids[1:])):
# to not miss any values, include everything below the lowest bound and everything above the upper most bound
if j == 0:
indices = np.argwhere(data < upper).flatten()
bound_id = f"(-inf,{id_upper})"
elif j == len(intervals) - 2:
indices = np.argwhere(data >= lower).flatten()
bound_id = f"[{id_lower},+inf)"
else:
indices = np.argwhere((data >= lower) & (data < upper)).flatten()
bound_id = f"[{id_lower},{id_upper})"
# if there are fewer than two values within this interval, there is no maximum run length (set to 0)
if len(indices) < 2:
max_run_lengths[bound_id] = 0
continue
i_diff = np.diff(indices) # indices that follow each other (neighboring indices) have the value 1
max_run_length = 0
cur_run_length = 0
for i in i_diff:
if i == 1: # we found another directly following index
cur_run_length += 1
else:
if cur_run_length > max_run_length:
max_run_length = cur_run_length
cur_run_length = 0
if cur_run_length > max_run_length:
max_run_length = cur_run_length
# since we work with diffs, the actual run length is max_run_length + 1
# normalize against the total data length
max_run_lengths[bound_id] = (max_run_length + 1) / len(data)
assert len(max_run_lengths) == n_intervals
return max_run_lengths
##########################################################################
# 3.4. Complexity Peaks Features
##########################################################################
@util.copy_doc_from(fc.number_peaks)
def normalized_number_peaks(self, data, n: int):
# min = 0, max = 1
data = TimeSeriesCharacteristics._ensure_ndarray(data)
# the maximum number of peaks is reached when the peaks are evenly distributed
# within the time series signal and have the minimum required distance from each
# other in order to be classified as peak, which is the distance "n"; this distance
# must also be right at the start of the time series and at the end of the time
# series; this means that there are "p" peaks in the time series and "p + 1" gaps
# in between them (like: _^_^_^_ where "_" is the gap and "^" the peak); the length
# of all gaps + the number of peaks must yield the total time series length;
# mathematically, this can be expressed as "(p + 1) * n + p = len(data)" which can
# rewritten to get the maximum number of peaks "p" like "p = (len(data) - n) / (n + 1)"
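# worked example (added for clarity): with len(data) = 15 and n = 3 this bound gives
# p = (15 - 3) / (3 + 1) = 3, and indeed (p + 1) * n + p = 4 * 3 + 3 = 15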
denom = (len(data) - n) / (n + 1)
return fc.number_peaks(data, n) / denom if denom != 0 else np.nan
def step_changes(self, data, window_len):
"""
A step change is counted whenever ``|y_i - mean{y_(i-w)...y_(i-1)}| > 2 * sigma{y_(i-w)...y_(i-1)}``,
where, for every value ``y_i`` of the series, ``mean`` and ``sigma`` are the mean and standard
deviation of the preceding sliding window of length ``w``, i.e. of the points ``y_(i-w)`` to ``y_(i-1)``.
Only full windows are considered, so the first ``window_len`` points are discarded.
The result is normalized by the length of the sequence minus ``window_len``.
:param data: The time series
:param window_len: The length of the sliding window
:return: The (normalized) number of step changes
"""
# min = 0, max = 1
if not isinstance(data, pd.Series):
data = pd.Series(data)
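# NOTE: the original body of step_changes is truncated at this point in the source.
# A minimal sketch of the remaining logic, derived only from the docstring above (rolling
# mean/std over the preceding window, count 2-sigma exceedances, normalize by
# len(data) - window_len), might look like:
#
#     rolling = data.shift(1).rolling(window_len)
#     exceed = (data - rolling.mean()).abs() > 2 * rolling.std()
#     denom = len(data) - window_len
#     return exceed.sum() / denom if denom > 0 else np.nan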
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
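# note (added for clarity): each concrete subclass is expected to provide one test per
# (method, klass, dtype) combination, named e.g. test_setitem_series_int64;
# test_has_comprehensive_tests below enforces exactly this naming convention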
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
from datetime import datetime
from statistics import median
import pandas as pd
import recordlinkage
from recordlinkage.preprocessing import clean
from app import app, database as db
from app.data_transform.helpers import create_serial_sequence
from app.data_service.models import DataLoader, Table
from app.history.models import History
def _ci(*args: str):
if len(args) == 1:
return '"{}"'.format(str(args[0]).replace('"', '""'))
return ['"{}"'.format(str(arg).replace('"', '""')) for arg in args]
def _cv(*args: str):
if len(args) == 1:
return "'{}'".format(str(args[0]).replace("'", "''"))
return ["'{}'".format(str(arg).replace("'", "''")) for arg in args]
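# illustrative behaviour of the two quoting helpers above (added for clarity):
#     _ci('my table') -> '"my table"' (identifier quoting, embedded " is doubled)
#     _cv("it's") -> "'it''s'" (literal quoting, embedded ' is doubled)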
history = History()
class DataTransformer:
def __init__(self):
pass
def impute_missing_data_on_average(self, schema_id, table, column):
"""Impute missing data based on the average."""
try:
schema_name = 'schema-' + str(schema_id)
rows = db.engine.execute('SELECT AVG({}) FROM {}.{};'.format(*_ci(column, schema_name, table)))
average = rows.first()[0]
if not average:
average = 0
columns = DataLoader().get_column_names_and_types(schema_id, table)
type = ""
for column_t in columns:
if column_t.name == column:
type = column_t.type
break
if type == "integer":
average = int(round(average))
null_rows = [row['id'] for row in db.engine.execute(
'SELECT id from {}.{} WHERE {} IS NULL;'.format(*_ci(schema_name, table, column))).fetchall()]
db.engine.execute('UPDATE {0}.{1} SET {2} = {3} WHERE {2} IS NULL;'.format(*_ci(schema_name, table, column),
_cv(average)))
inverse_query = 'UPDATE {}.{} SET {} = NULL WHERE id in ({});'.format(*_ci(schema_name, table, column), ', '.join(_cv(row) for row in null_rows))
history.log_action(schema_id, table, datetime.now(), 'Imputed missing data on average', inverse_query)
except Exception as e:
app.logger.error("[ERROR] Unable to impute missing data for column {} by average".format(column))
app.logger.exception(e)
raise e
def impute_missing_data_on_median(self, schema_id, table, column):
"""Impute missing data based on the median."""
try:
schema_name = 'schema-' + str(schema_id)
rows = db.engine.execute('SELECT {} FROM {}.{};'.format(*_ci(column, schema_name, table)))
values = list()
for value in rows:
if value[0] is not None:
values.append(value[0])
if (len(values)) == 0:
median_val = 0
else:
median_val = median(values)
null_rows = [row['id'] for row in db.engine.execute(
'SELECT id from {}.{} WHERE {} IS NULL;'.format(*_ci(schema_name, table, column))).fetchall()]
db.engine.execute('UPDATE {0}.{1} SET {2} = {3} WHERE {2} IS NULL;'.format(*_ci(schema_name, table, column),
_cv(median_val)))
inverse_query = 'UPDATE {}.{} SET {} = NULL WHERE id in ({});'.format(*_ci(schema_name, table, column), ', '.join(_cv(row) for row in null_rows))
history.log_action(schema_id, table, datetime.now(), 'Imputed missing data on median', inverse_query)
except Exception as e:
app.logger.error("[ERROR] Unable to impute missing data for column {} by median".format(column))
app.logger.exception(e)
raise e
def impute_missing_data_on_value(self, schema_id, table, column, value, function):
"""Impute missing data with the given value."""
try:
schema_name = 'schema-' + str(schema_id)
null_rows = [row['id'] for row in db.engine.execute(
'SELECT id from {}.{} WHERE {} IS NULL;'.format(*_ci(schema_name, table, column))).fetchall()]
db.engine.execute('UPDATE {0}.{1} SET {2} = {3} WHERE {2} IS NULL;'.format(*_ci(schema_name, table, column),
_cv(value)))
inverse_query = 'UPDATE {}.{} SET {} = NULL WHERE id in ({});'.format(*_ci(schema_name, table, column), ', '.join(_cv(row) for row in null_rows))
history.log_action(schema_id, table, datetime.now(), 'Imputed missing data on ' + function.lower(), inverse_query)
except Exception as e:
app.logger.error("[ERROR] Unable to impute missing data for column {}".format(column))
app.logger.exception(e)
raise e
def impute_missing_data(self, schema_id, table, column, function, custom_value=None):
"""Impute missing data using the requested strategy: average, median, most common value, or a custom value."""
if function == "AVG":
return self.impute_missing_data_on_average(schema_id, table, column)
elif function == "MEDIAN":
return self.impute_missing_data_on_median(schema_id, table, column)
elif function == "MCV":
value = DataLoader().calculate_most_common_value(schema_id, table, column)
return self.impute_missing_data_on_value(schema_id, table, column, value, "most common value")
elif function == "CUSTOM":
return self.impute_missing_data_on_value(schema_id, table, column, custom_value, "custom value")
else:
app.logger.error("[ERROR] Unable to impute missing data for column {}".format(column))
raise Exception
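# illustrative usage of the dispatcher above (hypothetical schema id, table and column names):
#     DataTransformer().impute_missing_data(3, 'measurements', 'temperature', 'MEDIAN')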
def find_and_replace(self, schema_id, table, column, to_be_replaced, replacement, replacement_function):
"""Find and replace occurrences of a value in the given column."""
try:
schema_name = 'schema-' + str(schema_id)
query = ""
updated_rows = list()
if replacement_function == "substring":
updated_rows = [row['id'] for row in db.engine.execute('SELECT id FROM {}.{} WHERE {} LIKE {};'.format(
*_ci(schema_name, table, column), _cv('%%'+to_be_replaced+'%%'))).fetchall()]
query = 'UPDATE {0}.{1} SET {2} = REPLACE({2}, {3}, {4});'.format(*_ci(schema_name, table, column),
*_cv(to_be_replaced, replacement))
elif replacement_function == "full replace":
updated_rows = [row['id'] for row in db.engine.execute('SELECT id FROM {}.{} WHERE {}={};'.format(
*_ci(schema_name, table, column), _cv(to_be_replaced))).fetchall()]
query = 'UPDATE {0}.{1} SET {2} = {3} WHERE {2} = {4};'.format(*_ci(schema_name, table, column),
*_cv(replacement, to_be_replaced))
else:
app.logger.error("[ERROR] Unable to perform find and replace")
db.engine.execute(query)
if replacement_function == 'substring':
inverse_query = ''
for row_id in updated_rows:
inverse_query += 'UPDATE {0}.{1} SET {2} = REPLACE({2}, {3}, {4}) WHERE id = {5};'.format(
*_ci(schema_name, table, column), *_cv(replacement, to_be_replaced), row_id)
else:
inverse_query = ''
for row_id in updated_rows:
inverse_query += 'UPDATE {}.{} SET {} = {} WHERE id = {};'.format(*_ci(schema_name, table, column),
*_cv(to_be_replaced, row_id))
history.log_action(schema_id, table, datetime.now(), 'Used find and replace', inverse_query)
except Exception as e:
app.logger.error("[ERROR] Unable to perform find and replace")
app.logger.exception(e)
raise e
def find_and_replace_by_regex(self, schema_id, table, column, regex, replacement):
"""Find and replace values in the given column using a regular expression."""
try:
regex = regex.replace('%', '%%')
schema_name = 'schema-' + str(schema_id)
query = 'UPDATE {0}.{1} SET {2} = regexp_replace({2}, {3}, {4});'.format(*_ci(schema_name, table, column),
*_cv(regex, replacement))
updated_rows = [(row['id'], row[column]) for row in db.engine.execute('SELECT id, {2} FROM {0}.{1} WHERE {2} LIKE {3};'.format(
*_ci(schema_name, table, column), _cv(regex))).fetchall()]
inverse_query = 'UPDATE {}.{} SET {} = CASE id '.format(*_ci(schema_name, table, column))
row_ids = []
for row_id, original_data in updated_rows:
row_ids.append(row_id)
inverse_query += 'WHEN {} THEN {} '.format(*_cv(row_id, original_data))
inverse_query += 'END WHERE id IN ({});'.format(', '.join(_cv(row_id) for row_id in row_ids))
db.engine.execute(query)
history.log_action(schema_id, table, datetime.now(), 'Used find and replace', inverse_query)
except Exception as e:
app.logger.error("[ERROR] Unable to perform find and replace by regex")
app.logger.exception(e)
raise e
class DateTimeTransformer:
def __init__(self):
pass
def extract_element_from_date(self, schema_id, table, column, element):
"""Extract an element (e.g. day of week, month, year) of a timestamp column into a new column."""
try:
schema_name = 'schema-' + str(schema_id)
new_column = column + ' (' + element + ')'
data_loader = DataLoader()
data_loader.insert_column(schema_id, table, new_column, "double precision", False)
db.engine.execute(
'UPDATE {}.{} SET {} = (EXTRACT({} FROM {}::TIMESTAMP));'.format(*_ci(schema_name, table, new_column),
element, _ci(column)))
except Exception as e:
app.logger.error("[ERROR] Unable to extract " + element + " from column '{}'".format(column))
app.logger.exception(e)
raise e
# Log action to history
inverse_query = 'ALTER TABLE {}.{} DROP COLUMN IF EXISTS {};'.format(*_ci(schema_name, table, new_column))
history.log_action(schema_id, table, datetime.now(), 'Extracted ' + element + ' from column ' + column, inverse_query)
def extract_date_or_time(self, schema_id, table, column, element):
"""extract date or time from datetime type"""
try:
schema_name = 'schema-' + str(schema_id)
new_column = column + ' (' + element + ')'
data_loader = DataLoader()
data_loader.insert_column(schema_id, table, new_column, "varchar(255)", False)
db.engine.execute(
'UPDATE {0}.{1} SET {2} = {3}::{4};'.format(*_ci(schema_name, table, new_column, column), element))
except Exception as e:
app.logger.error("[ERROR] Unable to extract " + element + " from column '{}'".format(column))
app.logger.exception(e)
raise e
# Log action to history
inverse_query = 'ALTER TABLE {}.{} DROP COLUMN IF EXISTS {};'.format(*_ci(schema_name, table, new_column))
history.log_action(schema_id, table, datetime.now(), 'Extracted ' + element + ' from column ' + column, inverse_query)
def get_transformations(self):
trans = ["extract day of week", "extract month", "extract year", "extract date", "extract time"]
return trans
def transform(self, schema_id, table, column, operation):
if operation == "extract day of week":
return self.extract_element_from_date(schema_id, table, column, "DOW")
elif operation == "extract month":
return self.extract_element_from_date(schema_id, table, column, "MONTH")
elif operation == "extract year":
return self.extract_element_from_date(schema_id, table, column, "YEAR")
elif operation == "extract date":
return self.extract_date_or_time(schema_id, table, column, "DATE")
elif operation == "extract time":
return self.extract_date_or_time(schema_id, table, column, "TIME")
class NumericalTransformations:
def __init__(self):
pass
def normalize(self, schema_id, table_name, column_name):
connection = db.engine.connect()
transaction = connection.begin()
try:
schema_name = 'schema-' + str(schema_id)
df = pd.read_sql_query('SELECT * FROM {}.{};'.format(*_ci(schema_name, table_name)), db.engine)
new_column_name = column_name + '_norm'
df[new_column_name] = df[column_name]
if df[column_name].std(ddof=0):
df[new_column_name] = (df[column_name] - df[column_name].mean()) / df[column_name].std(ddof=0)
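# z-score standardization; e.g. (illustrative) values [2, 4, 6] have mean 4 and population
# std (ddof=0) of about 1.633, so the new column becomes roughly [-1.22, 0.0, 1.22]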
df.to_sql(name=table_name, con=db.engine, schema=schema_name, if_exists='replace', index=False)
create_serial_sequence(schema_name, table_name)
inverse_query = 'ALTER TABLE {}.{} DROP COLUMN IF EXISTS {};'.format(*_ci(schema_name, table_name, new_column_name))
history.log_action(schema_id, table_name, datetime.now(),
'Normalized data of column {}'.format(column_name), inverse_query)
except Exception as e:
transaction.rollback()
app.logger.error("[ERROR] Couldn't normalize data")
app.logger.exception(e)
raise e
def equal_width_interval(self, schema_id, table_name, column_name, num_intervals):
connection = db.engine.connect()
transaction = connection.begin()
try:
schema_name = 'schema-' + str(schema_id)
df = pd.read_sql_query('SELECT * FROM {}.{};'.format(*_ci(schema_name, table_name)), db.engine)
new_column_name = column_name + '_intervals_eq_w_' + str(num_intervals)
df[new_column_name] = pd.cut(df[column_name], num_intervals, precision=9).apply(str)
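# illustrative labels produced above: for values spanning 0..10 and num_intervals=2,
# pd.cut yields interval strings roughly like "(-0.01, 5.0]" and "(5.0, 10.0]"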
df.to_sql(name=table_name, con=db.engine, schema=schema_name, if_exists='replace', index=False)
create_serial_sequence(schema_name, table_name)
transaction.commit()
inverse_query = 'ALTER TABLE {}.{} DROP COLUMN IF EXISTS {};'.format(*_ci(schema_name, table_name, new_column_name))
history.log_action(schema_id, table_name, datetime.now(),
'Generated equal width intervals for data of column {}'.format(column_name), inverse_query)
except Exception as e:
transaction.rollback()
app.logger.error("[ERROR] Couldn't process intervals with equal width")
app.logger.exception(e)
raise e
def equal_freq_interval(self, schema_id, table_name, column_name, num_intervals):
connection = db.engine.connect()
transaction = connection.begin()
try:
schema_name = 'schema-' + str(schema_id)
df = pd.read_sql_query('SELECT * FROM {}.{};'.format(*_ci(schema_name, table_name)), db.engine)
new_column_name = column_name + '_intervals_eq_f_' + str(num_intervals)
sorted_data = list(df[column_name].sort_values())
data_length = len(df[column_name])
interval_size = data_length // num_intervals
intervals_list = []
for i in range(0, data_length, interval_size):
intervals_list.append(sorted_data[i] - (sorted_data[i] / 1000))  # shift the bound slightly below the value so it falls inside the interval
df[new_column_name] = pd.cut(df[column_name], intervals_list, precision=9).apply(str)
df.to_sql(name=table_name, con=db.engine, schema=schema_name, if_exists='replace', index=False)
create_serial_sequence(schema_name, table_name)
inverse_query = 'ALTER TABLE {}.{} DROP COLUMN IF EXISTS {};'.format(*_ci(schema_name, table_name, new_column_name))
history.log_action(schema_id, table_name, datetime.now(),
'Generated equal frequency intervals for data of column {}'.format(column_name), inverse_query)
transaction.commit()
except Exception as e:
transaction.rollback()
app.logger.error("[ERROR] Couldn't process intervals with equal frequency")
app.logger.exception(e)
raise e
def manual_interval(self, schema_id, table_name, column_name, intervals):
connection = db.engine.connect()
transaction = connection.begin()
try:
schema_name = 'schema-' + str(schema_id)
df = pd.read_sql_query('SELECT * FROM {}.{};'.format(*_ci(schema_name, table_name)), db.engine)
new_column_name = column_name + '_intervals_custom'
df[new_column_name] = pd.cut(df[column_name], intervals).apply(str)
df.to_sql(name=table_name, con=db.engine, schema=schema_name, if_exists='replace', index=False)
create_serial_sequence(schema_name, table_name)
inverse_query = 'ALTER TABLE {}.{} DROP COLUMN IF EXISTS {};'.format(*_ci(schema_name, table_name, new_column_name))
history.log_action(schema_id, table_name, datetime.now(),
'Generated manual intervals for data of column {}'.format(column_name), inverse_query)
transaction.commit()
except Exception as e:
transaction.rollback()
app.logger.error("[ERROR] Couldn't process manual intervals")
app.logger.exception(e)
raise e
def remove_outlier(self, schema_id, table_name, column_name, value, less_than=False):
connection = db.engine.connect()
transaction = connection.begin()
try:
schema_name = 'schema-' + str(schema_id)
if less_than:
outlier_rows = [row for row in db.engine.execute('SELECT * FROM {}.{} WHERE {} < {};'.format(
*_ci(schema_name, table_name, column_name), _cv(value))).fetchall()]
db.engine.execute(
'DELETE FROM {}.{} WHERE {} < {};'.format(*_ci(schema_name, table_name, column_name), _cv(value)))
else:
outlier_rows = [row for row in db.engine.execute('SELECT * FROM {}.{} WHERE {} > {};'.format(
*_ci(schema_name, table_name, column_name), _cv(value))).fetchall()]
db.engine.execute(
'DELETE FROM {}.{} WHERE {} > {};'.format(*_ci(schema_name, table_name, column_name), _cv(value)))
inverse_query = ''
for row in outlier_rows:
inverse_query += 'INSERT INTO {}.{} VALUES ({});'.format(*_ci(schema_name, table_name), ', '.join(_cv(value) for value in row))
history.log_action(schema_id, table_name, datetime.now(),
'Removed outliers from column {}'.format(column_name), inverse_query)
transaction.commit()
except Exception as e:
transaction.rollback()
app.logger.error("[ERROR] Couldn't remove outliers from " + column_name)
app.logger.exception(e)
raise e
def chart_data_numerical(self, schema_id, table_name, column_name):
schema_name = 'schema-' + str(schema_id)
df = pd.read_sql_query('SELECT * FROM {}.{};'.format(*_ci(schema_name, table_name)), db.engine)
intervals = pd.cut(df[column_name], 10)
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
# EMISE SVET - SROVNANI (#111) ---
# 1. Load all datasets, possibly with metadata ---
# 2. Unify country codes ---
# 3. Cross-check it against each other and against sums ---
# 4. Create an aggregate dataset ---
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
path_wb = root + '\\worldbank\\API_EN.ATM.GHGT.KT.CE_DS2_en_csv_v2_49299.csv'
path_wb_countries = root + '\\worldbank\\Metadata_Country_API_EN.ATM.GHGT.KT.CE_DS2_en_csv_v2_49299.csv'
path_oecd = root + '\\oecd\\AIR_GHG_19082019230046941.csv'
path_gca = root + '\\global-carbon-atlas\\export_20190819_2250.csv'
# data loading and sanitization
wb = pd.read_csv(path_wb, skiprows=4)
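# sketch (not in the original, which is truncated here): the remaining inputs defined above
# would presumably be read in a similar way, e.g.
#     wb_countries = pd.read_csv(path_wb_countries)
#     oecd = pd.read_csv(path_oecd)
#     gca = pd.read_csv(path_gca)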
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#import libraries
import pandas as pd
from gurobipy import GRB, Model
def optimize(inputFile,outputFile):
"""
This function takes inputFile, runs a series of optimizations, and exports the results to outputFile.
"""
# Load data
Flavor_Prod=pd.read_excel(inputFile,sheet_name='Flavor_Prod')
S=pd.read_excel(inputFile,sheet_name='S',index_col=0)
S2=pd.read_excel(inputFile,sheet_name='S2',index_col=0)
D=pd.read_excel(inputFile,sheet_name='D',index_col=0)
D2=pd.read_excel(inputFile,sheet_name='D2',index_col=0)
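# NOTE: the optimize() body is truncated here in the source. A generic, illustrative
# continuation (not the author's actual formulation) would build a gurobipy model from the
# loaded frames along these lines:
#
#     mod = Model()
#     x = mod.addVars(len(Flavor_Prod), lb=0)        # one decision variable per flavor/product
#     mod.setObjective(x.sum(), sense=GRB.MAXIMIZE)  # placeholder objective
#     mod.optimize()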
import pandas as pd
import pickle
alter_list = pd.read_pickle("../input/alter_lists.pkl")
alter_list = alter_list.set_index('author').to_dict()
nodes = pd.read_csv("../input/coauthorship_nodeAttributes.csv")
with open("../input/author_metadata.pkl", "rb") as pkl:
author_metadata = pickle.load(pkl)
num_citations = {}
num_papers = {}
career_start = {}
num_alter1 = {}
num_alter2 = {}
for author in author_metadata:
num_citations[author] = author_metadata[author]["wosTimesCited"]
num_papers[author] = author_metadata[author]["num_papers"] = len(
author_metadata[author]["wosString"])
author_metadata[author]["year"] = list(
filter(None, author_metadata[author]["year"]))
try:
career_start[author] = min(
[int(i) for i in author_metadata[author]["year"]])
except ValueError:
career_start[author] = 2018
try:
num_alter1[author] = len(alter_list['alter'][author])
except KeyError:
pass
try:
num_alter2[author] = len(alter_list['alter_2'][author])
except KeyError:
pass
covariates = pd.DataFrame.from_dict(num_citations, orient='index')
covariates['num_citations'] = pd.Series(num_citations)
covariates['num_papers'] = pd.Series(num_papers)
covariates['career_start'] = pd.Series(career_start)
covariates['num_alter1'] = pd.Series(num_alter1)
# %%
from sre_constants import error
import pandas as pd
import openpyxl as pxl
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common import exceptions as e
import time
import datetime
import re
import traceback
#! EXTRACTION
def call_driver_(url):
"""
This function instantiates a remotely operated browser.
Returns:
[WebDriver]: A driver.
"""
DRIVER_PATH = r'/Users/studocu/Downloads/chromedriver'
driver = webdriver.Chrome(DRIVER_PATH)
driver.get(url)
driver.maximize_window()
return driver
def load_button_(driver):
"""
This function clicks the "Load More" button until there is nothing left to load.
"""
LOAD_MORE_XPATH = r'//span[text()="Load More"]'
while True:
try:
load_more = driver.find_element_by_xpath(LOAD_MORE_XPATH)
actions = ActionChains(driver)
actions.move_to_element(load_more).perform()
driver.execute_script('arguments[0].scrollIntoView({behavior: "smooth", block: "center", inline: "center"});', load_more)
WebDriverWait(driver, 4).until(
EC.element_to_be_clickable((By.XPATH, LOAD_MORE_XPATH)))
load_more.click()
except:
break
def get_links_(driver):
LINKS_PATH = r'//ul[@class="MuiList-root MuiList-padding"]//a'
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH,LINKS_PATH)))
links_web_elem = driver.find_elements_by_xpath(LINKS_PATH)
links = []
for link in links_web_elem:
links.append(link.get_attribute('href'))
return links
def pull_association_info_(links, driver):
all_rows = []
error_links = []
for i, link in enumerate(links):
driver.get(link)
try:
NAME_XPATH = r'//h1'
DESC_XPATH = r'//div[@class="bodyText-large userSupplied"]'
ADDRESS_XPATH = r'//span[text()="Address"]/..'
EMAIL_XPATH = r'//span[text()="Contact Email"]/..'
PHONE_XPATH = r'//span[text()="Phone Number"]/..'
XPATH_LINK = r''
INFO_XPATHS = [NAME_XPATH, DESC_XPATH, ADDRESS_XPATH, EMAIL_XPATH, PHONE_XPATH, XPATH_LINK]
INFO_NAMES = ['ASSOCIATION NAME','ASSOCIATION DESCRIPTION', 'ASSOCIATION ADDRESS', 'ASSOCIATION EMAIL', 'ASSOCIATION PHONE', 'ASSOCIATION LINK']
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, INFO_XPATHS[0])))
all_info_row = []
print('PULLING DATA FROM ASSOCIATION ' + str(i) + ' OUT OF ' + str(len(links)) + ' ASSOCIATIONS...')
for info_name, info_xpath in zip(INFO_NAMES, INFO_XPATHS):
try:
if info_xpath != '':
info_data_web_elem = driver.find_element_by_xpath(info_xpath)
info_data = info_data_web_elem.text
if info_name == 'ASSOCIATION NAME':
info_data = info_data_web_elem.text.title()
# handle an empty description
if info_data == '':
all_info_row.append('Null')
# handle an empty address
elif info_data == 'Address':
all_info_row.append('Null')
# handle an empty email
elif info_data == 'Contact Email\nE: ':
all_info_row.append('Null')
# cleaning email data
elif info_data.startswith('Contact Email'):
info_data = re.sub('Contact Email\nE: ', '', info_data)
all_info_row.append(info_data.lower())
# cleaning phone data
elif info_data.startswith('Phone'):
info_data = re.sub('Phone Number\nP: ', '', info_data)
all_info_row.append(info_data)
else:
all_info_row.append(info_data)
else:
all_info_row.append(link)
except:
all_info_row.append('Null')
except Exception as exc:
print(exc)
traceback.print_exc()
error_links.append(link)
pass
all_rows.append(all_info_row)
return all_rows, error_links
def extract_(url):
print('CALLING DRIVER...')
driver = call_driver_(url)
print('DRIVER CALLED.')
print('LOADIND BUTTONS...')
load_button_(driver)
print('ALL BUTTONS LOADED.')
print('PULLING LINKS...')
links = get_links_(driver)
print('LINKS PULLED.')
print('PULLING ASSOCIATION DATA...')
all_rows, error_links = pull_association_info_(links, driver)
print('ASSOCIATION DATA PULLED')
print('CLOSING DRIVER...')
driver.close()
print('DRIVER CLOSED.')
if len(error_links)==0:
return all_rows
else:
if((len(error_links)))>1:
print(str(len(error_links)) + ' association sites failed.\n')
for link in error_links:
print(link)
elif((len(error_links)))==1:
print('One association link failed: ' + error_links[0])
#! here we could call the function again on the error_links
elif ((len(error_links)))==0:
print('All associations were scraped.')
return all_rows
# ! WRANGLING
def transform_(all_rows):
try:
df = pd.DataFrame(all_rows, columns=['Name', 'Descrip', 'Address', 'Email', 'Phone', 'Link'])
df = df[['Name', 'Email']]
df = df.loc[(df['Name'] != 'Null') & (df['Email'] != 'Null')]
print(df)
except Exception as exc:
print(exc)
traceback.print_exc()
pass
return df
def load_(file_name, df):
"""
This function takes a file name and a DataFrame, converts the DataFrame into an Excel file, and saves it in the EXCEL_FILES_PATH folder.
Args:
file_name (str): the name of the Excel file to be created, WITHOUT the extension.
df (pd.DataFrame): a DataFrame containing the association names and emails.
"""
EXCEL_FILES_PATH = r'/Users/studocu/Downloads'
EXTENSION = '.xlsx'
PATH_FILE = EXCEL_FILES_PATH + '/' + file_name + EXTENSION
df.to_excel(PATH_FILE, index=False, engine='xlsxwriter')
def pipeline(url, uniID, uni_name):
file_name = uniID + ' ' + uni_name + ' ASSOCIATIONS'
file_name = re.sub(' ', '_', file_name)
all_rows = extract_(url)
df_ = transform_(all_rows)
load_(file_name, df_)
def scrape_single(url, uniID, uni_name):
pipeline(url, uniID, uni_name)
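# illustrative usage (hypothetical URL and university identifiers):
#     scrape_single('https://example.edu/organizations', 'U123', 'EXAMPLE UNIVERSITY')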
def scrape_multiples():
start_time = time.time()
EXCEL_PATH = r'/Users/studocu/Desktop/excel_input/input.xlsx'
df_ = pd.read_excel(EXCEL_PATH)
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 10:17:13 2018
@author: David
"""
# Built-in libraries
#import argparse
#import collections
#import multiprocessing
import os
#import pickle
#import time
# External libraries
#import rasterio
#import gdal
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.stats import linregress
from scipy.stats import median_abs_deviation
import xarray as xr
# Local libraries
import debrisglobal.globaldebris_input as debris_prms
from meltcurves import melt_fromdebris_func
#%% ===== SCRIPT OPTIONS =====
option_melt_comparison = False
option_hd_comparison = True
option_hd_centerline = False
option_hd_spatial_compare = False
hd_obs_fp = debris_prms.main_directory + '/../hd_obs/'
melt_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_melt_compare/'
hd_compare_fp = debris_prms.main_directory + '/../hd_obs/figures/hd_obs_compare/'
hd_centerline_fp = debris_prms.main_directory + '/../hd_obs/centerline_hd/'
if os.path.exists(melt_compare_fp) == False:
os.makedirs(melt_compare_fp)
if os.path.exists(hd_compare_fp) == False:
os.makedirs(hd_compare_fp)
#%% ===== FUNCTIONS =====
def plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn, ds_names=None,
hd_min=0, hd_max=2, hd_tick_major=0.25, hd_tick_minor=0.05,
melt_min=0, melt_max=70, melt_tick_major=10, melt_tick_minor=5,
plot_meltfactor=False, z_value = 1.645, fontsize=11):
#%%
""" Plot comparison of debris vs. melt for various sites """
# Dataset of melt data
ds_ostrem = xr.open_dataset(melt_fp + melt_fn)
ds_ostrem = ds_ostrem.sortby('hd_cm')
time_year = pd.to_datetime(ds_ostrem.time.values).year
time_daysperyear = np.array([366 if x%4 == 0 else 365 for x in time_year])
time_yearfrac = time_year + (pd.to_datetime(ds_ostrem.time.values).dayofyear-1) / time_daysperyear
color_dict = {0:'k', 1:'b', 2:'r'}
symbol_dict = {0:'D', 1:'o', 2:'^'}
# ===== PLOT DEBRIS VS. SURFACE LOWERING =====
fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False,
gridspec_kw = {'wspace':0.4, 'hspace':0.15})
melt_obs_all = []
hd_obs_all = []
melt_mod_all = []
melt_mod_bndlow_all = []
melt_mod_bndhigh_all = []
for n in np.arange(0,len(measured_hd_list)):
measured_hd = measured_hd_list[n]
measured_melt = measured_melt_list[n]
melt_obs_all.extend(measured_melt)
hd_obs_all.extend(measured_hd)
yearfracs = yearfracs_list[n]
start_yearfrac = yearfracs[0]
end_yearfrac = yearfracs[1]
if ds_names is not None:
ds_name = ds_names[n]
else:
ds_name = None
start_idx = np.where(abs(time_yearfrac - start_yearfrac) == abs(time_yearfrac - start_yearfrac).min())[0][0]
end_idx = np.where(abs(time_yearfrac - end_yearfrac) == abs(time_yearfrac - end_yearfrac).min())[0][0]
# Ostrem Curve
debris_thicknesses = ds_ostrem.hd_cm.values
debris_melt_df = pd.DataFrame(np.zeros((len(debris_thicknesses),3)),
columns=['debris_thickness', 'melt_mmwed', 'melt_std_mmwed'])
nelev = 0
for ndebris, debris_thickness in enumerate(debris_thicknesses):
# Units: mm w.e. per day
melt_mmwed = (ds_ostrem['melt'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
melt_std_mmwed = (ds_ostrem['melt_std'][ndebris,start_idx:end_idx,nelev].values.sum()
* 1000 / len(time_yearfrac[start_idx:end_idx]))
debris_melt_df.loc[ndebris] = debris_thickness / 100, melt_mmwed, melt_std_mmwed
debris_melt_df['melt_bndlow_mmwed'] = debris_melt_df['melt_mmwed'] - z_value * debris_melt_df['melt_std_mmwed']
debris_melt_df['melt_bndhigh_mmwed'] = debris_melt_df['melt_mmwed'] + z_value * debris_melt_df['melt_std_mmwed']
#%%
# MEAN CURVE
fit_idx = list(np.where(debris_thicknesses >= 5)[0])
func_coeff, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_mmwed.values[fit_idx])
melt_cleanice = debris_melt_df.loc[0,'melt_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve = melt_fromdebris_func(debris_4curve, func_coeff[0], func_coeff[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve = np.concatenate([[melt_cleanice], melt_4curve])
# Linearly interpolate between 0 cm and 2 cm for the melt rate
def melt_0to2cm_adjustment(melt, melt_clean, melt_2cm, hd):
""" Linearly interpolate melt factors between 0 and 2 cm
based on clean ice and 2 cm sub-debris melt """
melt[(hd >= 0) & (hd < 0.02)] = (
melt_clean + hd[(hd >= 0) & (hd < 0.02)] / 0.02 * (melt_2cm - melt_clean))
return melt
melt_mod = melt_fromdebris_func(measured_hd, func_coeff[0], func_coeff[1])
melt_2cm = melt_fromdebris_func(0.02, func_coeff[0], func_coeff[1])
melt_mod = melt_0to2cm_adjustment(melt_mod, melt_cleanice, melt_2cm, measured_hd)
melt_mod_all.extend(melt_mod)
# LOWER BOUND CURVE
func_coeff_bndlow, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndlow_mmwed.values[fit_idx])
melt_cleanice_bndlow = debris_melt_df.loc[0,'melt_bndlow_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndlow = melt_fromdebris_func(debris_4curve, func_coeff_bndlow[0], func_coeff_bndlow[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndlow = np.concatenate([[melt_cleanice_bndlow], melt_4curve_bndlow])
melt_mod_bndlow = melt_fromdebris_func(measured_hd, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_2cm_bndlow = melt_fromdebris_func(0.02, func_coeff_bndlow[0], func_coeff_bndlow[1])
melt_mod_bndlow = melt_0to2cm_adjustment(melt_mod_bndlow, melt_cleanice_bndlow, melt_2cm_bndlow, measured_hd)
melt_mod_bndlow_all.extend(melt_mod_bndlow)
# UPPER BOUND CURVE
func_coeff_bndhigh, pcov = curve_fit(melt_fromdebris_func,
debris_melt_df.debris_thickness.values[fit_idx],
debris_melt_df.melt_bndhigh_mmwed.values[fit_idx])
melt_cleanice_bndhigh = debris_melt_df.loc[0,'melt_bndhigh_mmwed']
# Fitted curve
debris_4curve = np.arange(0.02,5.01,0.01)
melt_4curve_bndhigh = melt_fromdebris_func(debris_4curve, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
# add clean ice
debris_4curve = np.concatenate([[0.0], debris_4curve])
melt_4curve_bndhigh = np.concatenate([[melt_cleanice_bndhigh], melt_4curve_bndhigh])
melt_mod_bndhigh = melt_fromdebris_func(measured_hd, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_2cm_bndhigh = melt_fromdebris_func(0.02, func_coeff_bndhigh[0], func_coeff_bndhigh[1])
melt_mod_bndhigh = melt_0to2cm_adjustment(melt_mod_bndhigh, melt_cleanice_bndhigh,melt_2cm_bndhigh, measured_hd)
melt_mod_bndhigh_all.extend(melt_mod_bndhigh)
if plot_meltfactor:
melt_4curve = melt_4curve / melt_cleanice
melt_4curve_bndlow = melt_4curve_bndlow / melt_cleanice
melt_4curve_bndhigh = melt_4curve_bndhigh / melt_cleanice
# Plot curve
ax[0,0].plot(measured_hd, measured_melt, symbol_dict[n], color=color_dict[n],
markersize=3, markerfacecolor="None", markeredgewidth=0.5, zorder=5, label=ds_name, clip_on=False)
ax[0,0].plot(debris_4curve, melt_4curve,
color=color_dict[n], linewidth=1, linestyle='--', zorder=5-n)
ax[0,0].fill_between(debris_4curve, melt_4curve_bndlow, melt_4curve_bndhigh,
color=color_dict[n], linewidth=0, zorder=5-n, alpha=0.2)
# text
# ax[0,0].text(0.5, 1.09, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
# transform=ax[0,0].transAxes)
ax[0,0].text(0.5, 1.11, glac_name, size=fontsize-2, horizontalalignment='center', verticalalignment='top',
transform=ax[0,0].transAxes)
# eqn_text = r'$b = \frac{b_{0}}{1 + kb_{0}h}$'
# coeff1_text = r'$b_{0} = ' + str(np.round(func_coeff[0],2)) + '$'
# coeff2_text = r'$k = ' + str(np.round(func_coeff[1],2)) + '$'
# # coeff$\frac{b_{0}}{1 + 2kb_{0}h}$'
# ax[0,0].text(0.9, 0.95, eqn_text, size=12, horizontalalignment='right', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.615, 0.83, 'where', size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.77, coeff1_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# ax[0,0].text(0.66, 0.7, coeff2_text, size=10, horizontalalignment='left', verticalalignment='top',
# transform=ax[0,0].transAxes)
# X-label
# ax[0,0].set_xlabel('Debris thickness (m)', size=fontsize)
ax[0,0].set_xlim(hd_min, hd_max)
ax[0,0].xaxis.set_major_locator(plt.MultipleLocator(hd_tick_major))
ax[0,0].xaxis.set_minor_locator(plt.MultipleLocator(hd_tick_minor))
# Y-label
# if plot_meltfactor:
# ylabel_str = 'Melt (-)'
# else:
# ylabel_str = 'Melt (mm w.e. d$^{-1}$)'
# ax[0,0].set_ylabel(ylabel_str, size=fontsize)
ax[0,0].set_ylim(melt_min, melt_max)
ax[0,0].yaxis.set_major_locator(plt.MultipleLocator(melt_tick_major))
ax[0,0].yaxis.set_minor_locator(plt.MultipleLocator(melt_tick_minor))
# Tick parameters
ax[0,0].yaxis.set_ticks_position('both')
ax[0,0].tick_params(axis='both', which='major', labelsize=fontsize-2, direction='inout')
ax[0,0].tick_params(axis='both', which='minor', labelsize=fontsize-4, direction='in')
# Legend
ax[0,0].legend(ncol=1, fontsize=fontsize-3, frameon=True, handlelength=1,
handletextpad=0.15, columnspacing=0.5, borderpad=0.25, labelspacing=0.5, framealpha=0.5)
# Save plot
fig.set_size_inches(2, 1.5)
fig.savefig(melt_compare_fp + fig_fn, bbox_inches='tight', dpi=300, transparent=True)
plt.close()
return hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all
#%%
if option_melt_comparison:
# glaciers = ['1.15645', '2.14297', '6.00474', '7.01044', '10.01732', '11.00719', '11.02810', '11.02858', '11.03005',
# '12.01012', '12.01132', '13.05000', '13.43232', '14.06794', '14.16042', '15.03733', '15.03743',
# '15.04045', '15.07886', '15.11758', '18.02397']
glaciers = ['1.15645', '2.14297', '7.01044', '11.00719', '11.02472', '11.02810', '11.02858', '11.03005',
'12.01012', '12.01132', '13.05000', '13.43165', '13.43232', '14.06794', '14.16042', '15.03733',
'15.03743', '15.04045', '15.07122', '15.07886', '15.11758', '18.02375', '18.02397']
# glaciers = ['10.01732']
# glaciers = ['13.43165']
# glaciers = ['13.43232']
# glaciers = ['11.02858']
z_value = 1.645
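    # 1.645 is the standard-normal critical value for a two-sided 90% interval
    # (5th-95th percentile), so the melt bounds below span a 90% confidence range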
hd_obs_all, melt_obs_all, melt_mod_all, melt_mod_bndlow_all, melt_mod_bndhigh_all, reg_all = [], [], [], [], [], []
rgiid_all = []
# ===== KENNICOTT (1.15645) ====
if '1.15645' in glaciers:
print('\nmelt comparison with Anderson et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/1.15645_kennicott_anderson_2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Kennicott (1.15645)"
fig_fn = '1.15645_hd_melt_And2019.png'
# ds_names = ['Anderson 2019\n(6/18/11$\u2009$-$\u2009$8/16/11)']
ds_names = ['6/18/11$\u2009$-$\u2009$8/16/11']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6150N-21700E-debris_melt_curve.nc'
yearfracs_list = [[2011 + 169/365, 2011 + 228/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['01'])
rgiid_all.append(['1.15645'])
# ===== Emmons (2.14297) ====
if '2.14297' in glaciers:
print('\nmelt comparison with Moore et al. 2019')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/2.14297_moore2019-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Emmons (2.14297)"
fig_fn = '2.14297_hd_melt_Moo2019.png'
# ds_names = ['Moore 2019\n(7/31/14$\u2009$-$\u2009$8/10/14)']
ds_names = ['7/31/14$\u2009$-$\u2009$8/10/14']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-23825E-debris_melt_curve.nc'
yearfracs_list = [[2014 + 212/365, 2014 + 222/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['02'])
rgiid_all.append(['2.14297'])
# ===== Svinafellsjokull (06.00474) ====
if '6.00474' in glaciers:
print('\nmelt comparison with Moller et al (2016)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/6.00474_moller2016-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df['melt_mf'].values]
glac_name = "Svinafellsjokull (6.00474)"
fig_fn = '6.00474_hd_melt_Moller2016.png'
# ds_names = ['Moller 2016\n(5/17/13$\u2009$-$\u2009$5/30/13)']
ds_names = ['5/17/13$\u2009$-$\u2009$5/30/13']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '6400N-34325E-debris_melt_curve.nc'
yearfracs_list = [[2013 + 137/365, 2013 + 150/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 0.1) * 0.1,1) + 0.1
melt_tick_major, melt_tick_minor = 0.5, 0.1
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor,
plot_meltfactor=True)
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['06'])
rgiid_all.append(['6.00474'])
# ===== Larsbreen (7.01044) ====
if '7.01044' in glaciers:
print('\nmelt comparison with Nicholson and Benn 2006')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/7.01044_larsbreen_NB2006-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Larsbreen (7.01044)"
fig_fn = '7.01044_hd_melt_NichBenn2006.png'
# ds_names = ['Nicholson 2006\n(7/09/02$\u2009$-$\u2009$7/20/02)']
ds_names = ['7/09/02$\u2009$-$\u2009$7/20/02']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '7825N-1600E-debris_melt_curve.nc'
yearfracs_list = [[2002 + 191/366, 2002 + 202/366]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.2, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['07'])
rgiid_all.append(['7.01044'])
# ===== <NAME> (10.01732) ====
if '10.01732' in glaciers:
# print('\nmelt comparison with Mayer et al (2011)')
assert True == False, '10.01732 NEEDS TO DO THE MODELING FIRST!'
# # Data: debris thickness (m) and melt rate (mm w.e. d-1)
# mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/10.01732_mayer2011-melt.csv')
# measured_hd_list = [mb_df.hd_m.values]
# measured_melt_list = [mb_df['melt_mf'].values]
# glac_name = "<NAME> (10.01732)"
# fig_fn = '10.01732_hd_melt_Mayer2011.png'
## ds_names = ['Mayer 2011\n(7/11/07$\u2009$-$\u2009$7/30/07)']
# ds_names = ['7/11/07$\u2009$-$\u2009$7/30/07']
# melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
# melt_fn = '5000N-8775E-debris_melt_curve.nc'
# yearfracs_list = [[2007 + 192/365, 2007 + 211/365]]
#
# hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
# hd_tick_major, hd_tick_minor = 0.1, 0.02
# melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
# melt_tick_major, melt_tick_minor = 10, 5
#
# for n in np.arange(0,len(measured_hd_list)):
# assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
#
# hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
# plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
# melt_fp, melt_fn,
# ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
# hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
# melt_min=melt_min, melt_max=melt_max,
# melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
# hd_obs_all.append(hd_obs)
# melt_obs_all.append(melt_obs)
# melt_mod_all.append(melt_mod)
# melt_mod_bndlow_all.append(melt_mod_bndlow)
# melt_mod_bndhigh_all.append(melt_mod_bndhigh)
# reg_all.append(['10'])
# ===== Vernagtferner (11.00719) ====
if '11.00719' in glaciers:
print('\nmelt comparison with Juen et al (2013)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.00719_vernagtferner_juen2013-melt.csv')
measured_hd_list = [mb_df.hd_m.values]
measured_melt_list = [mb_df.melt_mmwed.values]
glac_name = "Vernagtferner (11.00719)"
fig_fn = '11.00719_hd_melt_Juen2013.png'
# ds_names = ['Juen 2013\n(6/25/10$\u2009$-$\u2009$7/10/10)']
ds_names = ['6/25/10$\u2009$-$\u2009$7/10/10']
melt_fp = debris_prms.output_fp + 'ostrem_curves/exp4/'
melt_fn = '4700N-1075E-debris_melt_curve.nc'
yearfracs_list = [[2010 + 176/365, 2010 + 191/365]]
hd_min, hd_max = 0, np.ceil(np.max([x.max() for x in measured_hd_list])/0.1)*0.1 + 0.05
hd_tick_major, hd_tick_minor = 0.1, 0.05
melt_min, melt_max = 0, np.round(np.ceil(np.max([x.max() for x in measured_melt_list]) / 10) * 10,0) + 5
melt_tick_major, melt_tick_minor = 20, 5
for n in np.arange(0,len(measured_hd_list)):
assert len(measured_hd_list[n]) == len(measured_melt_list[n]), 'length of hd differs from melt'
hd_obs, melt_obs, melt_mod, melt_mod_bndlow, melt_mod_bndhigh = (
plot_hd_vs_melt_comparison(measured_hd_list, measured_melt_list, yearfracs_list, glac_name, fig_fn,
melt_fp, melt_fn,
ds_names=ds_names, hd_min=hd_min, hd_max=hd_max,
hd_tick_major=hd_tick_major, hd_tick_minor=hd_tick_minor,
melt_min=melt_min, melt_max=melt_max,
melt_tick_major=melt_tick_major, melt_tick_minor=melt_tick_minor))
hd_obs_all.append(hd_obs)
melt_obs_all.append(melt_obs)
melt_mod_all.append(melt_mod)
melt_mod_bndlow_all.append(melt_mod_bndlow)
melt_mod_bndhigh_all.append(melt_mod_bndhigh)
reg_all.append(['11'])
rgiid_all.append(['11.00719'])
# ===== Vernocolo (11.02472) =====
if '11.02472' in glaciers:
print('\nmelt comparison with bocchiola et al. (2015)')
# Data: debris thickness (m) and melt rate (mm w.e. d-1)
mb_df = | pd.read_csv(debris_prms.main_directory + '/../hd_obs/datasets/11.02472_bocchiola2015-melt.csv') | pandas.read_csv |
from datetime import datetime, timedelta
import time
import pandas as pd
import database_funcs as _db
import math_funcs as _mth
def get_candles_from_db(logger,rd,data_base_config,currency_pair):
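    # supported candle widths, in seconds: 300=5 min, 900=15 min, 1800=30 min,
    # 7200=2 h, 14400=4 h, 86400=1 day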
if rd['time_frame'] not in [300,900,1800,7200,14400,86400]:
logger.error ("Invalid time_frame")
return False
try:
from_time = int(datetime(rd['from_date']['year'],rd['from_date']['month'],rd['from_date']['day'],0,0).timestamp())
to_time = int(datetime(rd['to_date']['year'],rd['to_date']['month'],rd['to_date']['day'],0,0).timestamp())
except Exception as e:
logger.error(e)
return False
if to_time <= from_time:
logger.error("Invalid dates")
return False
return _db.getCandlesFromTickets(logger,data_base_config,currency_pair,rd['time_frame'],from_time,to_time)
def validate_candles_from_db(logger,data_frame,time_frame):
if not "date" in data_frame.columns:
logger.error("Not date in data frame")
return False
if not "close" in data_frame.columns:
logger.error("Not close in data frame")
return False
if not "open" in data_frame.columns:
logger.error("Not open in data frame")
return False
date_series = | pd.Series(data_frame['date']) | pandas.Series |
import os
import unittest
import pandas as pd
import argopandas.index as dfi
from argopandas.mirror import FileMirror
class TestDataFrameIndex(unittest.TestCase):
def setUp(self) -> None:
this_file = os.path.dirname(__file__)
mirror_dir = "argo-test-mirror"
self.mirror = FileMirror(os.path.join(this_file, mirror_dir))
def test_subset(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_prof.nc'}])
df = dfi.DataFrameIndex(df)
self.assertIsInstance(df[[]], dfi.DataFrameIndex)
self.assertIsInstance(df.iloc[[], :], dfi.DataFrameIndex)
def test_info(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_prof.nc'}])
df = dfi.DataFrameIndex(df, _mirror=self.mirror)
self.assertIn('DATA_TYPE', df.info.keys())
def test_prof(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/profiles/D2900313_002.nc'}])
df = dfi.ProfIndex(df, _mirror=self.mirror)
self.assertIn('PRES', df.levels.keys())
self.assertIn('PRES', df.levels_('PRES').keys())
self.assertIn('PLATFORM_NUMBER', df.prof.keys())
self.assertIn('PLATFORM_NUMBER', df.prof_('PLATFORM_NUMBER').keys())
self.assertIn('PARAMETER', df.calib.keys())
self.assertIn('STATION_PARAMETERS', df.param.keys())
self.assertIn('HISTORY_DATE', df.history.keys())
def test_traj(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_Rtraj.nc'}])
df = dfi.TrajIndex(df, _mirror=self.mirror)
self.assertIn('LATITUDE', df.measurement.keys())
self.assertIn('LATITUDE', df.measurement_('LATITUDE').keys())
self.assertIn('JULD_DESCENT_START', df.cycle.keys())
self.assertIn('JULD_DESCENT_START', df.cycle_('JULD_DESCENT_START').keys())
self.assertIn('TRAJECTORY_PARAMETERS', df.param.keys())
self.assertIn('HISTORY_DATE', df.history.keys())
def test_tech(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_tech.nc'}])
df = dfi.TechIndex(df, _mirror=self.mirror)
self.assertIn('CYCLE_NUMBER', df.tech_param.keys())
def test_meta(self):
df = pd.DataFrame.from_records([{'file': 'csio/2900313/2900313_meta.nc'}])
df = dfi.MetaIndex(df, _mirror=self.mirror)
self.assertIn('CONFIG_PARAMETER_VALUE', df.config_param.keys())
self.assertIn('CONFIG_PARAMETER_NAME', df.config_param.keys())
self.assertIn('CONFIG_MISSION_NUMBER', df.missions.keys())
self.assertIn('TRANS_SYSTEM', df.trans_system.keys())
self.assertIn('POSITIONING_SYSTEM', df.positioning_system.keys())
self.assertIn('LAUNCH_CONFIG_PARAMETER_NAME', df.launch_config_param.keys())
self.assertIn('SENSOR', df.sensor.keys())
self.assertIn('PARAMETER', df.param.keys())
def test_zero_length(self):
df = pd.DataFrame({'file': []})
df = dfi.DataFrameIndex(df)
self.assertEqual({k: list(v) for k, v in df.info.items()}, {'file': []})
class TestDataFrameIndexHelpers(unittest.TestCase):
def setUp(self) -> None:
# recs = prof_all.iloc[[0, 1000, 100000]].to_records()
# [{k: r[k] for k in prof_all.columns} for r in recs]
records = [
{
'file': 'aoml/13857/profiles/R13857_001.nc',
'date': pd.Timestamp('1997-07-29 20:03:00+0000', tz='UTC'),
'latitude': 0.267,
'longitude': -16.032,
'ocean': 'A',
'profiler_type': 845,
'institution': 'AO',
'date_update': pd.Timestamp('2018-10-11 18:05:20+0000', tz='UTC')
},
{
'file': 'aoml/15854/profiles/R15854_030.nc',
'date': pd.Timestamp('1998-07-01 02:22:54+0000', tz='UTC'),
'latitude': -5.997,
'longitude': -9.028,
'ocean': 'A',
'profiler_type': 845,
'institution': 'AO',
'date_update': | pd.Timestamp('2018-10-11 18:11:16+0000', tz='UTC') | pandas.Timestamp |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# from subprocess import check_output
# print(check_output(["ls", "../input"]).decode("utf8"))
from pylab import plot, show, hist, bar
import gc
issub = "N"
train_data = pd.read_csv("../input/train.tsv", sep="\t")
train_data['log_price'] = np.log(train_data.price + 1)
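# log-transform the skewed price target; np.log(price + 1) is equivalent to np.log1p(price)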
test_data = | pd.read_csv("../input/test.tsv", sep="\t") | pandas.read_csv |
# Conversion of Washington DC Taxi Trips (2017): https://www.kaggle.com/bvc5283/dc-taxi-trips
import argparse
import pandas as pd
import numpy as np
def convertData(inFile, outFile, startDate, endDate):
df = | pd.read_csv(inFile) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[4]:
train.shape
# In[5]:
test.shape
# In[6]:
y_train = train['ACTION']
# In[7]:
y_train.shape
# In[8]:
train_data = train.drop('ACTION', axis=1)
train_data.shape
# In[9]:
test_data = test.drop('id', axis=1)
test_data.shape
# ## Common Variables
# In[10]:
# define variables
random_state = 42
cv = 5
scoring = 'roc_auc'
verbose=2
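# ROC AUC is the competition's evaluation metric, so it is used as the
# cross-validation scoring for every model below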
# ## Common functions
# In[11]:
def save_submission(predictions, filename):
'''
Save predictions into csv file
'''
global test
submission = pd.DataFrame()
submission["Id"] = test["id"]
submission["ACTION"] = predictions
filepath = "result/sampleSubmission_"+filename
submission.to_csv(filepath, index = False)
# In[12]:
def print_graph(results, param1, param2, xlabel, ylabel, title='Plot showing the ROC_AUC score for various hyper parameter values'):
'''
Plot the graph
'''
plt.plot(results[param1],results[param2]);
plt.grid();
plt.xlabel(xlabel);
plt.ylabel(ylabel);
plt.title(title);
# In[13]:
def get_rf_params():
'''
Return dictionary of parameters for random forest
'''
params = {
'n_estimators':[10,20,50,100,200,500,700,1000],
'max_depth':[1,2,5,10,12,15,20,25],
'max_features':[1,2,3,4,5],
'min_samples_split':[2,5,7,10,20]
}
return params
# In[14]:
def get_xgb_params():
'''
Return dictionary of parameters for xgboost
'''
params = {
'n_estimators': [10,20,50,100,200,500,750,1000],
'learning_rate': uniform(0.01, 0.6),
'subsample': uniform(),
'max_depth': [3, 4, 5, 6, 7, 8, 9],
'colsample_bytree': uniform(),
'min_child_weight': [1, 2, 3, 4]
}
return params
# ### We will try following models
#
# 1. KNN
# 2. SVM
# 3. Logistic Regression
# 4. Random Forest
# 5. Xgboost
# ## Build Models on the raw data
# ## 1.1 KNN with raw features
# In[15]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[16]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[17]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[18]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[19]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_data,y_train)
# In[20]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, "knn_raw.csv")
# 
# ## 1.2 SVM with raw feature
# In[21]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[22]:
best_c=best_model.best_params_['C']
best_c
# In[23]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[24]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[25]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_data,y_train)
# In[26]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'svm_raw.csv')
# 
# ## 1.3 Logistic Regression with Raw Feature
# In[27]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[28]:
best_c=best_model.best_params_['C']
best_c
# In[29]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[30]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[31]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_data,y_train)
# In[32]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'lr_raw.csv')
# 
# ## 1.4 Random Forest with Raw Feature
# In[33]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[34]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[35]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[36]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_data,y_train)
# In[37]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# ## Feature Observations:
# 
# 1. MGR_ID is the most important feature, followed by RESOURCE and ROLE_DEPTNAME
# In[38]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'rf_raw.csv')
# 
# ## 1.5 Xgboost with Raw Feature
# In[39]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_data,y_train)
# In[40]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[41]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[42]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_data,y_train)
# In[43]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[44]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'xgb_raw.csv')
# 
# 
# In[45]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','Raw', 0.67224, 0.68148])
x.add_row(['SVM', 'Raw', 0.50286, 0.51390])
x.add_row(['Logistic Regression', 'Raw', 0.53857, 0.53034])
x.add_row(['Random Forest', 'Raw', 0.87269, 0.87567])
x.add_row(['Xgboost', 'Raw', 0.86988, 0.87909])
print(x)
# # Observations:
#
# 1. Xgboost performs best on the raw features
# 2. Random forest also performs well on the raw features
# 3. Tree-based models perform better than linear models on raw features
# ## Build model on one hot encoded features
# ### 2.1 KNN with one hot encoded features
# In[46]:
train_ohe = sparse.load_npz('data/train_ohe.npz')
test_ohe = sparse.load_npz('data/test_ohe.npz')
train_ohe.shape, test_ohe.shape, y_train.shape
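# The *_ohe.npz matrices are loaded pre-computed. A minimal sketch of how such one-hot
# encoded features could be generated (an assumption -- the actual preprocessing script is
# not part of this notebook):
def _build_ohe_features_sketch(train_df, test_df):
    from sklearn.preprocessing import OneHotEncoder
    enc = OneHotEncoder(handle_unknown='ignore')
    # fit on train and test together so every category gets a column
    enc.fit(pd.concat([train_df, test_df]))
    return enc.transform(train_df), enc.transform(test_df)  # scipy sparse matrices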
# In[47]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=4)
best_model = clf.fit(train_ohe,y_train)
# In[48]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[49]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[50]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[51]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[52]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, "knn_ohe.csv")
# 
# ## 2.2 SVM with one hot encoded features
# In[53]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[54]:
best_c=best_model.best_params_['C']
best_c
# In[55]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[56]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[57]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_ohe,y_train)
# In[58]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'svm_ohe.csv')
# 
# ## 2.3 Logistic Regression with one hot encoded features
# In[59]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[60]:
best_c=best_model.best_params_['C']
best_c
# In[61]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[62]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[63]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_ohe,y_train)
# In[64]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'lr_ohe.csv')
# 
# ## 2.4 Random Forest with one hot encoded features
# In[65]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[66]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[67]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[68]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_ohe,y_train)
# In[69]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[70]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'rf_ohe.csv')
# 
# ## 2.5 Xgboost with one hot encoded features
# In[71]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_ohe,y_train)
# In[72]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[73]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[74]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[75]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[76]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'xgb_ohe.csv')
# 
# 
# In[77]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','ohe', 0.81657, 0.81723])
x.add_row(['SVM', 'ohe', 0.87249, 0.87955])
x.add_row(['Logistic Regression', 'ohe', 0.87436, 0.88167])
x.add_row(['Random Forest', 'ohe', 0.84541, 0.84997])
x.add_row(['Xgboost', 'ohe', 0.84717, 0.85102])
print(x)
# # Observations:
#
# 1. One-hot encoded features perform better than the other encoding techniques
# 2. Linear models (Logistic Regression and SVM) perform better in this higher-dimensional space
# # 3 Build Model on frequency encoding feature
# ## 3.1 KNN with frequency encoding
# In[78]:
train_df_fc = pd.read_csv('data/train_df_fc.csv')
test_df_fc = pd.read_csv('data/test_df_fc.csv')
# In[79]:
train_df_fc.shape, test_df_fc.shape, y_train.shape
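# The frequency-encoded features are loaded pre-computed. A minimal sketch of the idea
# (an assumption about how train_df_fc.csv / test_df_fc.csv may have been built): each
# categorical value is replaced by how often it occurs.
def _frequency_encode_sketch(train_df, test_df):
    full = pd.concat([train_df, test_df])
    train_fc, test_fc = train_df.copy(), test_df.copy()
    for col in train_df.columns:
        counts = full[col].value_counts()          # occurrences per category
        train_fc[col] = train_df[col].map(counts)
        test_fc[col] = test_df[col].map(counts)
    return train_fc, test_fc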
# In[80]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[81]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[82]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[83]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[84]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[85]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, "knn_fc.csv")
# 
# ## 3.2 SVM with frequency encoding
# In[86]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[87]:
best_c=best_model.best_params_['C']
best_c
# In[88]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[89]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[90]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_df_fc,y_train)
# In[91]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'svm_fc.csv')
# 
# ## 3.3 Logistic Regression with frequency encoding
# In[92]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[93]:
best_c=best_model.best_params_['C']
best_c
# In[94]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[95]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[96]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_df_fc,y_train)
# In[97]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'lr_fc.csv')
# 
# ## 3.4 Random Forest with frequency encoding
# In[98]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[99]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[100]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[101]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[103]:
features=train_df_fc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[106]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'rf_fc.csv')
# 
# ## 3.5 Xgboost with frequency encoding
# In[107]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_df_fc,y_train)
# In[108]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[109]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[110]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[111]:
features=train_df_fc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[112]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'xgb_fc.csv')
# 
# 
# In[113]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','fc', 0.79715, 0.79125])
x.add_row(['SVM', 'fc', 0.60085, 0.59550])
x.add_row(['Logistic Regression', 'fc', 0.59896, 0.59778])
x.add_row(['Random Forest', 'fc', 0.87299, 0.87616])
x.add_row(['Xgboost', 'fc', 0.86987, 0.86944])
print(x)
# # Observations:
#
# 1. Tree-based models perform better than linear models for this feature
# 2. KNN does well for every feature set
# # 4 Build Model using response encoding feature
# In[114]:
train_df_rc = pd.read_csv('data/train_df_rc.csv')
test_df_rc = pd.read_csv('data/test_df_rc.csv')
# In[115]:
train_df_rc.shape, test_df_rc.shape, y_train.shape
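# The response-coded features are loaded pre-computed. A minimal sketch of response
# (target) encoding (an assumption about how the *_rc.csv files may have been built):
# each category is replaced by the mean ACTION observed for it in the training data,
# with unseen test categories falling back to the global mean. In practice this is
# often computed out-of-fold to limit target leakage.
def _response_encode_sketch(train_df, y, test_df):
    global_mean = y.mean()
    train_rc = pd.DataFrame(index=train_df.index)
    test_rc = pd.DataFrame(index=test_df.index)
    for col in train_df.columns:
        means = y.groupby(train_df[col]).mean()    # mean of ACTION per category
        train_rc[col] = train_df[col].map(means)
        test_rc[col] = test_df[col].map(means).fillna(global_mean)
    return train_rc, test_rc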
# ## 4.1 KNN with response encoding
# In[116]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[117]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[118]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[119]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[120]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_df_rc,y_train)
# In[121]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, "knn_rc.csv")
# 
# ## 4.2 SVM with response encoding
# In[122]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[123]:
best_c=best_model.best_params_['C']
best_c
# In[124]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[125]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[126]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_df_rc,y_train)
# In[127]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'svm_rc.csv')
# 
# ## 4.3 Logistic Regression with response encoding
# In[128]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[129]:
best_c=best_model.best_params_['C']
best_c
# In[130]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[131]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[132]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_df_rc,y_train)
# In[133]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'lr_rc.csv')
# 
# ## 4.4 Random Forest with response encoding
# In[134]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[135]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[136]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[137]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_df_rc,y_train)
# In[138]:
features=train_df_rc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[139]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'rf_rc.csv')
# 
# ## 4.5 Xgboost with response encoding
# In[140]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_df_rc,y_train)
# In[141]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[142]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[143]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_df_rc,y_train)
# In[144]:
features=train_df_rc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[145]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'xgb_rc.csv')
# 
# 
# In[146]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','rc', 0.84352, 0.85351])
x.add_row(['SVM', 'rc', 0.85160, 0.86031])
x.add_row(['Logistic Regression', 'rc', 0.85322, 0.86180])
x.add_row(['Random Forest', 'rc', 0.83136, 0.83892])
x.add_row(['Xgboost', 'rc', 0.84135, 0.84190])
print(x)
# # Observations:
#
# 1. Every model performs well for this feature
# 2. Linear models perform better than tree-based models
# # 5 Build model on SVD feature
# In[147]:
train_svd = pd.read_csv('data/train_svd.csv')
test_svd = pd.read_csv('data/test_svd.csv')
# In[148]:
train_svd.shape, test_svd.shape, y_train.shape
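# The SVD features are loaded pre-computed. One plausible construction (an assumption --
# the component count and the input matrix are not documented here) is a truncated SVD
# of the one-hot encoded sparse matrices:
def _svd_features_sketch(train_sparse, test_sparse, n_components=10):
    from sklearn.decomposition import TruncatedSVD
    svd = TruncatedSVD(n_components=n_components, random_state=random_state)
    return svd.fit_transform(train_sparse), svd.transform(test_sparse)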
# ## 5.1 KNN with SVD
# In[149]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[150]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[151]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[152]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[153]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_svd,y_train)
# In[154]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, "knn_svd.csv")
# 
# ## 5.2 SVM with SVD
# In[155]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[156]:
best_c=best_model.best_params_['C']
best_c
# In[157]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[158]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[159]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_svd,y_train)
# In[160]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, 'svm_svd.csv')
# 
# ## 5.3 Logistic Regression with SVD
# In[161]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[162]:
best_c=best_model.best_params_['C']
best_c
# In[163]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[164]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[165]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_svd,y_train)
# In[166]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, 'lr_svd.csv')
# 
# ## 5.4 Random Forest with SVD
# In[167]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[168]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[169]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[170]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_svd,y_train)
# In[171]:
features=train_svd.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[172]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, 'rf_svd.csv')
# 
# ## 5.5 Xgboost with SVD
# In[173]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_svd,y_train)
# In[174]:
results = | pd.DataFrame(best_model.cv_results_) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
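        # tm.box_expected wraps the data in the parametrized container
        # (Index/Series/DataFrame/array); get_upcast_box reports which
        # container the comparison result is expected to come back in.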
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
import glob
import importlib
import multiprocessing as mp
import numbers
import os
import pathlib
import sys
import warnings
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
from IPython import get_ipython
from IPython.display import display
from tsfel.utils.progress_bar import display_progress_bar, progress_bar_notebook
from tsfel.utils.signal_processing import merge_time_series, signal_window_splitter
def dataset_features_extractor(main_directory, feat_dict, verbose=1, **kwargs):
"""Extracts features from a dataset.
Parameters
----------
main_directory : String
Input directory
feat_dict : dict
Dictionary with features
verbose : int
Level of function communication
(0 or 1 (Default))
\**kwargs:
See below:
* *search_criteria* (``list``) --
List of file names to compute features. (Example: 'Accelerometer.txt')
(default: ``None``)
* *time_unit* (``float``) --
Time unit
(default: ``1e9``)
* *resample_rate* (``int``) --
Resampling rate
(default: ``30``)
* *window_size* (``int``) --
Window size in number of samples
(default: ``100``)
* *overlap* (``float``) --
Overlap between 0 and 1
(default: ``0``)
* *pre_process* (``function``) --
Function with pre processing code
(default: ``None``)
* *output_directory* (``String``) --
Output directory
(default: ``'output_directory', str(Path.home()) + '/tsfel_output'``)
* *features_path* (``string``) --
Directory of script with personal features
* *header_names* (``list or array``) --
Names of each column window
Returns
-------
file
csv file with the extracted features
"""
search_criteria = kwargs.get('search_criteria', None)
time_unit = kwargs.get('time_unit', 1e9)
resample_rate = kwargs.get('resample_rate', 30)
window_size = kwargs.get('window_size', 100)
overlap = kwargs.get('overlap', 0)
pre_process = kwargs.get('pre_process', None)
output_directory = kwargs.get('output_directory', str(Path.home()) + '/tsfel_output')
features_path = kwargs.get('features_path', None)
names = kwargs.get('header_names', None)
# Choosing default of n_jobs by operating system
if sys.platform[:-2] == 'win':
n_jobs_default = None
else:
n_jobs_default = -1
# Choosing default of n_jobs by python interface
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell' or \
get_ipython().__class__.__name__ == 'Shell':
n_jobs_default = -1
n_jobs = kwargs.get('n_jobs', n_jobs_default)
if main_directory[-1] != os.sep:
main_directory = main_directory+os.sep
folders = [f for f in glob.glob(main_directory + "**/", recursive=True)]
for fl in folders:
sensor_data = {}
if search_criteria:
for c in search_criteria:
if os.path.isfile(fl + c):
key = c.split('.')[0]
sensor_data[key] = pd.read_csv(fl + c, header=None)
else:
all_files = np.concatenate((glob.glob(fl + '/*.txt'), glob.glob(fl + '/*.csv')))
for c in all_files:
key = c.split(os.sep)[-1].split('.')[0]
try:
data_file = pd.read_csv(c, header=None)
except pd.errors.ParserError:
continue
if np.dtype('O') in np.array(data_file.dtypes):
continue
sensor_data[key] = pd.read_csv(c, header=None)
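# ---------------------------------------------------------------------------
# Hedged usage sketch for dataset_features_extractor(), assuming it is exported
# at the tsfel package level (as in recent releases). The directory paths and
# the 'Accelerometer.txt' file name are illustrative only; the keyword names
# mirror the kwargs read in the function above.
import tsfel

cfg = tsfel.get_features_by_domain()  # full statistical/temporal/spectral feature dict
tsfel.dataset_features_extractor(
    "/data/my_dataset",                      # hypothetical input directory
    cfg,
    search_criteria=["Accelerometer.txt"],   # only files with these names are processed
    time_unit=1e9,                           # timestamps in nanoseconds
    resample_rate=30,
    window_size=100,
    overlap=0.5,
    output_directory="/tmp/tsfel_output",
)
# ---------------------------------------------------------------------------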
import pandas as pd
def create_pivot_table(data):
# Diccionarios para nombres de columnas
def change_column_names(data_frame, relative=False):
cols_shape = data_frame.shape[1]
if relative:
names_changes = {i: "grupo{}".format(i+1+(10 - cols_shape)) for i in range(cols_shape)}
else:
names_changes = {i: "grupo{}".format(i + 1) for i in range(cols_shape)}
data_frame.rename(columns=names_changes, inplace=True)
return data_frame
group_column = data["grupo"].str.split(";", expand=True)
change_column_names(group_column)
if group_column.shape[1] < 10:
_rows = group_column.shape[0]
_cols = 10 - group_column.shape[1]
temp_df = pd.DataFrame([[None]*_cols]*_rows)
change_column_names(temp_df, True)
group_column = pd.concat([group_column, temp_df], axis=1)
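# ---------------------------------------------------------------------------
# Hedged sketch of the input create_pivot_table() expects: a DataFrame with a
# 'grupo' column holding up to ten ';'-separated group labels. The sample rows
# are invented; only the split-and-pad step shown above is exercised here.
import pandas as pd

ejemplo = pd.DataFrame({"grupo": ["a;b;c", "x;y", "solo"]})
# After the padding above, every row ends up with columns grupo1..grupo10,
# filled with None where fewer than ten groups were present.
print(ejemplo["grupo"].str.split(";", expand=True))
# ---------------------------------------------------------------------------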
import pymatgen.core as mg
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from .atom_init import Atom_init
from bokeh.sampledata.periodic_table import elements
class Features:
def info(self):
print("featurename is " + self.featurename)
def get_VEC(self, elementstr):
elobj = mg.Element(elementstr)
VEC = 0
if elobj.is_lanthanoid:
VEC = 3
elif elobj.is_actinoid:
if elobj.name == "Np":
VEC = 5
elif elobj.name == "Pu":
VEC = 4
elif elobj.name == "Es":
VEC = 2
else:
VEC = 3
else:
group = elobj.group
if group > 12:
VEC = group - 10
else:
VEC = group
if group == 17:
VEC = 0
return VEC
def get_comp_dict(self, composition):
try:
return dict(mg.Composition(composition).fractional_composition.get_el_amt_dict())
except:
return {}
def get_atom_init(self):
atom_init = Atom_init.cgcnn_atom_init()
return atom_init
def get_ave_atom_init(self, composition):
compdict = self.get_comp_dict(composition)
atom_init = Atom_init.cgcnn_atom_init()
el_dict = dict(elements[["symbol","atomic number"]].values)
try:
if len(compdict) > 1:
tmp = 0
for el, frac in compdict.items():
tmp += frac*np.array(atom_init[el_dict[el]])#/len(compdict)
return np.array(tmp)
elif len(compdict) == 1:
tmp = atom_init[el_dict[list(compdict.keys())[0]]]
return np.array(tmp)
except:
#Elements with atomic number more than 100
return np.array([np.nan]*len(atom_init[1]))
def ave(self, composition, description):
try:
composition = self.get_comp_dict(composition)
if description == "group":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += mg.Element(el).group*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = mg.Element(list(composition.keys())[0]).group
return tmp
elif description == "row":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += mg.Element(el).row*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = mg.Element(list(composition.keys())[0]).row
return tmp
elif description == "block":
block_dict = dict(zip(["s","p","d","f"],[0]*4))
if len(composition) > 1:
for el, frac in composition.items():
block_dict[mg.Element(el).block] += frac / sum(composition.values())
return block_dict
elif len(composition) == 1:
block_dict[mg.Element(list(composition.keys())[0]).block] = 1
return block_dict
elif description == "VEC":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += self.get_VEC(el)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = self.get_VEC(list(composition.keys())[0])
return tmp
elif description == "First ionization energies":
if len(composition) > 1:
try:
tmp = 0
for el, frac in composition.items():
tmp += np.log10(mg.Element(el).data["Ionization energies"][0])*frac / sum(composition.values())
return tmp
except:
pass
elif len(composition) == 1:
tmp = np.log10(mg.Element(list(composition.keys())[0]).data["Ionization energies"][0])
return tmp
elif description == "Molar volume":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += np.log10(float(mg.Element(el).data[description].split("cm<sup>3</sup>")[0]))*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = np.log10(float(mg.Element(list(composition.keys())[0]).data[description].split("cm<sup>3</sup>")[0]))
return tmp
elif description == "Bulk modulus":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += float(mg.Element(el).data[description].split("(liquid)GPa")[0].split("GPa")[0])*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = float(mg.Element(list(composition.keys())[0]).data[description].split("(liquid)GPa")[0].split("GPa")[0])
return tmp
elif description == "Melting point":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += float(mg.Element(el).data[description].split("K")[0])*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = float(mg.Element(list(composition.keys())[0]).data[description].split("K")[0])
return tmp
elif description == "Boiling point":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += float(mg.Element(el).data[description].split("K")[0])*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
tmp = float(mg.Element(list(composition.keys())[0]).data[description].split("K")[0])
return tmp
else:
if len(composition) > 1:
try:
tmp = 0
for el, frac in composition.items():
tmp += mg.Element(el).data[description]*frac / sum(composition.values())
return tmp
except:
return tmp
elif len(composition) == 1:
return mg.Element(list(composition.keys())[0]).data[description]
except:
#print(composition, description)
return np.nan
def var(self, composition, description):
composition = self.get_comp_dict(composition=composition)
if description == "group":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += ((mg.Element(el).group-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
elif description == "row":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += ((mg.Element(el).row-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
elif description == "block":
block_dict = dict(zip(["s","p","d","f"],[0]*4))
ave_block = self.ave(composition,description)
if len(composition) > 1:
for el, frac in composition.items():
block_idx = dict(zip(["s","p","d","f"],[0]*4))
block_idx[mg.Element(el).block] = 1
for k, v in block_idx.items():
block_dict[k] += ((v - ave_block[k])**2)*frac / sum(composition.values())
return block_dict
elif len(composition) == 1:
return dict(zip(["s","p","d","f"],[0]*4))
elif description == "VEC":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += ((self.get_VEC(el)-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
elif description == "First ionization energies":
if len(composition) > 1:
try:
tmp = 0
for el, frac in composition.items():
tmp += ((np.log10(mg.Element(el).data["Ionization energies"][0])-self.ave(composition, description))**2)*frac / sum(composition.values())
return tmp
except:
pass
elif len(composition) == 1:
return 0
elif description == "Molar volume":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp +=( (np.log10(float(mg.Element(el).data[description].split("cm<sup>3</sup>")[0]))-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
elif description == "Bulk modulus":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += ((float(mg.Element(el).data[description].split("(liquid)GPa")[0].split("GPa")[0])-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
elif description == "Melting point":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += ((float(mg.Element(el).data[description].split("K")[0])-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
elif description == "Boiling point":
if len(composition) > 1:
tmp = 0
for el, frac in composition.items():
tmp += ((float(mg.Element(el).data[description].split("K")[0])-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
elif len(composition) == 1:
return 0
else:
if len(composition) > 1:
try:
tmp = 0
for el, frac in composition.items():
tmp += ((mg.Element(el).data[description]-self.ave(composition,description))**2)*frac / sum(composition.values())
return tmp
except:
pass
elif len(composition) == 1:
return 0
def main_max1min1diff(self, composition, description):
composition = self.get_comp_dict(composition=composition)
if description == "group":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(mg.Element(el).group)
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
elif description == "row":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(mg.Element(el).row)
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
elif description == "block":
if len(composition) > 1:
block_max_dict = dict(zip(["s","p","d","f"],[0]*4))
block_min_dict = dict(zip(["s","p","d","f"],[1]*4))
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
block_idx = dict(zip(["s","p","d","f"],[0]*4))
block_idx[mg.Element(el).block] = 1
for k, v in block_idx.items():
if v >= block_max_dict[k]:
block_max_dict[k] = v
if v <= block_min_dict[k]:
block_min_dict[k] = v
return dict(zip(["s","p","d","f"],np.array(list(block_max_dict.values())) - np.array(list(block_min_dict.values()))))
elif len(composition) == 1:
return dict(zip(["s","p","d","f"],[0]*4))
elif description == "VEC":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(self.get_VEC(el))
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
elif description == "First ionization energies":
if len(composition) > 1:
try:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = np.log10(float(mg.Element(el).data["Ionization energies"][0]))
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
except:
pass
elif len(composition) == 1:
return 0
elif description == "Molar volume":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = np.log10(float(mg.Element(el).data[description].split("cm<sup>3</sup>")[0]))
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
elif description == "Boiling point":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(mg.Element(el).data[description].split("K")[0])
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
elif description == "Bulk modulus":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(mg.Element(el).data[description].split("(liquid)GPa")[0].split("GPa")[0])
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
elif description == "Melting point":
if len(composition) > 1:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(mg.Element(el).data[description].split("K")[0])
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
elif len(composition) == 1:
return 0
else:
if len(composition) > 1:
try:
maxval = 0
minval = 1000000
for el in np.array(list(composition.keys()))[np.array(list(composition.values()))>=0.1]:
val = float(mg.Element(el).data[description])
if val >= maxval:
maxval = val
if val <= minval:
minval = val
return np.abs(maxval - minval)
except:
pass
elif len(composition) == 1:
return 0
def get_comp_desc(self,composition, func=["ave","var","main_max1min1diff"],desclist=["comp_length","compbase_length","Atomic no","group","row","Mendeleev no","Atomic mass","Atomic radius","X","VEC"]):
try:
compdict = self.get_comp_dict(composition=composition)
comp_length = len(compdict)
compbase_length = len(np.array(list(compdict.keys()))[np.array(list(compdict.values()))>=0.1])
response = {}
desc_tmp = desclist.copy()
if "comp_length" in desc_tmp:
response.update({"comp_length":comp_length})
desc_tmp.remove("comp_length")
if "compbase_length" in desc_tmp:
response.update({"compbase_length":compbase_length})
desc_tmp.remove("compbase_length")
for desc in desc_tmp:
if "ave" in func:
if "block" == desc:
blocks = self.ave(compdict,desc)
for k, v in blocks.items():
response.update({"ave:"+k: v})
else:
ave_tmp = self.ave(compdict,desc)
if ave_tmp != "no data":
response.update({"ave:"+desc: ave_tmp})
else:
response.update({"ave:"+desc: 0})
if "var" in func:
if "block" == desc:
blocks = self.var(compdict,desc)
for k, v in blocks.items():
response.update({"var:"+k: v})
else:
var_tmp = self.var(compdict,desc)
if var_tmp != "no data":
response.update({ "var:"+desc: var_tmp})
else:
response.update({"var:"+desc: 0})
if "main_max1min1diff" in func:
if "block" == desc:
blocks = self.main_max1min1diff(compdict,desc)
for k, v in blocks.items():
response.update({"main_max1min1diff:"+k: v})
else:
diff_tmp = self.main_max1min1diff(compdict,desc)
if diff_tmp != "no data":
response.update({ "main_max1min1diff:"+desc: diff_tmp})
else:
response.update({"main_max1min1diff:"+desc: 0})
return response
except:
response = {}
block_dict = dict(zip(["s","p","d","f"],[np.nan]*4))
desc_tmp = desclist.copy()
if "comp_length" in desc_tmp:
response.update({"comp_length":np.nan})
desc_tmp.remove("comp_length")
if "compbase_length" in desc_tmp:
response.update({"compbase_length":np.nan})
desc_tmp.remove("compbase_length")
for desc in desc_tmp:
if "ave" in func:
if "block" == desc:
blocks = block_dict
for k, v in blocks.items():
response.update({"ave:"+k: v})
else:
response.update({"ave:"+desc: np.nan})
if "var" in func:
if "block" == desc:
blocks = block_dict
for k, v in blocks.items():
response.update({"var:"+k: v})
else:
response.update({ "var:"+desc: np.nan})
if "main_max1min1diff" in func:
if "block" == desc:
blocks = block_dict
for k, v in blocks.items():
response.update({"main_max1min1diff:"+k: v})
else:
response.update({"main_max1min1diff:"+desc: np.nan})
return response
def get_comp_descfeatures(self, complist, func=["ave","var","main_max1min1diff"], desclist=["comp_length", "compbase_length", "Atomic no", "group", "row", "Mendeleev no", "Atomic mass", "Atomic radius", "X", "VEC"]):
features = []
for comp in complist:
tmp = {"composition": comp}
tmp.update(self.get_comp_desc(comp, func, desclist))
features.append(tmp)
df_feature = pd.DataFrame(features)
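# ---------------------------------------------------------------------------
# Minimal standalone sketch of the fraction-weighted averaging idea behind
# Features.ave(), using only pymatgen (no Atom_init / bokeh dependency).
# The electronegativity example is an illustration, not part of the class above.
import pymatgen.core as mg

def weighted_mean_electronegativity(composition: str) -> float:
    # element -> molar fraction, e.g. Fe2O3 -> {'Fe': 0.4, 'O': 0.6}
    comp = mg.Composition(composition).fractional_composition.get_el_amt_dict()
    return sum(mg.Element(el).X * frac for el, frac in comp.items())

# weighted_mean_electronegativity("Fe2O3") == 0.4*X(Fe) + 0.6*X(O)
# ---------------------------------------------------------------------------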
import unittest
from typing import List, Optional
import pandas as pd
from kbc_pul.data_structures.pandas_kb import PandasKnowledgeBaseWrapper
from kbc_pul.data_structures.rule_wrapper import RuleWrapper
from kbc_pul.rule_metrics.prediction_cache_rule_metrics.rule_ipw_and_ipw_pca_confidences_from_cached_predictions import \
calculate_rule_ipw_and_ipw_pca_confidences_from_df_cached_predictions
from kbc_pul.test.rule_wrapper_testing_utils import get_rule_wrapper_from_str_repr
from pylo.language.lp import (Clause as PyloClause, global_context as pylo_global_context)
class TestingRegularConfidenceMetrics(unittest.TestCase):
def setUp(self):
data: List[List[str]] = [
["'adam'", "livesin", "'paris'"],
["'adam'", "livesin", "'rome'"],
["'bob'", "livesin", "'zurich'"],
["'adam'", "wasbornin", "'paris'"],
["'carl'", "wasbornin", "'rome'"],
["'dennis'", "wasbornin", "'zurich'"] # added to have a different PCA conf in both directions
]
columns = ["Subject", "Rel", "Object"]
self.df: pd.DataFrame = pd.DataFrame(data=data, columns=columns)
from django.test import TestCase
from transform_layer.services.data_service import DataService, KEY_SERVICE, KEY_MEMBER, KEY_FAMILY
from transform_layer.calculations import CalculationDispatcher
from django.db import connections
import pandas
from pandas.testing import assert_frame_equal, assert_series_equal
import unittest
class HasDataTestCase(unittest.TestCase):
def test_has_data_empty_dataframe(self):
data = pandas.DataFrame()
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_nonempty_dataframe(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = pandas.DataFrame(d1)
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
# look at the first five rows
dataset.head(5)
# Check if there's any column which is not useful and remove it like the column id
dataset.drop('Id', axis = 1, inplace = True)
dataset.describe()
# check the statistical description
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns
#number of attributes (exclude target)
size = len(dataset.drop('Cover_Type', axis = 1).columns)
#x-axis has target attribute to distinguish between classes
x = dataset['Cover_Type']
y = dataset.drop('Cover_Type', axis = 1)
#y-axis shows values of an attribute
#Plot violin for all attributes
for i in range(0, size):
sns.violinplot(x=x, y=y[cols[i]])
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
subset_train = dataset.iloc[:,0:10]
data_corr = subset_train.corr()
plt.figure(figsize = (14,10))
sns.heatmap(data = data_corr, square = True, annot = True, cmap = 'coolwarm')
correlation = data_corr.unstack().sort_values(kind = 'quicksort')
corr_var_list = correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]
# Code ends here
# --------------
#Import libraries
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
r,c = dataset.shape
X = dataset.iloc[:,:-1]
Y = dataset.iloc[:,-1]
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
#Standardized
scaler = StandardScaler()
#Apply transform only for non-categorical data
X_train_temp = scaler.fit_transform(X_train.iloc[:,:10])
X_test_temp = scaler.transform(X_test.iloc[:,:10])
#Concatenate non-categorical data and categorical
X_train1 = numpy.concatenate((X_train_temp,X_train.iloc[:,10:c-1]),axis=1)
X_test1 = numpy.concatenate((X_test_temp,X_test.iloc[:,10:c-1]),axis=1)
scaled_features_train_df = pd.DataFrame(X_train1, index=X_train.index, columns=X_train.columns)
scaled_features_test_df = pd.DataFrame(X_test1, index=X_test.index, columns=X_test.columns)
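# ---------------------------------------------------------------------------
# A more idiomatic sketch of the step above (scale the first 10 continuous
# columns, pass the one-hot soil/wilderness indicator columns through
# unchanged) using scikit-learn's ColumnTransformer. The column positions are
# taken from the code above; everything else is illustrative.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

preprocess = ColumnTransformer(
    transformers=[("scale", StandardScaler(), list(range(10)))],
    remainder="passthrough",   # keep the binary indicator columns as-is
)
X_train_scaled = preprocess.fit_transform(X_train)
X_test_scaled = preprocess.transform(X_test)
# ---------------------------------------------------------------------------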
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import classifier_utils
import feature_utils
# =============================================================================
# Available classifier keywords
# "GradientBoosting""
# "RandomForest"
# "NeuralNetwork
# "SVM"
# "ExtremeGradientBoosting"
# =============================================================================
CLASSIFIER = "RandomForest"
FOLD = 10
# =============================================================================
# Training through K-folds on train data
# =============================================================================
df = pd.read_csv('train.csv')
df.info()
df.describe()
feature_utils.feature_analysis(df) #Analyse Features
df = feature_utils.feature_engineering(df) #Extract and add features from existing ones
df = feature_utils.feature_selection(df) #Drop insignificant features
X = df.drop(['Survived'],axis=1) #Drop target variable
y = df['Survived'] #Define target variable
#Determine best performing model through K-Folds and varying classifier
classifier_utils.determine_model_performance(CLASSIFIER,X,y,FOLD) #Submit selected classifier and data for K-fold performance analysis
model = classifier_utils.get_model(CLASSIFIER, X,y) #train selected and parameter-tuned classifier on whole dataset
# =============================================================================
# Predictions on test data
# =============================================================================
test_df = pd.read_csv('test.csv')
test_df = feature_utils.feature_engineering(test_df) #Extract and add features from existing ones
test_df = feature_utils.feature_selection(test_df) #Drop insignificant features
test_df = feature_utils.data_model_preparation(test_df) #Fare+Price: Fill Na with mean values and use standard scale
y_pred = classifier_utils.get_prediction(model,len(test_df),test_df) #Get predictions on test data
submission_result = pd.read_csv('submission_template.csv') #Create dataframe from submission template
submission_result['Survived'] = pd.Series(y_pred, dtype="int32")
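# ---------------------------------------------------------------------------
# classifier_utils.determine_model_performance() is not shown in this file; a
# minimal sketch of what such a K-fold comparison typically looks like with
# scikit-learn is given below. The RandomForest settings and the accuracy
# metric are assumptions, not the project's actual configuration.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

def kfold_accuracy(features, target, folds=10):
    clf = RandomForestClassifier(n_estimators=200, random_state=0)
    scores = cross_val_score(clf, features, target, cv=folds, scoring="accuracy")
    return scores.mean(), scores.std()

# e.g. mean_acc, std_acc = kfold_accuracy(X, y, folds=FOLD)
# ---------------------------------------------------------------------------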
"""Read in hourly weather file."""
import os
import glob
import yaml
from datetime import datetime
from dateutil import tz
import numpy as np
import pandas as pd
import xarray as xr
from timezonefinder import TimezoneFinder
from ideotype import DATA_PATH
from ideotype.utils import CC_RH, CC_VPD
from ideotype.data_process import read_data
from ideotype.nass_process import nass_summarize
def read_wea(year_start, year_end, climate_treatment=None):
"""
Read in raw hourly weather data.
- Data source: NOAA Integrated Surface Hourly Database
- Link: https://www.ncdc.noaa.gov/isd
- Weather data: temperature, RH, precipitation
- Raw data stored: ~/data/ISH/
- Output csv files stored: ~/upscale/weadata/process/
* note:
For years 1991-2010, only select data from class 1
(refer to NSRDB manual p.7-8 for more details)
- class 1: have complete period of record of 1991-2010.
- class 2: have complete period of record but with
significant periods of interpolated, filler,
or otherwise low-quality input data for solar models.
- class 3: have some gaps in the period of record
but have at least 3 years of data.
Parameters
----------
year_start : int
year_end : int
climate_treatment : int
Create weather data for future climate projections.
2050 or 2100.
"""
# setting up np.read_fwf arguments
colnames = ['time',
'temp', 'temp_quality',
'dew_temp', 'dtemp_quality',
'precip', 'precip_time',
'precip_depth', 'precip_quality',
'precip_perhr', 'rh']
colspecs = [(15, 25), # time
(87, 92), # temp
(92, 93), # temp_quality
(93, 98), # dew_temp
(98, 99), # dtemp_quality
(105, 8193)] # precip string
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Read in info on conversion between WBAN & USAF id numbering system
fpath_id_conversion = os.path.join(
DATA_PATH, 'sites', dict_fpaths['id_conversion'])
df_stations = pd.read_csv(fpath_id_conversion, header=None, dtype=str)
df_stations.columns = ['WBAN', 'USAF']
# Read in stations info
fpath_stations_info = os.path.join(
DATA_PATH, 'sites', dict_fpaths['stations_info'])
df_sites = pd.read_csv(fpath_stations_info)
# Set up basepath
if climate_treatment is None:
basepath = dict_fpaths['basepath']
else:
basepath = f'{dict_fpaths["basepath"]}_f{climate_treatment}'
# Set up years
if year_start == year_end:
years = [year_start]
else:
years = np.arange(year_start, year_end+1)
# Set up date parser for pandas
dateparser = lambda dates: [datetime.strptime(d, '%Y%m%d%H') for d in dates] # noqa
# Loop through years to read in data
for year in years:
print(year) # track progress
# Check first if file exists already
if os.path.isfile(os.path.join(basepath, f'temp_{year}.csv')):
raise ValueError(f'temp_{year}.csv exists!')
# Set up default timeline
season_start = '02-01-'
season_end = '11-30-'
times = pd.date_range(f'{season_start + str(year)}',
f'{season_end + str(year)} 23:00:00',
freq='1H')
arr_temp_sites = np.zeros(shape=(len(times),))
arr_rh_sites = np.zeros(shape=(len(times),))
arr_precip_sites = np.zeros(shape=(len(times),))
# initiate empty list to store all site ids (USAF)
siteid_all = []
# For years 1961-1990
if year < 1991:
fnames = glob.glob(
os.path.join(os.path.expanduser('~'),
'data', 'ISH', str(year), '*'))
# For years 1991-2010
else:
# Select class1 weather station sites
sites = df_sites.query(
'CLASS == 1').reset_index().USAF.astype('str')
# Select sites within specified year that are class1
sites_year = glob.glob(
os.path.join(os.path.expanduser('~'),
'data', 'ISH', str(year), '*'))
sites_year = pd.Series([
site.split('/')[-1].split('-')[0] for site in sites_year])
sites_year = sites_year[
sites_year.isin(sites)].reset_index(drop=True)
# Drop duplicates in sites_year
sites_year.drop_duplicates(keep='first', inplace=True)
fnames = []
for site in sites_year:
fname = glob.glob(os.path.join(os.path.expanduser('~'),
'data', 'ISH',
str(year),
f'{site}-*'))
if len(fname) == 1:
fnames.append(fname[0])
else:
print(f'choose from files: {fname}')
fname = glob.glob(os.path.join(os.path.expanduser('~'),
'data', 'ISH',
str(year),
f'{site}-99999-*'))
fnames.append(fname[0])
for name in fnames:
# site_id
siteid_usaf = name.split('/')[-1].split('-')[0]
siteid_wban = name.split('/')[-1].split('-')[1]
if siteid_usaf == '999999':
siteid_usaf = df_stations.query(
f'WBAN == "{siteid_wban}"').USAF.item()
siteid_all.append(siteid_usaf)
# Read in fixed width weather data
df = pd.read_fwf(name,
names=colnames,
colspecs=colspecs,
header=None,
index_col='time',
encoding='latin_1',
dtype={'temp': int, 'precip': str},
parse_dates=['time'],
date_parser=dateparser)
# Remove duplicated hours, keeping only first occurrence
# keep = 'first': marks duplicate as True
# except for first occurrence
# ~: not selecting for True ends up selecting
# for the non-duplicated indexes
# *** note: can't just use df.index.drop_duplicates() since
# * that only returns a list of the non-duplicated index
# * but you can't just use that to select non-duplicated rows
# * since it will also pick up the duplicated rows
df = df[~df.index.duplicated(keep='first')]
# Add in missing time values
# Correct for leap years
# Filter only for growing season
df = df.reindex(times, fill_value=np.nan)
# Find precip data
df.precip_time = df[
df['precip'].str.find('ADDAA1') != -1]['precip'].str.split(
'ADDAA1').str.get(1).str.slice(0, 2).astype(float)
df.precip_depth = df[
df['precip'].str.find('ADDAA1') != -1]['precip'].str.split(
'ADDAA1').str.get(1).str.slice(2, 6).astype(float)
df.precip_quality = df[
df['precip'].str.find('ADDAA1') != -1]['precip'].str.split(
'ADDAA1').str.get(1).str.slice(7, 8)
# Filter out weather data based on quality code (data manual p.26)
# Masking unqualified data with NANs:
# code 3 (Erroneous) &
# code 7 (Erroneous, data originated from an NCEI data source)
# *** temp
quality_temp = (
df.temp_quality == '3') | (df.temp_quality == '7')
rows_temp = df[quality_temp].index
df.loc[rows_temp, 'temp'] = np.nan
# *** dew temp
quality_dtemp = (
df.dtemp_quality == '3') | (df.dtemp_quality == '7')
rows_dtemp = df[quality_dtemp].index
df.loc[rows_dtemp, 'dew_temp'] = np.nan
# *** precip
quality_precip = (
df.precip_quality == '3') | (df.precip_quality == '7')
rows_precip = df[quality_precip].index
df.loc[rows_precip, 'precip'] = np.nan
# Replace missing data with NaN
df.temp = df.temp.replace({9999: np.nan})
df.dew_temp = df.dew_temp.replace({9999: np.nan})
df.precip_time = df.precip_time.replace({99: np.nan})
df.precip_depth = df.precip_depth.replace({9999: np.nan})
# Calculate hourly precip depth
df.precip_perhr = df.precip_depth/df.precip_time
# Account for cases where precip_hr = 0
# which produces infinite precip_perhr
df.precip_perhr = df.precip_perhr.replace({np.inf: np.nan})
# Unit conversion
df.temp = np.round(df.temp/10, 2)
df.dew_temp = np.round(df.dew_temp/10, 2)
df.precip_perhr = np.round(df.precip_perhr/10, 1)
# Apply climate treatment if requested
if climate_treatment is not None:
try:
lat = df_sites.query(
f'USAF == {siteid_usaf}')['ISH_LAT (dd)'].item()
lon = df_sites.query(
f'USAF == {siteid_usaf}')['ISH_LON(dd)'].item()
months = list(df.index.month)
scales_temp = scale_climate(lat, lon, months, 'T')
scales_rh = scale_climate(lat, lon, months, 'RH')
# Calculate temperature anomalies based on scaling pattern
# based on CMIP6 SSP3-7.0 scenario
if climate_treatment == 2050:
temp_anomaly = 1.4
precip_anomaly = 0.85 # 15% reduction
elif climate_treatment == 2100:
temp_anomaly = 3.1
precip_anomaly = 0.7 # 30% reduction
# Fetch temperature anomalies
temp_anomalies = [
scale * temp_anomaly for scale in scales_temp]
# Apply temperature anomalies
temp_presentday = df.temp
temp_future = np.round(temp_presentday + temp_anomalies, 2)
df.temp = temp_future
except(ValueError):
print(year, name.split('/')[-1])
# calculate RH through Clausius Clapeyron
df.rh = CC_RH(df.temp, df.dew_temp)
if df[df.rh > 100].rh.sum() > 100:
print('rh > 100: ', year, name)
# fetch RH anomalies
rh_anomalies = [
scale * temp_anomaly for scale in scales_rh]
# apply RH anomalies
rh_presentday = df.rh
rh_future = np.round(rh_presentday + rh_anomalies, 2)
df.rh = rh_future
# apply precip anomalies
precip_presentday = df.precip_perhr
precip_future = np.round(precip_presentday * precip_anomaly, 2)
df.precip_perhr = precip_future
# stack site data
arr_temp_sites = np.vstack([arr_temp_sites, df.temp])
arr_rh_sites = np.vstack([arr_rh_sites, df.rh])
arr_precip_sites = np.vstack([arr_precip_sites, df.precip_perhr])
# Convert all data for single year into pd.DataFrame
df_temp_sites = pd.DataFrame(arr_temp_sites.transpose(), index=times)
df_temp_sites.drop(df_temp_sites.columns[0], axis=1, inplace=True)
df_temp_sites.columns = siteid_all
df_temp_sites.sort_index(axis=1, inplace=True)
df_rh_sites = pd.DataFrame(arr_rh_sites.transpose(), index=times)
df_rh_sites.drop(df_rh_sites.columns[0], axis=1, inplace=True)
df_rh_sites.columns = siteid_all
df_rh_sites.sort_index(axis=1, inplace=True)
df_precip_sites = pd.DataFrame(
arr_precip_sites.transpose(), index=times)
df_precip_sites.drop(df_precip_sites.columns[0], axis=1, inplace=True)
df_precip_sites.columns = siteid_all
df_precip_sites.sort_index(axis=1, inplace=True)
# Output data for each year
df_temp_sites.to_csv(os.path.join(basepath, f'temp_{year}.csv'))
df_rh_sites.to_csv(os.path.join(basepath, f'rh_{year}.csv'))
df_precip_sites.to_csv(os.path.join(basepath, f'precip_{year}.csv'))
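# ---------------------------------------------------------------------------
# CC_RH() is imported from ideotype.utils and not shown in this dump. For
# reference, a common Magnus-type approximation for relative humidity from air
# temperature and dew point (both in deg C) is sketched below; the constants
# 17.625 / 243.04 are the standard Alduchov-Eskridge values and are not
# necessarily what ideotype itself uses.
import numpy as np

def rh_from_dewpoint(temp_c, dewpoint_c):
    # ratio of actual to saturation vapor pressure; the 6.112 hPa prefactor cancels
    e_act = np.exp(17.625 * dewpoint_c / (243.04 + dewpoint_c))
    e_sat = np.exp(17.625 * temp_c / (243.04 + temp_c))
    return 100.0 * e_act / e_sat
# ---------------------------------------------------------------------------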
def read_solrad(year_start, year_end):
"""
Read in raw hourly solar radiation data.
- Data source: NSRDB
- Source: https://nsrdb.nrel.gov/about/u-s-data.html
- METSTAT Glo (Wh/m2):
Total amount of direct and diffuse solar radiation (METSTAT-modeled)
received on a horizontal surface during the 60-minute period
ending at the timestamp (refer to NSRDB data manual p.15 Table 3)
- Raw data stored: ~/data/ISH_NSRD/
- Output csv files stored: ~/upscale/weadata/process/
* note:
For years 1991-2010, only select data from class 1
(refer to NSRDB manual p.7-8 for more details)
- class 1: have complete period of record of 1991-2010.
- class 2: have complete period of record but with
significant periods of interpolated, filler,
or otherwise low-quality input data for solar models.
- class 3: have some gaps in the period of record
but have at least 3 years of data.
Parameters
----------
year_start : int
year_end : int
"""
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Set up basepath
basepath = dict_fpaths['basepath']
# Read in info on conversion between WBAN & USAF id numbering system
fpath_id_conversion = os.path.join(
DATA_PATH, 'sites', dict_fpaths['id_conversion'])
df_stations = pd.read_csv(fpath_id_conversion, header=None, dtype=str)
df_stations.columns = ['WBAN', 'USAF']
stations_usaf = df_stations.USAF
# Set up years
if year_start == year_end:
years = [year_start]
else:
years = np.arange(year_start, year_end+1)
# Dataframe setup for years 1961-1990
colnames = ['year', 'month', 'day', 'hour', 'solrad']
colspecs = [(1, 3), (4, 6), (7, 9), (10, 12), (23, 27)]
# Loop through years to read in data
for year in years:
print(year) # track progress
# Check first if file exists already
if os.path.isfile(os.path.join(basepath, f'solrad_{year}.csv')):
raise ValueError(f'solrad_{year}.csv exists!')
# Set up default timeline
season_start = '02-01-'
season_end = '11-30-'
datetimes_season = pd.date_range(
f'{season_start + str(year)}',
f'{season_end + str(year)} 23:00:00', freq='1H')
# Initiate empty array to store data
arr_solrad_sites = np.zeros(shape=len(datetimes_season),)
# initiate empty list to store all site ids (USAF)
siteid_all = []
# For years 1961-1990
if year < 1991:
# Fetch all file names within year
fnames = glob.glob(
os.path.join(os.path.expanduser('~'),
'data', 'ISH_NSRD', str(year), '*'))
for name in fnames:
siteid_wban = name.split('/')[-1].split('_')[0]
siteid_usaf = df_stations.query(
f'WBAN == "{siteid_wban}"').USAF.item()
siteid_all.append(siteid_usaf)
# Read in fixed-width data
df = pd.read_fwf(name,
skiprows=[0],
header=None,
names=colnames,
colspecs=colspecs)
# Structure date-time info
datetimes = df.apply(lambda row: datetime(
year, row['month'], row['day'], row['hour']-1), axis=1)
# Fetch solrad - Global Horizontal Radiation (Wh/m2)
df_solrad = pd.DataFrame(df.solrad)
df_solrad.index = datetimes
# Remove duplicated hours, keeping only first occurrence
# keep = 'first': marks duplicate as True
# except for first occurrence
# ~: not selecting for True ends up selecting
# for the non-duplicated indexes
df_solrad = df_solrad[
~df_solrad.index.duplicated(keep='first')]
# Add in missing time values
# Correct for leap years
# Filter only for growing season
df_solrad = df_solrad.reindex(datetimes_season,
fill_value=np.nan)
# Replace missing data with NaN
df_solrad.replace({9999: np.nan}, inplace=True)
arr_solrad_sites = np.vstack(
[arr_solrad_sites, df_solrad.solrad])
# Convert all data for single year into pd.DataFrame
df_solrad_sites = pd.DataFrame(
arr_solrad_sites.transpose(), index=datetimes_season)
df_solrad_sites.drop(
df_solrad_sites.columns[0], axis=1, inplace=True)
df_solrad_sites.columns = siteid_all
df_solrad_sites.sort_index(axis=1, inplace=True)
# Output data for each year
df_solrad_sites.to_csv(
os.path.join(basepath, f'solrad_{year}.csv'))
# For years 1991-2010:
else:
for station in stations_usaf:
# Search for specified year-site data
fname = glob.glob(os.path.join(
os.path.expanduser('~'),
'data', 'ISH_NSRD', str(year), f'{station}_*.csv'))
if len(fname) == 1:
# Read in file
df = pd.read_csv(fname[0])
siteid_all.append(station)
else:
print('multiple files!', fname)
# Format date-time info
dates = df['YYYY-MM-DD']
hours = df['HH:MM (LST)']
hours = [int(hour.split(':')[0])-1 for hour in hours]
datetimes = [datetime.strptime(
dates[item] + '-' + str(hours[item]),
'%Y-%m-%d-%H') for item in np.arange(df.shape[0])]
# Fetch solrad - Global Horizontal Radiation (Wh/m2)
df_solrad = pd.DataFrame(df['METSTAT Glo (Wh/m^2)'])
df_solrad.columns = ['solrad']
df_solrad.index = datetimes
# Remove duplicated hours, keeping only first occurrence
# keep = 'first': marks duplicate as True
# except for first occurrence
# ~: not selecting for True ends up selecting
# for the non-duplicated indexes
df_solrad = df_solrad[
~df_solrad.index.duplicated(keep='first')]
# Add in missing time values
# Correct for leap years
# Filter only for growing season
df_solrad = df_solrad.reindex(datetimes_season,
fill_value=np.nan)
# Replace missing data with NaN
df_solrad.replace({9999: np.nan}, inplace=True)
# Stacking all data as arrays to make sure
# all dimensions are correct
arr_solrad_sites = np.vstack(
[arr_solrad_sites, df_solrad.solrad])
# Convert all data for single year into pd.DataFrame
df_solrad_sites = pd.DataFrame(
arr_solrad_sites.transpose(), index=datetimes_season)
df_solrad_sites.drop(
df_solrad_sites.columns[0], axis=1, inplace=True)
df_solrad_sites.columns = siteid_all
df_solrad_sites.sort_index(axis=1, inplace=True)
# Output data for each year
df_solrad_sites.to_csv(
os.path.join(basepath, f'solrad_{year}.csv'))
def wea_combine(basepath):
"""
Combine weather data for all years.
Parameters
----------
basepath : str
path where all weather data csv files are stored.
"""
# Set up loop iterables
csv_files = ['temp_*.csv', 'rh_*.csv', 'precip_*.csv', 'solrad_*.csv']
csv_names = ['temp_all.csv', 'rh_all.csv',
'precip_all.csv', 'solrad_all.csv']
for csvs, csv_name in zip(csv_files, csv_names):
print(csv_name)
# Check if compiled csv file exists already
if os.path.isfile(os.path.join(basepath, csv_name)):
print(f'{csv_name} exists already!')
# Combine data for all years
else:
fnames = glob.glob(os.path.join(basepath, csvs))
# Read in and concat data from all years
df_all = pd.concat(
[pd.read_csv(name, index_col=0) for name in fnames])
# Order df by column so sites are ascending
df_all.sort_index(axis=1, inplace=True)
# Order df by index so time is ordered
# * note: glob.glob doesn't always grab filenames in
# the order you might think so better to order
# in this case, solrad was not ordered by year
df_all.sort_index(axis=0, inplace=True)
# Output concatenated and sorted dataframe
df_all.to_csv(os.path.join(basepath, csv_name))
def wea_preprocess(basepath):
"""
Process weather data.
Parameters
----------
basepath: str
path to access weather data
Returns
-------
df_temp
df_rh
df_precip
df_solrad
"""
# Read in processed weather data
df_temp = pd.read_csv(
os.path.join(basepath, 'temp_all.csv'),
index_col=0, parse_dates=True)
df_rh = pd.read_csv(
os.path.join(basepath, 'rh_all.csv'),
index_col=0, parse_dates=True)
df_precip = pd.read_csv(
os.path.join(basepath, 'precip_all.csv'),
index_col=0, parse_dates=True)
df_solrad = pd.read_csv(
os.path.join(basepath, 'solrad_all.csv'),
index_col=0, parse_dates=True)
# Identify overlapping stations (columns) between
# temp/rh/precip dataset & solrad dataset
cols1 = df_temp.columns
cols2 = df_solrad.columns
sites = list(cols1.intersection(cols2))
# Filter for overlapping sites only
df_temp = df_temp.loc[:, sites]
df_rh = df_rh.loc[:, sites]
df_precip = df_precip.loc[:, sites]
df_solrad = df_solrad.loc[:, sites]
return(df_temp, df_rh, df_precip, df_solrad)
def wea_siteyears(df_temp, df_rh, df_precip, df_solrad,
gseason_start, gseason_end, crthr):
"""
Identify valid site-years that satisfy critical hours for gap-filling.
Parameters
----------
df_temp : pd.DataFrame
df_rh : pd.dataFrame
df_precip : pd.DataFrame
df_solrad : pd.DataFrame
gseason_start : int
Start of growing season (month)
gseason_end : int
End of growing season (month)
crthr : int
critical hours for gap-filling
Returns
-------
siteyears : list
"""
# Identify site-years that satisfy critical hours for gap-filling
dfs = [df_temp, df_rh, df_precip, df_solrad]
final_list = []
years = np.arange(1961, 2011)
sites = list(df_temp.columns)
for df in dfs:
siteyears_all = list()
for year in years:
# Filter out specific year
df_year = df[(df.index.year == year) &
(df.index.month >= gseason_start) &
(df.index.month <= gseason_end)]
siteyears = list()
for site in sites:
# Filter out specific site-year
df_siteyear = pd.DataFrame(df_year.loc[:, site])
# 4000: ~55% of the number of rows
# Used as a threshold to toss out site-years
# that have too many gaps to fill
# even if they satisfy the critical hours.
# This is set since I noticed some sites have data
# recorded every 3 hrs.
# Valid data collection method, but I wanted to avoid
# having to gap-fill throughout that time period,
# especially for precipitation.
lim = 4000
# Only continue processing if fewer than ~55% of the values are NaN
if int(df_siteyear.isna().sum()) < lim:
# Identify whether data entry is NaN
# df.notnull() returns TRUE or FALSE,
# astype(int) turns TRUE into 1, and FALSE into 0
df_siteyear['present'] = df_siteyear.notnull().astype(int)
# Calculate cumulative sum based on whether data is
# Nan value (1) or not (0)
# If there are consecutive missing data,
# the cumulative sum for those two rows will be the same,
# and can further be used for grouping purposes
# to count the number of consecutive missing rows
# within each streak of missing data.
df_siteyear['csum'] = df_siteyear.present.cumsum()
# Select individual timesteps that have missing data
df_siteyear = df_siteyear[
df_siteyear.loc[:, site].isnull()]
# Count the number of consecutive NaNs
nans_list = df_siteyear.groupby('csum')['csum'].count()
# Only record site-years that have fewer consecutive NaNs
# than the critical value set
if nans_list[nans_list > crthr].shape[0] == 0:
use_siteyear = str(year) + '_' + str(site)
siteyears.append(use_siteyear)
siteyears_all.extend(siteyears)
final_list.append(siteyears_all)
# Assign site-years
siteyears_temp = final_list[0]
siteyears_rh = final_list[1]
siteyears_precip = final_list[2]
siteyears_solrad = final_list[3]
# Identify all overlapping site-years
siteyears = list(
set(siteyears_temp) &
set(siteyears_rh) &
set(siteyears_precip) &
set(siteyears_solrad))
return(siteyears)
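# ---------------------------------------------------------------------------
# Toy demonstration of the cumulative-sum trick wea_siteyears() uses to measure
# consecutive-NaN streak lengths (values below are made up; pd / np are the
# imports already at the top of this module).
_demo = pd.Series([1.0, np.nan, np.nan, 2.0, np.nan, 3.0])
_present = _demo.notnull().astype(int)      # 1, 0, 0, 1, 0, 1
_csum = _present.cumsum()                   # 1, 1, 1, 2, 2, 3  (constant across a NaN run)
_streaks = _csum[_demo.isnull()].value_counts()
# _streaks -> {1: 2, 2: 1}: one gap of two consecutive NaNs and one single-NaN gap
# ---------------------------------------------------------------------------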
def wea_filter(siteyears, area_threshold, irri_threshold, yearspersite):
"""
Filter valid site-years based on location, area & irri.
- Location: limit to continental US (boundaries -123, -72, 19, 53)
- Planting area
- Irrigation area
- Estimated pdate
Parameters
----------
siteyears : list
Output of site-years from wea_siteyears()
area_threshold : int
Planting area threshold.
irri_threshold : int
Percent land irrigated.
yearspersite : int
Minimum number of years of data for each site.
"""
# Identify total number of unique sites within valid site-years
sites = list(set([siteyear.split('_')[1] for siteyear in siteyears]))
sites.sort()
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Read in stations info
fpath_stations_info = os.path.join(
DATA_PATH, 'sites', dict_fpaths['stations_info'])
df_stations = pd.read_csv(fpath_stations_info, dtype={'USAF': str})
# Summarize nass data to fetch planting area & percent irrigated info
df_nass = nass_summarize(1961, 2005)
# Continental US site boundaries
lat_min = 19
lat_max = 53
lon_min = -123
lon_max = -72
# Initiate empty list
areas = []
perct_irris = []
sites_inbound = []
sites_outbound = []
for site in sites:
# Fetch site lat/lon info
lat = df_stations.query(f'USAF == "{site}"')['ISH_LAT (dd)'].item()
lon = df_stations.query(f'USAF == "{site}"')['ISH_LON(dd)'].item()
# Only include sites within continental US boundaries
if (lat_min <= lat <= lat_max) & (lon_min <= lon <= lon_max):
# Append sites within bound
sites_inbound.append(site)
# Calculate distance between site & all nass sites
dist = list(enumerate(
np.sqrt((lat - df_nass.lat)**2 + (lon - (df_nass.lon))**2)))
df_dist = pd.DataFrame(dist, columns=['rownum', 'distance'])
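# ---------------------------------------------------------------------------
# wea_filter() is truncated at this point in the dump. A self-contained toy
# sketch of the nearest-NASS-site lookup the loop above is building toward;
# note the distance is a simple planar degree distance (longitude degrees are
# not corrected for latitude), so it is only a rough ranking heuristic.
# Both objects below are invented stand-ins, not project data.
import numpy as np
import pandas as pd

toy_nass = pd.DataFrame({"lat": [41.0, 44.5], "lon": [-93.6, -88.1]})
toy_lat, toy_lon = 42.0, -93.0
toy_dist = np.sqrt((toy_lat - toy_nass.lat) ** 2 + (toy_lon - toy_nass.lon) ** 2)
toy_nearest = toy_nass.iloc[toy_dist.idxmin()]   # row of the closest NASS site
# ---------------------------------------------------------------------------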
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import corner
import numpy as np
import pandas as pd
import emcee
import sys
import mm_likelihood
from astropy.time import Time
import commentjson as json
import mm_param
import mm_make_geo_pos
from tqdm import tqdm
class ReadJson(object):
def __init__(self, filename):
print('Read the runprops.txt file')
self.data = json.load(open(filename))
def outProps(self):
return self.data
#chain = (nwalkers, nlink, ndim)
def predictions(sampler, fit_scale, float_names, obsdf, runprops, geo_obj_pos, fixed_df, total_df_names):
numdraws = 20
# Getting log likelihood posterior values and flatchain for use throughout
burnin = int(runprops.get('nburnin'))
clusterburn = int(runprops.get('clustering_burnin'))
thin_plots = int(runprops.get('nthinning'))
flatchain = sampler.get_chain(discard=int(burnin/thin_plots+clusterburn/thin_plots),flat = True, thin=thin_plots)
print(flatchain.shape, 'shape')
llhoods = sampler.get_log_prob(discard=int(burnin/thin_plots+clusterburn/thin_plots),flat = True, thin=thin_plots)
#ind = np.argmax(llhoods)
#params = flatchain[ind,:].flatten()
# Getting parameter names
names = []
for i in float_names:
names.append(i)
names_dict = runprops.get("names_dict")
# Choose random draws from the flatchain
drawsindex = np.random.randint(flatchain.shape[0], size = numdraws)
draws = flatchain[drawsindex,:]
# Get time arrays
converttimes = ["2022-10-01","2023-09-30"]
t = Time(converttimes)
timesdic = {'start': t.isot[0], 'stop': t.isot[1], 'step': '1d'}
# Make a geocentric position file
geo_obj_pos = mm_make_geo_pos.mm_make_geo_pos(objname, timesdic, runprops, True)
# Creating a fake observations data frame
times = geo_obj_pos.values[:,0].flatten()
fakeobsdf = obsdf.loc[[0,1],:]
for i in range(len(times)):
if i == 0 or i == 1:
fakeobsdf.iloc[i,0] = times[i]
fakeobsdf = fakeobsdf.append(fakeobsdf.iloc[-1,:])
fakeobsdf['time'].iloc[-1] = times[i]
fakeobsdf = fakeobsdf.iloc[2:]
# Creating arrays to hold outputs
dlong = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
dlat = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
# Holding paramvalues
nobj = runprops.get('numobjects')
print(mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0])
ndims = mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0].iloc[:,:-nobj].size
print(ndims)
paramnames = mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0].columns.tolist()[0:-nobj]
print(paramnames)
drawparams = np.zeros((ndims, numdraws))
# Looping to get model values
print('draws',draws)
for i in tqdm(range(draws.shape[0])):
paramdf = mm_param.from_fit_array_to_param_df(draws[i,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0]
drawparams[:,i] = paramdf.iloc[:,:-nobj].values
print(paramdf)
DeltaLong_Model, DeltaLat_Model, fakeobsdf = mm_likelihood.mm_chisquare(paramdf, fakeobsdf, runprops, geo_obj_pos, gensynth = True)
print(DeltaLong_Model)
for j in range(1,runprops.get('numobjects')):
dlong[i,j-1,:] = DeltaLong_Model[j-1]
dlat[i,j-1,:] = DeltaLat_Model[j-1]
# Now collapse the arrays with a std call
dlongstd = np.std(dlong,axis = 0)
dlatstd = np.std(dlat,axis = 0)
dlongmean = np.mean(dlong,axis = 0)
dlatmean = np.mean(dlat,axis = 0)
print(dlongstd.shape)
print(dlatstd.shape)
totaldf = pd.DataFrame(drawparams.T, columns = paramnames)
#print(totaldf)
# Calculate the typical (median) error in the real data
name_dict = runprops.get("names_dict")
objectnames = []
for i in name_dict.values():
objectnames.append(i)
typicalerror = np.zeros((2,runprops.get('numobjects')-1))
for i in range(1,runprops.get('numobjects')):
typicalerror[0,i-1] = np.median(obsdf["DeltaLong_" + objectnames[i] + "_err"].values.flatten())
typicalerror[1,i-1] = np.median(obsdf["DeltaLat_" + objectnames[i] + "_err"].values.flatten())
# Now create info gain arrays
infogain = np.zeros((runprops.get('numobjects')-1, times.size))
infogain2 = np.zeros((runprops.get('numobjects')-1, times.size))
print(dlongstd[0,:], typicalerror, np.sqrt(dlongstd[0,:]/typicalerror[0,0])**2)
for i in range(1,runprops.get('numobjects')):
infogain[i-1,:] = np.sqrt( (dlongstd[i-1,:]/typicalerror[0,i-1])**2 + (dlatstd[i-1,:]/typicalerror[1,i-1])**2 )
# Plot
colorcycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3','#999999', '#e41a1c', '#dede00']
fig = plt.figure(figsize = (12.8,4.8))
t = Time(times, format = "jd")
for i in range(1,runprops.get('numobjects')):
plt.plot_date(t.plot_date, infogain[i-1,:].flatten(), "-", color = colorcycle[i-1], label = objectnames[i], alpha = 0.5)
plt.xlabel("Time")
plt.ylabel("Info gained")
plt.legend()
plt.savefig("predictions.pdf", format = "pdf")
plt.close()
# Plot dlong vs dlat with color for j2
from matplotlib.backends.backend_pdf import PdfPages
predictionspdf = PdfPages("predictions_params.pdf")
for i in range(len(paramnames)):
plt.figure()
plt.scatter(0,0, color = "black")
plt.scatter(dlong[:,0,15], dlat[:,0,15], c = totaldf[paramnames[i]], edgecolor = None, alpha = 0.5, s = 10, cmap = "coolwarm")
plt.errorbar(np.median(dlong[:,0,15]), np.median(dlat[:,0,15]), xerr = typicalerror[0,0], yerr = typicalerror[1,0], ecolor = "red")
plt.xlabel("dLon")
plt.ylabel("dLat")
plt.title(paramnames[i])
color_bar = plt.colorbar()
color_bar.set_alpha(1)
color_bar.draw_all()
predictionspdf.savefig()
predictionspdf.close()
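# ---------------------------------------------------------------------------
# Standalone sketch of the information-gain metric computed above: the
# posterior spread of the predicted (dLon, dLat) in units of the typical
# observational error, added in quadrature. The numbers are made up.
demo_sigma_dlong = np.array([5e-3, 2e-2, 4e-2])   # posterior std per epoch
demo_sigma_dlat = np.array([4e-3, 1e-2, 3e-2])
demo_err_dlong, demo_err_dlat = 1e-2, 1e-2        # typical measurement errors
demo_infogain = np.sqrt((demo_sigma_dlong / demo_err_dlong) ** 2
                        + (demo_sigma_dlat / demo_err_dlat) ** 2)
# epochs where demo_infogain >> 1 are the most valuable to observe
# ---------------------------------------------------------------------------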
#Actually build the plots here
#====================================================================================================
import glob, os
if 'results' in os.getcwd():
getData = ReadJson('runprops.txt')
else:
getData = ReadJson('most_recent_runprops.txt')
runprops = getData.outProps()
objname = runprops.get("objectname")
if not 'results' in os.getcwd():
os.chdir('../../../results/'+objname+'/')
results = max(glob.glob(os.path.join(os.getcwd(), '*/')), key=os.path.getmtime)
os.chdir(results)
backend = emcee.backends.HDFBackend('chain.h5')
fit_scale = pd.read_csv('fit_scale.csv',index_col=0)
float_names = runprops.get('float_names')
obsdf = pd.read_csv(objname+'_obs_df.csv',index_col=0)
# coding: utf8
import torch
import numpy as np
import os
import warnings
import pandas as pd
from time import time
import logging
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from sklearn.utils import column_or_1d
import scipy.sparse as sp
from clinicadl.tools.deep_learning.iotools import check_and_clean
from clinicadl.tools.deep_learning import EarlyStopping, save_checkpoint
#####################
# CNN train / test #
#####################
def train(model, train_loader, valid_loader, criterion, optimizer, resume, log_dir, model_dir, options, logger=None):
"""
Function used to train a CNN.
The best model and checkpoint will be found in the 'best_model_dir' of options.output_dir.
Args:
model: (Module) CNN to be trained
train_loader: (DataLoader) wrapper of the training dataset
valid_loader: (DataLoader) wrapper of the validation dataset
criterion: (loss) function to calculate the loss
optimizer: (torch.optim) optimizer linked to model parameters
resume: (bool) if True, a begun job is resumed
log_dir: (str) path to the folder containing the logs
model_dir: (str) path to the folder containing the models weights and biases
options: (Namespace) ensemble of other options given to the main script.
logger: (logging object) writer to stdout and stderr
"""
from tensorboardX import SummaryWriter
from time import time
if logger is None:
logger = logging
columns = ['epoch', 'iteration', 'time',
'balanced_accuracy_train', 'loss_train',
'balanced_accuracy_valid', 'loss_valid']
if hasattr(model, "variational") and model.variational:
columns += ["kl_loss_train", "kl_loss_valid"]
filename = os.path.join(os.path.dirname(log_dir), 'training.tsv')
if not resume:
check_and_clean(model_dir)
check_and_clean(log_dir)
results_df = pd.DataFrame(columns=columns)
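# ---------------------------------------------------------------------------
# The body of train() continues beyond this point in the dump. The sketch below
# only wires up the generic PyTorch pieces that form its first five arguments
# (toy model and data); `options` is a clinicadl argument namespace whose exact
# fields are not reproduced here, so the call itself is left commented out.
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

toy_model = nn.Sequential(nn.Flatten(), nn.Linear(32 * 32, 2))
toy_data = TensorDataset(torch.randn(64, 1, 32, 32), torch.randint(0, 2, (64,)))
toy_train_loader = DataLoader(toy_data, batch_size=8, shuffle=True)
toy_valid_loader = DataLoader(toy_data, batch_size=8)
toy_criterion = nn.CrossEntropyLoss()
toy_optimizer = torch.optim.Adam(toy_model.parameters(), lr=1e-4)
# train(toy_model, toy_train_loader, toy_valid_loader, toy_criterion, toy_optimizer,
#       resume=False, log_dir="./logs", model_dir="./models", options=options)
# ---------------------------------------------------------------------------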
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import os
import io
import math
#from werkzeug.utils import secure_filename
#from sqlalchemy import create_engine
from pulp import *
from pandas.io import sql
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # plt.hist was only needed for its bin edges; compute them directly instead
        _, pint = np.histogram(x, num_bins)
        _, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
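        # Fitted log-linear demand model implied by the OLS formula above:
        #   Product_Qty ~ intercept
        #                 + diffpriceprodvscomp_param * (Product_Price - Comp_Prod_Price)
        #                 + promo1_param * Promo1 + promo2_param * Promo2
        #                 + week_param * log(week_index)
        # These globals feed the demand surface below and the /maxm optimiser.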
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
                function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
        def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
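        # Demand curve assumed by the optimiser, built from the /elasticity coefficients:
        #   demand(p, t) = a + b*(p - comp_t) + d*t + pr1*promo1 + pr2*promo2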
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
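        # SLSQP setup below: maximise revenue sum(p_t * demand_t) by minimising its
        # negative, subject to total forecast demand not exceeding inventory s_0;
        # prices start at Price_Lower and are bounded within [0, Price_Upper].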
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
        opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
        result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
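    # Haversine great-circle distance (km) scaled by the per-unit cost.
    # Example with hypothetical coordinates: dist(48.86, 2.35, 52.52, 13.41, 1)
    # gives roughly 878 km (Paris -> Berlin).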
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
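    # costtable returns a (factory x customer) cost matrix: rows are factories,
    # columns are customers, each cell the haversine distance times the unit cost.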
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
    model += pulp.lpSum(
        [DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
        + [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
        + [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
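    # MILP summary, as encoded above:
    #   minimise  transport cost + fixed cost of open factories + 5e6 * unmet-demand slack
    #   s.t.      shipments to each customer + slack == customer demand
    #             shipments out of each factory <= capacity * factory_status (binary)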
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
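        # Worked example: y_true=[100, 200], y_pred=[110, 190]
        #   ME = mean([-10, 10]) = 0, MAE = 10,
        #   MAPE (note: divides by y_pred here, not y_true) = mean(10/110, 10/190)*100 ~ 7.2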
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                    ratio_inc.append(int(((bs-As)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
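            # Single exponential smoothing: result_t = alpha*series_t + (1-alpha)*result_{t-1};
            # the second loop rolls the final smoothed/actual pair forward to produce
            # `predictonterm` out-of-sample forecasts.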
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
                        meanerr=ME(data[data.columns.tolist()[i]],pred)
                        meanaverr=MAE(data[data.columns.tolist()[i]],pred)
                        mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
                    ratio_inc.append(int(((bs-As)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
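# The regression below uses a plain integer time index (0, 1, 2, ...) as the single
# explanatory variable for each demand column: it fits a least-squares line on the training
# split, scores it on the hold-out split V, and extrapolates the index for future periods.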
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
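# Simple exponential smoothing: s[0] = x[0] and s[t] = alpha*x[t] + (1-alpha)*s[t-1].
# With alpha = 0.5 each smoothed value averages the newest observation with the previous
# smoothed value; future periods are generated by repeatedly applying the same update.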
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
# MAPE is measured against the actual values, so divide by y_true
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
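# The yearly pipeline below mirrors the quarterly route above, but runs at annual
# frequency ('A') and formats the forecast dates as years.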
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungry
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min2=request.form.get('min2')
max2=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval2=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min3=request.form.get('min3')
max3=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval3=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
# MAPE is measured against the actual values, so divide by y_true
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
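# sim() runs a small Monte Carlo study: for each iteration it draws GDP, Pi_Exports,
# Market_Share and Advertisement_Expense series from the user-selected distributions,
# feeds them to the linear regression fitted above to predict TotalDemand, and scores the
# predictions against the years with actual demand (ME, MAE, MAPE per iteration, collected
# in Errormsr).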
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
# pd.datetime was removed from recent pandas releases, so build the range from date strings
date=pd.date_range(start=str(startyear)+'-01-01', end=str(endyear+1)+'-01-01',freq='A')
date=pd.DataFrame(date.strftime("%Y"))
#Fetching The Orignal Data Of Available Years of the Orignal Data That We Have Actually
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #Gives Data In the Given Range That we have actually
dates.extend(years) #Gives Years that we have data
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
if S=='Gdp':
# Gdp_dis is a single string, so compare it directly (looping over it iterates characters);
# form values arrive as strings, so cast them to float before sampling
if Gdp_dis=='Normal':
gdpdf=pd.DataFrame(np.random.normal(float(gdpvalues[0]),float(gdpvalues[1]),m))
elif Gdp_dis=='Uniform':
gdpdf=pd.DataFrame(np.random.uniform(float(gdpvalues[0]),float(gdpvalues[1]),m))
elif Gdp_dis=='Constant':
gdpdf=pd.DataFrame(np.random.choice([float(gdpvalues[0])],m))
else:
gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
else:
gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
if O=='Pi':
if Pi_dis=='Normal':
pidf=pd.DataFrame(np.random.normal(float(pivalues[0]),float(pivalues[1]),m))
elif Pi_dis=='Uniform':
pidf=pd.DataFrame(np.random.uniform(float(pivalues[0]),float(pivalues[1]),m))
elif Pi_dis=='Constant':
pidf=pd.DataFrame(np.random.choice([float(pivalues[0])],m))
else:
# randint replaces the deprecated np.random.random_integers (upper bound is exclusive)
pidf=pd.DataFrame(np.random.randint(80,121,m))
else:
pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
if N=='Adv':
if Adv_dis=='Normal':
advdf=pd.DataFrame(np.random.normal(float(advvalues[0]),float(advvalues[1]),m))
elif Adv_dis=='Uniform':
advdf=pd.DataFrame(np.random.uniform(float(advvalues[0]),float(advvalues[1]),m))
elif Adv_dis=='Constant':
advdf=pd.DataFrame(np.random.choice([float(advvalues[0])],m))
else:
advdf=pd.DataFrame(np.random.randint(500000,1000001,m))
else:
advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
if U=='Ms':
if Ms_dis=='Normal':
msdf=pd.DataFrame(np.random.normal(float(msvalues[0]),float(msvalues[1]),m))
elif Ms_dis=='Uniform':
msdf=pd.DataFrame(np.random.uniform(float(msvalues[0]),float(msvalues[1]),m))
elif Ms_dis=='Constant':
msdf=pd.DataFrame(np.random.choice([float(msvalues[0])],m))
else:
msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
else:
msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
#Predicting the Data And store in pred data through onhand Regression Method
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
#Geting Data With Respective Dates
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
kkk=pd.DataFrame(datas)
me=ME(Fetchdata,kkk)
mae=MAE(Fetchdata,kkk)
mape=MAPE(Fetchdata,kkk)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCURMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
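# tablecreator builds an AHP-style pairwise comparison matrix: 1s on the diagonal and
# reciprocal entries across it, filled from the submitted judgements; imp[i]==1 flips the
# direction of a judgement (val[i] -> 1/val[i]) before it is inserted.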
def tablecreator(imp,val,crit):
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=[crit],index=[crit])
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata.loc[index,columns]=1
if i<j:
# use .loc to avoid chained assignment, preserving the original cell order
fdata.loc[columns,index]=round((float(val[k])),2)
fdata.loc[index,columns]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
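# ahpmain approximates the AHP priority vector: it squares each comparison matrix and
# normalises the row sums so they add up to one (a common shortcut for the principal
# eigenvector). ahp then combines the criterion weights with the per-criterion vendor
# weights via a matrix product to get one score per vendor.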
def ahpmain(dictmain):
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
##EOQ inputs: the model trades off the fixed ordering (set-up) cost against the annual holding cost
AnnulaUnitsDemand=100##annual demand for the product (units per year)
FixedCost=500 ##fixed ordering (set-up) cost per order
AnnHoldingcost=0.25 ##annual holding-cost rate, as a fraction of unit cost
UnitCost=445 ##purchase cost per unit
LeadTime=10 ##time between placing an order and receiving it
SafetyStock=100##extra stock held as a buffer
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
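# Classic EOQ: Q* = sqrt(2*D*K/h), with D the annual demand, K the fixed ordering cost and
# h = AnnHoldingcost*UnitCost the per-unit annual holding cost; at Q* the annual ordering
# cost equals the annual holding cost. With the defaults above (D=100, K=500, h~111.25)
# this gives Q* of roughly 30 units.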
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
########################EEEEppppppppppQQQQQQ############
########################EEEEppppppppppQQQQQQ############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
return render_template('eproduction.html',warning='Production rate should not be less than Annual Demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
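# Economic production quantity: the holding term is scaled by pgap = 1 - D/P (demand rate
# over production rate), so Q* = sqrt(2*D*K/(h*(1 - D/P))); the larger the production rate
# relative to demand, the closer this gets to the plain EOQ.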
HoldingCost=float(AnnHoldingcost*UnitCost)
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
######################EEEEppppppppppQQQQQQ############
######################EEEEppppppppppQQQQQQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
sgap=(shortcost+HoldingCost)/shortcost
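# EOQ with planned backorders: Q* = sqrt(2*D*K/h) * sqrt((p+h)/p), where p is the shortage
# cost; the maximum backorder level is Q* * h/(p+h), and the total cost adds ordering,
# holding and shortage components.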
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
conn.close()
datatable=pd.DataFrame(columns=['Quantity','Price','EOQ','TotalCost'])
mainlist=[]
Qu=ress['quantity']
Qm=0
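# Price-break (quantity discount) procedure: compute the EOQ at each price level's holding
# cost, keep the candidates whose EOQ is feasible for that price band (Eu[i] <= Qu[i] and
# above the previous break), and pick the feasible candidate with the lowest total cost.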
for index, i in ress.iterrows():
tcl=[]
quantity = i['quantity']
price = i['price']
HoldingCost1=AnnHoldingcost*price
eoq1=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost1))),2)
REOQ=round(eoq1,0)
totOrderCost1=round((FixedCost*AnnulaUnitsDemand/eoq1),2)
totHoldCost1=round(((HoldingCost1*eoq1)/2),2)
totalcost1=float(round((totOrderCost1+totHoldCost1),2))
lst=[quantity,price,eoq1,totalcost1]
a=pd.DataFrame(lst).T
a.columns=['Quantity','Price','EOQ','TotalCost']
datatable=pd.concat([datatable,a],ignore_index=True)
name='TotalCost (Price='+str(a['Price'][0])+')'
tcl.append(name)
Qmin=1
Qmax=Qu[Qm]
qtylist2=[]
tclist1=[]
while (Qmin < Qmax):
qtylist2.append(Qmin)
                tclist1.append(round((Qmin/2*HoldingCost1+AnnulaUnitsDemand/Qmin*FixedCost),2))
Qmin +=2
Qmin=Qmax+1
qtylist2.append(eoq1)
tclist1.append(totalcost1)
tcl.append(tclist1)
            mainlist.append(tcl)
            Qm += 1
Eu=datatable['EOQ']
Qu=datatable['Quantity']
Tu=datatable['TotalCost']
minlst=[]
for i in range(len(Eu)):
if i ==0:
if Eu[i]<=Qu[i]:
minlst.append(i)
else:
if Eu[i]<=Qu[i] and Eu[i]>Qu[i-1]:
minlst.append(i)
if len(minlst)==0:
minnval='Solution not feasible'
else:
minval=Tu[minlst[0]]
minnval=Eu[minlst[0]]
for j in minlst:
if Tu[j]<minval:
minval=Tu[j]
minnval=Eu[j]
val1=0
for i in range(len(tclist1)):
if (round(minnval))==qtylist2[i]:
val1=i
minival=round(minval)
minnival=round(minnval)
NumOrders=round((AnnulaUnitsDemand/minnval),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
return render_template('pbreak.html',
NumOrders=NumOrders,OrderTime=OrderTime,REOQ=REOQ,ReorderPoint=ReorderPoint,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock,minnval=minnval,minval=minval,minival=minival,minnival=minnival,
datatable=datatable.to_html(index=False),mainlist=mainlist,
val1=val1,tclist1=tclist1,qtylist2=qtylist2)
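# Illustrative note (hedged): the feasibility loop above implements the usual all-units
# price-break procedure -- an EOQ computed at a given unit price only counts if it falls
# within that price's quantity range, and the cheapest feasible candidate by total cost
# is selected. A minimal sketch with hypothetical numbers:
#   import math
#   D, K, hold_rate = 1000.0, 50.0, 0.1
#   price_breaks = [(1, 10.0), (500, 9.5)]        # (minimum quantity, unit price)
#   for qmin, price in price_breaks:
#       eoq = math.sqrt(2 * D * K / (hold_rate * price))
#       feasible = eoq >= qmin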
#################Demand probabilistic######################
@app.route('/demand', methods=['GET', 'POST'])
def demand():
cost=10
price=12
salvage=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
cdf=[]
sum=0
for row in data['Prob']:
sum=sum+row
cdf.append(sum)
cumm_freq=(pd.DataFrame(cdf)).values##y-axis
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
k=[row>CSl for row in cumm_freq]
count=1
for row in k:
if row==False:
count=count+1
demand=(data['Demand']).values
w=data['Demand'].loc[count]##line across x-axis
val=0
for i in range(len(cumm_freq)):
if(w==demand[i]):
val=i
return render_template('demand.html',cost=cost,price=price,salvage=salvage,
cumm_freq=cumm_freq,demand=demand,val=val)
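# Illustrative worked example (hedged, not part of the original route): the critical
# ratio used above is CSl = Cu / (Cu + Co), with underage cost Cu = price - cost and
# overage cost Co = cost - salvage. With the defaults cost=10, price=12, salvage=2:
#   Cu = 2, Co = 8, CSl = 2 / (2 + 8) = 0.2
# so the chosen order quantity is the smallest demand level whose cumulative
# probability in Demand.csv reaches 0.2.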
@app.route('/normal', methods=['GET', 'POST'])
def normal():
cost=10
price=12
salvage=9
sd=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost1=cost-salvage
undercost1=price-cost
CSl=undercost1/(undercost1+overcost1)
zz=st.norm.ppf(CSl)##x-line
z=float(format(zz, '.2f'))
# Expecteddemand=round(mea+(z*sd))
mean = 0; sd = 1; variance = np.square(sd)
x = np.arange(-4,4,.01)##x-axis
        f =(np.exp(-np.square(x-mean)/(2*variance))/(np.sqrt(2*np.pi*variance)))##y-axis
val=0
for i in range(len(f)):
if(z==round((x[i]),2)):
val=i
return render_template('normal.html',x=x,f=f,val=val,cost=cost,price=price,salvage=salvage)
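# Illustrative sketch (hedged): with normally distributed demand the same critical ratio
# is pushed through the inverse standard normal CDF, z = norm.ppf(CSl), and the order
# quantity would be mean + z * sd. With the defaults above and hypothetical demand
# parameters:
#   from scipy import stats
#   CSl = (12 - 10) / ((12 - 10) + (10 - 9))   # ~0.667
#   z = stats.norm.ppf(CSl)                    # ~0.43
#   order_qty = 100 + z * 20                   # assuming demand mean 100, sd 20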
@app.route('/utype', methods=['GET','POST'])
def utype():
cost=10
price=12
salvage=2
mini=1
maxi=10
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
mini=request.form['mini']
maxi=request.form['maxi']
cost=int(cost)
price=int(price)
salvage=int(salvage)
mini=int(mini)
maxi=int(maxi)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
expdemand1=round(mini+((maxi-mini)*CSl))
# a=[mini,0]
# b=[mini,100]
# c=[maxi,0]
# d=[maxi,100]
# width = c[0] - b[0]
# height = d[1] - a[1]
lims = np.arange(0,maxi,1)
val=0
for i in range(len(lims)):
if(expdemand1==lims[i]):
val=i
return render_template('utype.html',x=lims,f=lims,val=val,cost=cost,price=price,salvage=salvage,mini=mini,maxi=maxi)
@app.route('/outputx', methods=['GET', 'POST'])
def outputx():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM `abc`")
all_data = cur.fetchall()
all_data = pd.DataFrame(all_data)
A_ccat=.8
B_ccat=.95
A_ucat=.1
B_ucat=.25
tot_cost=all_data['Cost'].sum()
tot_usage=all_data['Annual Usage'].sum()
all_data['perc_cost']=all_data['Cost']/tot_cost
all_data['perc_usage']=all_data['Annual Usage']/tot_usage
all_data.sort_values(by=['perc_cost'], inplace=True, ascending=False)
sort_data=all_data.reset_index()
sort_data['cum_cperc']=np.nan
sort_data['cum_uperc']=np.nan
sort_data['Class']=''
for i in range(len(sort_data)):
if(i==0):
sort_data.set_value(i, 'cum_cperc', sort_data['perc_cost'][i])
sort_data.set_value(i, 'cum_uperc', sort_data['perc_usage'][i])
# cperc_data.append(all_data['perc_cost'][i])
sort_data.set_value(i,'Class','A')
else:
sort_data.set_value(i, 'cum_cperc', sort_data['perc_cost'][i]+sort_data['cum_cperc'][i-1])
sort_data.set_value(i, 'cum_uperc', sort_data['perc_usage'][i]+sort_data['cum_uperc'][i-1])
if(sort_data['cum_cperc'][i]<=A_ccat and sort_data['cum_uperc'][i]<=A_ucat):
sort_data.set_value(i,'Class','A')
elif(sort_data['cum_cperc'][i]<=B_ccat and sort_data['cum_uperc'][i]<=B_ucat):
sort_data.set_value(i,'Class','B')
else:
sort_data.set_value(i,'Class','C')
x7=sort_data[['cum_cperc']]
x1=x7*100
x3=np.round(x1)
x2=np.array([])
x5 = np.append(x2,x3)
y7= sort_data[['cum_uperc']]
y1=y7*100
y3=np.round(y1)
y2=np.array([])
y5 = np.append(y2,y3)
###############% of Total cost//
a= sort_data[(sort_data['Class']=='A')][['perc_cost']]
j=a.sum()
k=j*100
pd.DataFrame(k)
kf=k[0]
b= sort_data[(sort_data['Class']=='B')][['perc_cost']]
n=b.sum()
m=n*100
pd.DataFrame(m)
mf=m[0]
c= sort_data[(sort_data['Class']=='C')][['perc_cost']]
o=c.sum()
p=o*100
pd.DataFrame(p)
pf=p[0]
tes=k,m,p
t2 = np.array([])
te2 = np.append(t2,tes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f= sort_data[(sort_data['Class']=='A')][['Product number']]
v=f.sum()
pd.DataFrame(v)
vif=v[0]
f1= sort_data[(sort_data['Class']=='B')][['Product number']]
u=f1.sum()
pd.DataFrame(u)
uif=u[0]
f2= sort_data[(sort_data['Class']=='C')][['Product number']]
vf=f2.sum()
pd.DataFrame(vf)
kif=vf[0]
#################% of Total units // Annual Usage
t= sort_data[(sort_data['Class']=='A')][['perc_usage']]
i=t.sum()
p1=i*100
pd.DataFrame(p1)
nf=p1[0]
l= sort_data[(sort_data['Class']=='B')][['perc_usage']]
t=l.sum()
q1=t*100
pd.DataFrame(q1)
qf=q1[0]
u= sort_data[(sort_data['Class']=='C')][['perc_usage']]
w=u.sum()
s1=w*100
pd.DataFrame(s1)
sf=s1[0]
test=p1,q1,s1
tt2 = np.array([])
tte2 = np.append(tt2,test)
#############values//Cost*Annual Usage
sort_data['Value'] = sort_data['Cost'] * sort_data['Annual Usage']
fz= sort_data[(sort_data['Class']=='A')][['Value']]
vz=fz.sum()
pd.DataFrame(vz)
vzz=vz[0]
fz1= sort_data[(sort_data['Class']=='B')][['Value']]
uz=fz1.sum()
pd.DataFrame(uz)
uzf=uz[0]
fz2= sort_data[(sort_data['Class']=='C')][['Value']]
vzf=fz2.sum()
pd.DataFrame(vzf)
kzf=vzf[0]
h=[{'Scenario':'A','Values':vzz,'product number':vif,'perc_usage':nf,'perc_cost ':kf},
{'Scenario':'B','Values':uzf,'product number':uif,'perc_usage':qf,'perc_cost ':mf},
{'Scenario':'C','Values':kzf,'product number':kif,'perc_usage':sf,'perc_cost ':pf}]
df = pd.DataFrame(h)
lo=sort_data[['Product Description','Product number','Cost','Annual Usage','Class']]
cur = conn.cursor()
cur.execute("SELECT * FROM `abc1`")
all_data4 = cur.fetchall()
all_data4 = pd.DataFrame(all_data4)
lolz=all_data4[['Product number','Product Description','Cost','Annual Usage','Average Stay','Average Consumption','Criticality']]
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
curr = conn.cursor()
curr.execute("SELECT * FROM `fsn`")
all_data1 = curr.fetchall()
all_data1 = pd.DataFrame(all_data1)
F_cat=.2
S_cat=.5
tot_stay=all_data1['Average Stay'].sum()
tot_consupt=all_data1['Average Consumption'].sum()
all_data1['perc_stay']=all_data1['Average Stay']/tot_stay
all_data1['perc_cons']=all_data1['Average Consumption']/tot_consupt
all_data1.sort_values(by=['perc_stay'], inplace=True, ascending=True)
sort_data1=all_data1.reset_index()
sort_data1['cum_stay']=np.nan
sort_data1['cum_cons']=np.nan
sort_data1['Class']=''
for i in range(len(sort_data1)):
if(i==0):
sort_data1.set_value(i, 'cum_stay', sort_data1['perc_stay'][i])
sort_data1.set_value(i, 'cum_cons', sort_data1['perc_cons'][i])
sort_data1.set_value(i,'Class','F')
else:
sort_data1.set_value(i, 'cum_stay', sort_data1['perc_stay'][i]+sort_data1['cum_stay'][i-1])
sort_data1.set_value(i, 'cum_cons', sort_data1['perc_cons'][i]+sort_data1['cum_cons'][i-1])
if(sort_data1['cum_stay'][i]<=F_cat) :
sort_data1.set_value(i,'Class','F')
elif(sort_data1['cum_stay'][i]<=S_cat):
sort_data1.set_value(i,'Class','S')
else:
sort_data1.set_value(i,'Class','N')
x71=sort_data1[['cum_stay']]
x11=x71*100
x31=np.round(x11)
x21=np.array([])
x51 = np.append(x21,x31)
y71= sort_data1[['cum_cons']]
y11=y71*100
y31=np.round(y11)
y21=np.array([])
y51 = np.append(y21,y31)
###############% of Total cost//
a1= sort_data1[(sort_data1['Class']=='F')][['perc_stay']]
j1=a1.sum()
k1=j1*100
pd.DataFrame(k1)
kf1=k1[0]
b1= sort_data1[(sort_data1['Class']=='S')][['perc_stay']]
n1=b1.sum()
m1=n1*100
pd.DataFrame(m1)
mf1=m1[0]
c1= sort_data1[(sort_data1['Class']=='N')][['perc_stay']]
o1=c1.sum()
p1=o1*100
pd.DataFrame(p1)
pf1=p1[0]
tes1=k1,m1,p1
t21 = np.array([])
te21 = np.append(t21,tes1)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f1= sort_data1[(sort_data1['Class']=='F')][['Product number']]
v1=f1.sum()
pd.DataFrame(v1)
vif1=v1[0]
f11= sort_data1[(sort_data1['Class']=='S')][['Product number']]
u1=f11.sum()
pd.DataFrame(u1)
uif1=u1[0]
f21= sort_data1[(sort_data1['Class']=='N')][['Product number']]
vf1=f21.sum()
pd.DataFrame(vf1)
kif1=vf1[0]
#################% of Total units // Annual Usage
t1= sort_data1[(sort_data1['Class']=='F')][['perc_cons']]
i1=t1.sum()
p11=i1*100
pd.DataFrame(p11)
nf1=p11[0]
l1= sort_data1[(sort_data1['Class']=='S')][['perc_cons']]
t1=l1.sum()
q11=t1*100
pd.DataFrame(q11)
qf1=q11[0]
u1= sort_data1[(sort_data1['Class']=='N')][['perc_cons']]
w1=u1.sum()
s11=w1*100
pd.DataFrame(s11)
sf1=s11[0]
test1=p11,q11,s11
tt21 = np.array([])
tte21 = np.append(tt21,test1)
#############values//Cost*Annual Usage
sort_data1['Value'] = sort_data1['Average Stay'] * sort_data1['Average Consumption']
fz1= sort_data1[(sort_data1['Class']=='F')][['Value']]
vz1=fz1.sum()
| pd.DataFrame(vz1) | pandas.DataFrame |
import pandas as pd
import psycopg2
import pickle
import numpy as np
# counterS = 0
# global counterS
# global valGlob
# from sqlalchemy import create_engine
# -*- coding: utf-8 -*-
import os
import sys
import copy
# fileName = '/Users/alessandro/Documents/PhD/OntoHistory/WDTaxo_October2014.csv'
# connection parameters
def get_db_params():
params = {
'database': 'wikidb',
'user': 'postgres',
'password': '<PASSWORD>',
'host': 'localhost',
'port': '5432'
}
conn = psycopg2.connect(**params)
return conn
def queryexecutor():
# dictStats = {}
# conn = get_db_params()
# cur = conn.cursor()
npCoso = np.load('/data/wikidata-project/WDOntoHistory/automated_revs.npy')
setCoso = set(npCoso)
for i in range(13, 18):
for j in range(1, 7):
date = "20" + str(i) + "-0" + str(j) + "-01"
if j == 1:
mt = "12"
datePrev = "20" + str(i-1) + "-" + mt + "-01"
else:
datePrev = "20" + str(i) + "-0" + str(j-1) + "-01"
print(date)
try:
queryStart = """SELECT item_id AS itemid, rev_id AS revid, time_stamp AS timestamp, user_name AS username, automated_tool FROM revision_history_201710 WHERE (time_stamp > '"""+ datePrev + """ 00:00:00' AND time_stamp < '"""+ date + """ 00:00:00');"""
conn = get_db_params()
cur = conn.cursor()
cur.execute(queryStart)
cur.close()
conn.commit()
# print(query)
timetable_temp = | pd.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step05 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step05&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-5).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import interpolate
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.pricing import bond_value, bsm_function, cash_flow_reinv
from arpym.tools import histogram_sp, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step05-parameters)
# +
# indicates which projection to continue from
# True: use copula-marginal projections
# False: use historical projections
copula_marginal = True
recrate_ge = 0.6 # recovery rate for GE bond
recrate_jpm = 0.7 # recovery rate for JPM bond
n_plot = 3 # index of instrument to plot
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step05-implementation-step00): Load data
# +
path = '../../../databases/temporary-databases/'
# Risk drivers identification
# risk driver values
db_riskdrivers_series = pd.read_csv(path+'db_riskdrivers_series.csv',
index_col=0)
x = db_riskdrivers_series.values
# values at t_now
db_v_tnow = pd.read_csv(path+'db_v_tnow.csv')
v_tnow = db_v_tnow.values[0]
# additional information
db_riskdrivers_tools = pd.read_csv(path+'db_riskdrivers_tools.csv',
parse_dates=True)
d_ = int(db_riskdrivers_tools['d_'].dropna())
n_stocks = int(db_riskdrivers_tools['n_stocks'].dropna())
n_bonds = int(db_riskdrivers_tools.n_bonds.dropna())
n_ = n_stocks+n_bonds+3
d_implvol = int(db_riskdrivers_tools['d_implvol'].dropna())
tend_option = np.datetime64(db_riskdrivers_tools['tend_option'][0], 'D')
k_strk = db_riskdrivers_tools['k_strk'][0]
l_ = int(db_riskdrivers_tools['l_'].dropna())
m_moneyness = db_riskdrivers_tools['m_moneyness'].values[:l_]
tau_implvol = db_riskdrivers_tools['tau_implvol'].values
y = db_riskdrivers_tools['y'][0]
tend_ge = np.datetime64(db_riskdrivers_tools['tend_ge'][0], 'D')
tend_jpm = np.datetime64(db_riskdrivers_tools['tend_jpm'][0], 'D')
coupon_ge = db_riskdrivers_tools['coupon_ge'][0]
coupon_jpm = db_riskdrivers_tools['coupon_jpm'][0]
c_ = int(db_riskdrivers_tools.c_.dropna())
t_now = np.datetime64(db_riskdrivers_tools.t_now[0], 'D')
# index of risk drivers for options and bonds
idx_options = np.array(range(n_stocks+1, n_stocks+d_implvol+1))
idx_gebond = np.array(range(n_stocks+d_implvol+1, n_stocks+d_implvol+5))
idx_jpmbond = np.array(range(n_stocks+d_implvol+5, n_stocks+d_implvol+9))
# Projection
# load projections from copula-marginal approach
if copula_marginal:
# projected risk driver paths
db_projection_riskdrivers = pd.read_csv(path+'db_projection_riskdrivers.csv')
# projected rating paths
db_projection_ratings = pd.read_csv(path+'db_projection_ratings.csv')
# projected scenarios probabilities
db_scenario_probs = pd.read_csv(path+'db_scenario_probs.csv')
p = db_scenario_probs['p'].values
# additional information
db_projection_tools = pd.read_csv(path+'db_projection_tools.csv')
j_ = int(db_projection_tools['j_'][0])
t_hor = np.datetime64(db_projection_tools['t_hor'][0], 'D')
# load projections from historical approach
else:
# projected risk driver paths
db_projection_riskdrivers = \
pd.read_csv(path+'db_projection_bootstrap_riskdrivers.csv')
# projected scenarios probabilities
db_scenario_probs = | pd.read_csv(path+'db_scenario_probs_bootstrap.csv') | pandas.read_csv |
# 3rd party
import numpy # type: ignore
import pandas # type: ignore
import pandas.testing as tm # type: ignore
from domdf_python_tools.testing import not_windows
from hypothesis import given
from hypothesis.strategies import integers, lists
from pandas._testing import assert_numpy_array_equal # type: ignore
from pandas.core.internals import ExtensionBlock # type: ignore
# this package
import si_unit_pandas
# ----------------------------------------------------------------------------
# Block Methods
# ----------------------------------------------------------------------------
from si_unit_pandas import Celsius
def test_concatenate_blocks():
v1 = si_unit_pandas.TemperatureArray([1, 2, 3])
s = pandas.Series(v1, index=pandas.RangeIndex(3), fastpath=True)
result = pandas.concat([s, s], ignore_index=True)
expected = pandas.Series(si_unit_pandas.TemperatureArray([1, 2, 3, 1, 2, 3]))
tm.assert_series_equal(result, expected)
# ----------------------------------------------------------------------------
# Public Constructors
# ----------------------------------------------------------------------------
def test_series_constructor():
v = si_unit_pandas.TemperatureArray([1, 2, 3])
result = pandas.Series(v)
assert result.dtype == v.dtype
assert isinstance(result._data.blocks[0], ExtensionBlock)
def test_dataframe_constructor():
v = si_unit_pandas.TemperatureArray([1, 2, 3])
df = pandas.DataFrame({'A': v})
assert isinstance(df.dtypes['A'], si_unit_pandas.CelsiusType)
assert df.shape == (3, 1)
str(df)
def test_dataframe_from_series_no_dict():
s = pandas.Series(si_unit_pandas.TemperatureArray([1, 2, 3]))
result = pandas.DataFrame(s)
expected = pandas.DataFrame({0: s})
tm.assert_frame_equal(result, expected)
s = pandas.Series(si_unit_pandas.TemperatureArray([1, 2, 3]), name='A')
result = pandas.DataFrame(s)
expected = | pandas.DataFrame({'A': s}) | pandas.DataFrame |
from music21 import *
import music21 as m21
import time
# import requests
# httpx appears to be faster than requests, will fit better with an async version
import httpx
from pathlib import Path
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
from itertools import combinations
# Unncessary at the moment
# MEINSURI = 'http://www.music-encoding.org/ns/mei'
# MEINS = '{%s}' % MEINSURI
# mei_doc = ET.fromstring(requests.get(path).text)
# # Find the title from the MEI file and update the Music21 Score metadata
# title = mei_doc.find(f'{MEINS}meiHead//{MEINS}titleStmt/{MEINS}title').text
# score.metadata.title = title
# mei_doc = ET.fromstring(requests.get(path).text)
# # Find the composer from the MEI file and update the Music21 Score metadata
# composer = mei_doc.find(f'{MEINS}meiHead//{MEINS}respStmt/{MEINS}persName').text
# score.metadata.composer = composer
# An extension of the music21 note class with more information easily accessible
pathDict = {}
class NoteListElement:
"""
An extension of the music21 note class
Attributes
----------
note : m21.note.Note
music21 note class
offset : int
cumulative offset of note
id : int
unique music21 id
metadata : music21.metadata
piece metadata- not normally attached to a music21 note
part : str
voice name
partNumber : int
voice number, not 0 indexed
duration : int
note duration
piece_url : str
piece url for note
prev_note : NoteListElement
prior non-rest note element
"""
def __init__(self, note: m21.note.Note, metadata, part, partNumber, duration, piece_url, prev_note=None):
self.note = note
self.prev_note = prev_note
self.offset = self.note.offset
self.id = self.note.id
self.metadata = metadata
self.part = part
self.partNumber = partNumber
self.duration = duration
self.piece_url = piece_url
def __str__(self):
return "<NoteListElement: {}>".format(self.note.name)
class ImportedPiece:
def __init__(self, score):
self.score = score
self.analyses = {'note_list': None}
self._intervalMethods = {
# (quality, directed, compound): function returning the specified type of interval
# diatonic with quality
('q', True, True): ImportedPiece._qualityUndirectedCompound,
('q', True, False): ImportedPiece._qualityDirectedSimple,
('q', False, True): lambda cell: cell.name if hasattr(cell, 'name') else cell,
('q', False, False): lambda cell: cell.semiSimpleName if hasattr(cell, 'semiSimpleName') else cell,
            # diatonic intervals without quality
('d', True, True): lambda cell: cell.directedName[1:] if hasattr(cell, 'directedName') else cell,
('d', True, False): ImportedPiece._noQualityDirectedSimple,
('d', False, True): lambda cell: cell.name[1:] if hasattr(cell, 'name') else cell,
('d', False, False): lambda cell: cell.semiSimpleName[1:] if hasattr(cell, 'semiSimpleName') else cell,
# chromatic intervals
('c', True, True): lambda cell: str(cell.semitones) if hasattr(cell, 'semitones') else cell,
('c', True, False): lambda cell: str(cell.semitones % 12) if hasattr(cell, 'semitones') else cell,
('c', False, True): lambda cell: str(abs(cell.semitones)) if hasattr(cell, 'semitones') else cell,
('c', False, False): lambda cell: str(abs(cell.semitones) % 12) if hasattr(cell, 'semitones') else cell
}
def _getPartSeries(self):
if 'PartSeries' not in self.analyses:
part_series = []
for i, flat_part in enumerate(self._getSemiFlatParts()):
notesAndRests = flat_part.getElementsByClass(['Note', 'Rest'])
part_name = flat_part.partName or 'Part_' + str(i + 1)
ser = pd.Series(notesAndRests, name=part_name)
ser.index = ser.apply(lambda noteOrRest: noteOrRest.offset)
ser = ser[~ser.index.duplicated()] # remove multiple events at the same offset in a given part
part_series.append(ser)
self.analyses['PartSeries'] = part_series
return self.analyses['PartSeries']
def _getSemiFlatParts(self):
"""
Return and store flat parts inside a piece using the score attribute.
"""
if 'SemiFlatParts' not in self.analyses:
parts = self.score.getElementsByClass(stream.Part)
self.analyses['SemiFlatParts'] = [part.semiFlat for part in parts]
return self.analyses['SemiFlatParts']
def _getPartNames(self):
"""
Return flat names inside a piece using the score attribute.
"""
if 'PartNames' not in self.analyses:
part_names = []
for i, part in enumerate(self._getSemiFlatParts()):
part_names.append(part.partName or 'Part_' + str(i + 1))
self.analyses['PartNames'] = part_names
return self.analyses['PartNames']
def _getM21Objs(self):
if 'M21Objs' not in self.analyses:
part_names = self._getPartNames()
self.analyses['M21Objs'] = pd.concat(self._getPartSeries(), names=part_names, axis=1)
return self.analyses['M21Objs']
def _remove_tied(self, noteOrRest):
if hasattr(noteOrRest, 'tie') and noteOrRest.tie is not None and noteOrRest.tie.type != 'start':
return None
return noteOrRest
def _getM21ObjsNoTies(self):
if 'M21ObjsNoTies' not in self.analyses:
df = self._getM21Objs().applymap(self._remove_tied).dropna(how='all')
self.analyses['M21ObjsNoTies'] = df
return self.analyses['M21ObjsNoTies']
def regularize(self, df, unit=2):
'''
Return the passed `pandas.DataFrame` (df) with its observations
regularized rhythmically. Pass a duration as the `unit` parameter to
control at what regular distance observations will be made. Durations
are measured according to the music21 convention where:
eighth note = .5
quarter note = 1
half note = 2
etc.
For example, if you pass a dataframe of the notes and rests of a piece,
and set `unit` to 4, a new whatever is "sounding" (whether a note or a
rest) at every regular whole note will be kept, and any intervening
notes or rests will be removed. A breve would get renotated as two
whole notes.
Regularization also works with non-integer values. So if you wanted to
        regularize at the swung eighth note, for example, you could set:
`unit=1/3`
'''
spot = df.index[0] * 1000
end = self.score.highestTime * 1000
vals = []
step = unit * 1000
while spot < end:
vals.append(spot)
spot += step
new_index = pd.Index(vals).map(lambda i: round(i) / 1000)
res = df.ffill().reindex(new_index, method='pad')
return res
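    # Hedged usage sketch (not part of the original class): sample the note/rest table
    # at whole-note (unit=4) spacing. The file path and variable names are hypothetical.
    #   piece = ImportedPiece(m21.converter.parse('path_to_piece.xml'))
    #   nr = piece.getNoteRest()
    #   regular_nr = piece.regularize(nr, unit=4)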
def getDuration(self, df=None, n=1):
'''
If no dataframe is passed as the df parameter (the default), return a
`pandas.DataFrame` of floats giving the duration of notes and rests in
each part where 1 = quarternote, 1.5 = a dotted quarter, 4 = a whole
note, etc. If a df is passed, then return a df of the same shape giving
the duration of each of the slices of this df. This is useful if you
        want to know the durations of something other than single notes
and rests, such as the durations of intervals.
If n is set, it must be an integer >= 1 and less than the number of
rows in df. It determines how many adjacent items have their durations
grouped together. To get the duration of single events, n should be 1
(default). You could set n=3 if you wanted to get the duration of all
consecutive 3-note groups, for example.'''
if 'Duration' not in self.analyses or df is not None or n != 1:
_df = self._getM21ObjsNoTies() if df is None else df.copy()
highestTime = self.score.highestTime
_df.loc[highestTime, :] = 0
newCols = []
for i in range(len(_df.columns)):
ser = _df.iloc[:, i]
ser.dropna(inplace=True)
vals = ser.index[n:] - ser.index[:-n]
ser.drop(labels=ser.index[-n:], inplace=True)
ser[:] = vals
newCols.append(ser)
result = pd.concat(newCols, axis=1)
if df is None and n == 1:
self.analyses['Duration'] = result
else:
return result
return self.analyses['Duration']
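    # Hedged usage sketch: durations of single events versus consecutive groups.
    # Variable names are illustrative only.
    #   single_durs = piece.getDuration()                        # one value per note/rest
    #   grouped_durs = piece.getDuration(n=3)                    # span of 3 consecutive events
    #   melodic_durs = piece.getDuration(df=piece.getMelodic())  # durations of melodic intervals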
def _noteRestHelper(self, noteOrRest):
if noteOrRest.isRest:
return 'Rest'
return noteOrRest.nameWithOctave
def getNoteRest(self):
'''Return a table of the notes and rests in the piece. Rests are
designated with the string "Rest". Notes are shown such that middle C
is "C4".'''
if 'NoteRest' not in self.analyses:
df = self._getM21ObjsNoTies().applymap(self._noteRestHelper, na_action='ignore')
self.analyses['NoteRest'] = df
return self.analyses['NoteRest']
def getBeat(self):
'''
Return a table of the beat positions of all the notes and rests.
'''
if 'Beat' not in self.analyses:
df = self._getM21ObjsNoTies().applymap(lambda note: note.beat, na_action='ignore')
self.analyses['Beat'] = df
return self.analyses['Beat']
def _getBeatIndex(self):
'''
Return a series of the first valid value in each row of .getBeat().
'''
if 'BeatIndex' not in self.analyses:
ser = self.getBeat().apply(lambda row: row.dropna()[0], axis=1)
self.analyses['BeatIndex'] = ser
return self.analyses['BeatIndex']
def detailIndex(self, df, offset=True, measure=True, beat=True):
'''
Return the passed dataframe with a multi-index of the measure and beat
position.
'''
cols = [df, self.getMeasure().iloc[:, 0], self._getBeatIndex()]
names = ['Measure', 'Beat']
temp = pd.concat(cols, axis=1)
temp2 = temp.iloc[:, len(df.columns):].ffill()
temp2.iloc[:, 0] = temp2.iloc[:, 0].astype(int)
mi = pd.MultiIndex.from_frame(temp2, names=names)
ret = temp.iloc[:, :len(df.columns)]
ret.index = mi
ret.dropna(inplace=True, how='all')
ret.sort_index(inplace=True)
return ret
def _beatStrengthHelper(self, noteOrRest):
if hasattr(noteOrRest, 'beatStrength'):
return noteOrRest.beatStrength
return noteOrRest
def getBeatStrength(self):
''' Returns a table of the beat strengths of all the notes and rests in
the piece. This follows the music21 conventions where the downbeat is
equal to 1, and all other metric positions in a measure are given
smaller numbers approaching zero as their metric weight decreases.
Results from this method should not be sent to the regularize method.
'''
if 'BeatStrength' not in self.analyses:
df = self._getM21ObjsNoTies().applymap(self._beatStrengthHelper)
self.analyses['BeatStrength'] = df
return self.analyses['BeatStrength']
def getTimeSignature(self):
"""
Return a data frame containing the time signatures and their offsets
"""
if 'TimeSignature' not in self.analyses:
time_signatures = []
for part in self._getSemiFlatParts():
time_signatures.append(pd.Series({ts.offset: ts for ts in part.getTimeSignatures()}))
df = pd.concat(time_signatures, axis=1)
df = df.applymap(lambda ts: ts.ratioString, na_action='ignore')
df.columns = self._getPartNames()
self.analyses['TimeSignature'] = df
return self.analyses['TimeSignature']
def getMeasure(self):
"""
This method retrieves the offsets of each measure in each voices.
"""
if "Measure" not in self.analyses:
parts = self._getSemiFlatParts()
partMeasures = []
for part in parts:
partMeasures.append(pd.Series({m.offset: m.measureNumber \
for m in part.getElementsByClass(['Measure'])}))
df = pd.concat(partMeasures, axis=1)
df.columns = self._getPartNames()
self.analyses["Measure"] = df
return self.analyses["Measure"]
def getSoundingCount(self):
"""
        Return a series with the number of parts that currently have
a note sounding.
"""
if not 'SoundingCount' in self.analyses:
nr = self.getNoteRest().ffill()
df = nr[nr != 'Rest']
ser = df.count(axis=1)
ser.name = 'Sounding'
self.analyses['SoundingCount'] = ser
return self.analyses['SoundingCount']
def _zeroIndexIntervals(ntrvl):
'''
Change diatonic intervals so that they count the number of steps, i.e.
unison = 0, second = 1, etc.
'''
if ntrvl == 'Rest':
return ntrvl
val = int(ntrvl)
if val > 0:
return str(val - 1)
return str(val + 1)
def _harmonicIntervalHelper(row):
if hasattr(row[1], 'isRest') and hasattr(row[0], 'isRest'):
if row[1].isRest or row[0].isRest:
return 'Rest'
elif row[1].isNote and row[0].isNote:
return interval.Interval(row[0], row[1])
return None
def _melodicIntervalHelper(row):
if hasattr(row[0], 'isRest'):
if row[0].isRest:
return 'Rest'
elif row[0].isNote and hasattr(row[1], 'isNote') and row[1].isNote:
return interval.Interval(row[1], row[0])
return None
def _melodifyPart(ser):
ser.dropna(inplace=True)
shifted = ser.shift(1)
partDF = pd.concat([ser, shifted], axis=1)
res = partDF.apply(ImportedPiece._melodicIntervalHelper, axis=1).dropna()
return res
def _getM21MelodicIntervals(self):
if 'M21MelodicIntervals' not in self.analyses:
m21Objs = self._getM21ObjsNoTies()
df = m21Objs.apply(ImportedPiece._melodifyPart)
self.analyses['M21MelodicIntervals'] = df
return self.analyses['M21MelodicIntervals']
def _getRegularM21MelodicIntervals(self, unit):
m21Objs = self._getM21ObjsNoTies()
m21Objs = self.regularize(m21Objs, unit=unit)
return m21Objs.apply(ImportedPiece._melodifyPart)
def _qualityUndirectedCompound(cell):
if hasattr(cell, 'direction'):
if cell.direction.value >= 0:
return cell.name
else:
return '-' + cell.name
return cell
def _qualityDirectedSimple(cell):
if hasattr(cell, 'semiSimpleName'):
if cell.direction.value > 0:
return cell.semiSimpleName
else:
return '-' + cell.semiSimpleName
return cell
def _noQualityDirectedSimple(cell):
if hasattr(cell, 'semiSimpleName'):
if cell.direction.value == -1:
return '-' + cell.semiSimpleName[1:]
else:
return cell.semiSimpleName[1:]
else:
return cell
def getMelodic(self, kind='q', directed=True, compound=True, unit=0):
'''
Return melodic intervals for all voice pairs. Each melodic interval
is associated with the starting offset of the second note in the
interval. If you want melodic intervals measured at a regular duration,
        do not pipe this method's result to the `regularize` method. Instead,
pass the desired regular durational interval as an integer or float as
the `unit` parameter.
:param str kind: use "q" (default) for diatonic intervals with quality,
"d" for diatonic intervals without quality, "z" for zero-indexed
diatonic intervals without quality (i.e. unison = 0, second = 1,
etc.), or "c" for chromatic intervals. Only the first character is
used, and it's case insensitive.
:param bool directed: defaults to True which shows that the voice that
is lower on the staff is a higher pitch than the voice that is
            higher on the staff. This is designated with a "-" prefix.
:param bool compound: whether to use compound (True, default) or simple
(False) intervals. In the case of simple diatonic intervals, it
simplifies to within the octave, so octaves don't get simplified to
unisons. But for semitonal intervals, an interval of an octave
            (12 semitones) does get simplified to a unison (0).
:param int/float unit: regular durational interval at which to measure
melodic intervals. See the documentation of the `unit` method for
more about this.
:returns: `pandas.DataFrame` of melodic intervals in each part
'''
kind = kind[0].lower()
kind = {'s': 'c'}.get(kind, kind)
_kind = {'z': 'd'}.get(kind, kind)
settings = (_kind, directed, compound)
key = ('MelodicIntervals', kind, directed, compound)
if key not in self.analyses or unit:
df = self._getRegularM21MelodicIntervals(unit) if unit else self._getM21MelodicIntervals()
df = df.applymap(self._intervalMethods[settings])
if kind == 'z':
df = df.applymap(ImportedPiece._zeroIndexIntervals, na_action='ignore')
if unit:
return df
else:
self.analyses[key] = df
return self.analyses[key]
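    # Hedged usage sketch: chromatic, undirected, simple melodic intervals, and diatonic
    # intervals sampled at every half note (unit=2). Variable names are illustrative.
    #   mel = piece.getMelodic(kind='c', directed=False, compound=False)
    #   mel_minims = piece.getMelodic(kind='d', unit=2)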
def _getM21HarmonicIntervals(self):
if 'M21HarmonicIntervals' not in self.analyses:
m21Objs = self._getM21ObjsNoTies()
pairs = []
combos = combinations(range(len(m21Objs.columns) - 1, -1, -1), 2)
for combo in combos:
df = m21Objs.iloc[:, list(combo)].dropna(how='all').ffill()
ser = df.apply(ImportedPiece._harmonicIntervalHelper, axis=1)
# name each column according to the voice names that make up the intervals, e.g. 'Bassus_Altus'
ser.name = '_'.join((m21Objs.columns[combo[0]], m21Objs.columns[combo[1]]))
pairs.append(ser)
if pairs:
ret = pd.concat(pairs, axis=1)
else:
ret = pd.DataFrame()
self.analyses['M21HarmonicIntervals'] = ret
return self.analyses['M21HarmonicIntervals']
def getHarmonic(self, kind='q', directed=True, compound=True):
'''
Return harmonic intervals for all voice pairs. The voice pairs are
named with the voice that's lower on the staff given first, and the two
voices separated with an underscore, e.g. "Bassus_Tenor".
:param str kind: use "q" (default) for diatonic intervals with quality,
"d" for diatonic intervals without quality, "z" for zero-indexed
diatonic intervals without quality (i.e. unison = 0, second = 1,
etc.), or "c" for chromatic intervals. Only the first character is
used, and it's case insensitive.
:param bool directed: defaults to True which shows that the voice that
is lower on the staff is a higher pitch than the voice that is
            higher on the staff. This is designated with a "-" prefix.
:param bool compound: whether to use compound (True, default) or simple
(False) intervals. In the case of simple diatonic intervals, it
simplifies to within the octave, so octaves don't get simplified to
unisons. But for semitonal intervals, an interval of an octave
            (12 semitones) does get simplified to a unison (0 semitones).
'''
kind = kind[0].lower()
kind = {'s': 'c'}.get(kind, kind)
_kind = {'z': 'd'}.get(kind, kind)
settings = (_kind, directed, compound)
key = ('HarmonicIntervals', kind, directed, compound)
if key not in self.analyses:
df = self._getM21HarmonicIntervals()
df = df.applymap(self._intervalMethods[settings])
if kind == 'z':
df = df.applymap(ImportedPiece._zeroIndexIntervals, na_action='ignore')
self.analyses[key] = df
return self.analyses[key]
def _ngrams_offsets_helper(col, n, offsets):
"""
        Generate a list of series that align the notes of one part's ngrams according
to the first or the last note's offset.
:param pandas.Series col: A column that originally contains
notes and rests.
:param int n: The size of the ngram.
:param str offsets: We could input 'first' if we want to group
the ngrams by their first note's offset, or 'last' if we
want to group the ngram by the last note's offset.
:return pandas.Series: a list of shifted series that could be grouped by
first or the last note's offset.
"""
if offsets == 'first':
chunks = [col.shift(-i) for i in range(n)]
else: # offsets == 'last':
chunks = [col.shift(i) for i in range(n - 1, -1, -1)]
return chunks
def _ngramHelper(col, n, exclude, offsets):
col.dropna(inplace=True)
if n == -1:
# get the starting and ending elements of ngrams
starts = col[(col != 'Rest') & (col.shift(1).isin(('Rest', np.nan)))]
ends = col[(col != 'Rest') & (col.shift(-1).isin(('Rest', np.nan)))]
si = tuple(col.index.get_loc(i) for i in starts.index)
ei = tuple(col.index.get_loc(i) + 1 for i in ends.index)
ind = starts.index if offsets == 'first' else ends.index
vals = [', '.join(col.iloc[si[i] : ei[i]]) for i in range(len(si))]
ser = pd.Series(vals, name=col.name, index=ind)
return ser
chunks = ImportedPiece._ngrams_offsets_helper(col, n, offsets)
chains = pd.concat(chunks, axis=1)
for excl in exclude:
chains = chains[(chains != excl).all(1)]
chains.dropna(inplace=True)
chains = chains.apply(lambda row: ', '.join(row), axis=1)
return chains
def getNgrams(self, df=None, n=3, how='columnwise', other=None, held='Held',
exclude=['Rest'], interval_settings=('d', True, True), unit=0,
offsets='first'):
'''
Group sequences of observations in a sliding window "n" events long
(default n=3). If the `exclude` parameter is passed and any item in that
list is found in an ngram, that ngram will be removed from the resulting
DataFrame. Since `exclude` defaults to `['Rest']`, pass an empty list if
you want to allow rests in your ngrams.
There are two primary modes for the `how` parameter. When set to
"columnwise" (default), this is the simple case where the events in each
column of the `df` DataFrame has its events grouped at the offset of the
first event in the window. For example, to get 4-grams of melodic
intervals:
ip = ImportedPiece('path_to_piece')
ngrams = ip.getNgrams(df=ip.getMelodic(), n=4)
If `how` is set to 'modules' this will return contrapuntal modules. In
this case, if the `df` or `other` parameters are left as None, they will
be replaced with the current piece's harmonic and melodic intervals
respectfully. These intervals will be formed according to the
interval_settings argument, which gets passed to the getMelodic and
getHarmonic methods (see those methods for an explanation of those
settings). This makes it easy to make contrapuntal-module ngrams, e.g.:
ip = ImportedPiece('path_to_piece')
ngrams = ip.getNgrams(how='modules')
There is a special case for "open-ended" module ngrams. Set n=1 and the
module ngrams will show the vertical interval between two voices,
        followed by the connecting melodic interval in the lower voice, but not
the next harmonic interval. Open-ended module ngrams can be useful if
you want to see how long the imitation in two voice parts goes on for.
Another special case is when `n` is set to -1. This finds the longest
ngrams at all time points excluding subset ngrams. The returned
dataframe will have ngrams of length varying between 1 and the longest
ngram in the piece.
        The `offsets` setting can have two modes. If "first" is selected (default option),
the returned ngrams will be grouped according to their first notes' offsets,
while if "last" is selected, the returned ngrams will be grouped according
to the last notes' offsets.
If you want want "module" ngrams taken at a regular durational interval,
you can omit passing `df` and `other` dataframes and instead pass the
desired `interval_settings` and an integer or float for the `unit`
parameter. See the `.regularize` documentation for how to use this
parameter. Here's an example that will generate contrapuntal-module
ngrams at regular minim (half-note) intervals.
ip = ImportedPiece('path_to_piece')
ngrams = ip.getNgrams(how='modules', unit=2)
Otherwise, you can give specific `df` and/or `other` DataFrames in which
case the `interval_settings` parameter will be ignored. Also, you can
use the `held` parameter to be used for when the lower voice sustains a
note while the upper voice moves. This defaults to 'Held' to distinguish
between held notes and reiterated notes in the lower voice, but if this
distinction is not wanted for your query, you may want to pass way a
unison gets labeled in your `other` DataFrame (e.g. "P1" or "1").
'''
if how == 'columnwise':
return df.apply(ImportedPiece._ngramHelper, args=(n, exclude, offsets))
if df is None:
df = self.getHarmonic(*interval_settings)
if unit:
df = self.regularize(df, unit)
if other is None:
other = self.getMelodic(*interval_settings, unit=unit)
cols = []
for pair in df.columns:
lowerVoice = pair.split('_')[0]
combo = pd.concat([other[lowerVoice], df[pair]], axis=1)
combo.fillna({lowerVoice: held}, inplace=True)
combo.insert(loc=1, column='Joiner', value=', ')
combo['_'] = '_'
if n == -1:
har = df[pair]
starts = har[(har != 'Rest') & (har.shift(1).isin(('Rest', np.nan)))]
ends = har[(har != 'Rest') & (har.shift(-1).isin(('Rest', np.nan)))]
starts.dropna(inplace=True)
ends.dropna(inplace=True)
si = tuple(har.index.get_loc(i) for i in starts.index)
ei = tuple(har.index.get_loc(i) + 1 for i in ends.index)
col = [''.join([cell
for row in combo.iloc[si[i] : ei[i]].values # second loop
for cell in row][2:-1]) # innermost loop
for i in range(len(si))] # outermost loop
col = pd.Series(col)
if offsets == 'first':
col.index = starts.index
else:
col.index = ends.index
else: # n >= 1
lastIndex = -1
if n == 1:
lastIndex = -3
n = 2
combo = ImportedPiece._ngrams_offsets_helper(combo, n, offsets)
combo = pd.concat(combo, axis=1)
col = combo.iloc[:, 2:lastIndex].dropna().apply(lambda row: ''.join(row), axis=1)
if exclude:
mask = col.apply(lambda cell: all([excl not in cell for excl in exclude]))
col = col[mask]
col.name = pair
cols.append(col)
# in case piece has no harmony and cols stays empty
if cols:
return pd.concat(cols, axis=1)
else:
return | pd.DataFrame() | pandas.DataFrame |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = | pd.to_numeric(idx) | pandas.to_numeric |
from pytest import fixture
import pandas as pd
import numpy as np
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
@fixture
def get_cleaned_df():
matches_df = pd.read_csv(os.path.join(THIS_DIR,
'test_matches_df_pipeline.csv'))
rank_df = pd.read_csv(os.path.join(THIS_DIR, 'test_rank_df_pipeline.csv'))
return matches_df, rank_df
@fixture
def get_df_to_clean():
data = {
'league_match': [1, 2, 3, 4],
'team_1': ['barsa\n', '*madrid*', 'valencia ', 'Gimnastic*']
}
return pd.DataFrame(data)
@fixture
def get_features_df():
results_dir = os.path.join(THIS_DIR, 'results_1999.csv')
results = pd.read_csv(results_dir)
general_ranking_dir = os.path.join(THIS_DIR, 'general_ranking_1999.csv')
general_ranking = pd.read_csv(general_ranking_dir)
home_ranking_dir = os.path.join(THIS_DIR, 'home_ranking_1999.csv')
home_ranking = | pd.read_csv(home_ranking_dir) | pandas.read_csv |
from __future__ import print_function
import string
import sys
import os
from collections import deque
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import tensorflow as tf
import keras
keras.backend.image_data_format()
from keras import backend as K
from keras import regularizers
from keras.layers import Input, Dense, Reshape, Lambda, Conv1D, Flatten, MaxPooling1D, UpSampling1D, GlobalMaxPooling1D
from keras.layers import LSTM, Bidirectional, BatchNormalization, Dropout, Concatenate, Embedding, Activation, Dot, dot
from keras.models import Model, clone_model, Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.constraints import unitnorm
from keras_layer_normalization import LayerNormalization
tf.keras.backend.set_floatx('float32')
import sklearn as sk
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import LocallyLinearEmbedding, MDS, Isomap, TSNE
from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA, SparsePCA, TruncatedSVD, FastICA, NMF, MiniBatchDictionaryLearning
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold, GroupKFold, train_test_split
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score
from sklearn.metrics import average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.metrics import roc_curve, precision_recall_curve, RocCurveDisplay, PrecisionRecallDisplay
from sklearn.metrics import roc_auc_score,accuracy_score,matthews_corrcoef
from scipy import stats
from scipy.stats import multivariate_normal, kurtosis, skew, pearsonr, spearmanr
import processSeq
from processSeq import load_seq_1, kmer_dict, load_signal_1, load_seq_2, load_seq_2_kmer, load_seq_altfeature
import xgboost
import pickle
import os.path
from optparse import OptionParser
import time
from timeit import default_timer as timer
import utility_1
from utility_1 import mapping_Idx
import h5py
import json
# generate sequences
# idx_sel_list: chrom, serial
# seq_list: relative positions
def generate_sequences(idx_sel_list, gap_tol=5, region_list=[]):
chrom = idx_sel_list[:,0]
chrom_vec = np.unique(chrom)
chrom_vec = np.sort(chrom_vec)
seq_list = []
print(len(chrom),chrom_vec)
for chrom_id in chrom_vec:
b1 = np.where(chrom==chrom_id)[0]
t_serial = idx_sel_list[b1,1]
prev_serial = t_serial[0:-1]
next_serial = t_serial[1:]
distance = next_serial-prev_serial
b2 = np.where(distance>gap_tol)[0]
if len(b2)>0:
if len(region_list)>0:
# print('region_list',region_list,len(b2))
b_1 = np.where(region_list[:,0]==chrom_id)[0]
# print(b2)
t_serial = idx_sel_list[b2,1]
if len(b_1)>0:
# b2 = np.setdiff1d(b2,region_list[b_1,1])
# print(region_list,region_list[b_1,1],len(b2))
t_id1 = utility_1.mapping_Idx(t_serial,region_list[b_1,1])
t_id1 = t_id1[t_id1>=0]
t_id2 = b2[t_id1]
b2 = np.setdiff1d(b2,t_id2)
# print(len(b2))
# print(idx_sel_list[b2])
# return
# print('gap',len(b2))
if len(b2)>0:
t_seq = list(np.vstack((b2[0:-1]+1,b2[1:])).T)
t_seq.insert(0,np.asarray([0,b2[0]]))
t_seq.append(np.asarray([b2[-1]+1,len(b1)-1]))
else:
t_seq = [np.asarray([0,len(b1)-1])]
# print(t_seq)
# print(chrom_id,len(t_seq),max(distance))
seq_list.extend(b1[np.asarray(t_seq)])
return np.asarray(seq_list)
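# Illustrative sketch (hedged, not part of the original script): generate_sequences
# groups genomic bins into runs whose serial numbers contain no gap larger than gap_tol.
# With a hypothetical input:
#   idx = np.asarray([[1, 0], [1, 1], [1, 2], [1, 10], [1, 11]])
#   generate_sequences(idx, gap_tol=5)
# the returned array holds two [start, stop] index pairs, [0, 2] and [3, 4].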
# select sample
def sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=5, L=5):
num_sample = len(idx_sel_list)
num1 = len(seq_list)
size1 = 2*L+1
print(num_sample,num1,size1)
feature_dim = x_mtx.shape[1]
vec1_local = np.zeros((num_sample,size1),dtype=int)
vec1_serial = np.zeros((num_sample,size1),dtype=int)
feature_mtx = np.zeros((num_sample,size1,feature_dim),dtype=np.float32)
signal_mtx = np.zeros((num_sample,size1))
ref_serial = idx_sel_list[:,1]
id_vec = np.zeros(num_sample,dtype=np.int8)
for i in range(0,num1):
s1, s2 = seq_list[i][0], seq_list[i][1]+1
serial = ref_serial[s1:s2]
id_vec[s1:s2] = 1
# print('start stop',s1,s2,serial)
num2 = len(serial)
t1 = np.outer(list(range(s1,s2)),np.ones(size1))
t2 = t1 + np.outer(np.ones(num2),list(range(-L,L+1)))
t2[t2<s1] = s1
t2[t2>=s2] = s2-1
idx = np.int64(t2)
# print(idx)
vec1_local[s1:s2] = idx
vec1_serial[s1:s2] = ref_serial[idx]
feature_mtx[s1:s2] = x_mtx[idx]
signal_mtx[s1:s2] = y[idx]
# if i%10000==0:
# print(i,num2,vec1_local[s1],vec1_serial[s1])
id1 = np.where(id_vec>0)[0]
num2 = len(id1)
if num2<num_sample:
feature_mtx, signal_mtx = feature_mtx[id1], signal_mtx[id1]
# vec1_serial, vec1_local = vec1_serial[id1], vec1_local[id1]
vec1_serial = vec1_serial[id1]
        id_1 = -np.ones(num_sample,dtype=np.int64)
id_1[id1] = np.arange(num2)
vec1_local = id_1[vec1_local]
b1 = np.where(vec1_local<0)[0]
if len(b1)>0:
print('error!',b1)
return -1
# signal_mtx = signal_mtx[:,np.newaxis]
signal_mtx = np.expand_dims(signal_mtx, axis=-1)
# signal_mtx = np.expand_dims(signal_ntx, axis=-1)
return feature_mtx, signal_mtx, vec1_serial, vec1_local
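# Illustrative note (hedged): sample_select2a1 builds, for every bin, a centered window
# of 2*L+1 neighbouring bins (clipped at the boundaries of its contiguous run) and
# returns the stacked window features, signals, serials and local indices. With the
# default L=5 each sample therefore carries an 11-bin feature context.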
def score_2a(y, y_predicted):
score1 = mean_squared_error(y, y_predicted)
score2 = pearsonr(y, y_predicted)
score3 = explained_variance_score(y, y_predicted)
score4 = mean_absolute_error(y, y_predicted)
score5 = median_absolute_error(y, y_predicted)
score6 = r2_score(y, y_predicted)
score7, pvalue = spearmanr(y,y_predicted)
# vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6]
vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6, score7, pvalue]
return vec1
def read_phyloP(species_name):
path1 = './'
filename1 = '%s/estimate_rt/estimate_rt_%s.txt'%(path1,species_name)
# filename2a = 'test_seq_%s.1.txt'%(species_name)
file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
num_sample = len(chrom_ori)
chrom_vec = np.unique(chrom_ori)
chrom_vec = ['chr22']
for chrom_id in chrom_vec:
filename1 = '%s/phyloP/hg19.phyloP100way.%s.bedGraph'%(path1,chrom_id)
data1 = pd.read_csv(filename1,header=None,sep='\t')
chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
len1 = stop-start
b = np.where(chrom_ori==chrom_id)[0]
num_sample1 = len(b)
vec1 = np.zeros((num_sample1,16))
print(chrom_id,len(chrom),len(b))
cnt = 0
b1 = [-1]
for i in b:
t1 = b1[-1]+1
b1 = np.where((start[t1:]>=start_ori[i])&(stop[t1:]<stop_ori[i]))[0]+t1
if len(b1)==0:
b1 = [-1]
continue
t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
s1 = 0
s2 = np.sum(t_len1)
i1 = cnt
for j in range(0,12):
temp1 = (j-8)*2.5
b2 = np.where((t_score<temp1+2.5)&(t_score>=temp1))[0]
print(b2)
vec1[i1,j] = np.sum(t_len1[b2])*1.0/s2
s1 = s1+temp1*vec1[i1,j]
vec1[i1,12] = s1 # average
vec1[i1,13] = np.median(t_score)
vec1[i1,14] = np.max(t_score)
vec1[i1,15] = np.min(t_score)
cnt += 1
if cnt%1000==0:
print(cnt,len(b1),s2,vec1[i1,12:16])
break
# dict1 = dict()
# dict1['vec'], dict1['index'] = vec1,b
# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
fields = ['index']
for j in range(0,12):
temp1 = (j-8)*2.5
fields.append('%s-%s'%(temp1,temp1+2.5))
fields.extend(range(0,4))
data1 = pd.DataFrame(data = np.hstack((b[:,np.newaxis],vec1)),columns=fields)
data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
return vec1
def read_phyloP_1(ref_filename,header,file_path,chrom_vec,n_level=15,offset=10,magnitude=2):
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
colnames = list(file1)
col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
num_sample = len(chrom_ori)
# chrom_vec = np.unique(chrom_ori)
# chrom_vec = [chrom_id]
# n_level, offset, magnitude = 15, 10, 2
score_max = (n_level-offset)*magnitude
for chrom_id in chrom_vec:
# filename1 = '%s/hg19.phyloP100way.%s.bedGraph'%(file_path,chrom_id)
filename1 = '%s/chr%s.phyloP100way.bedGraph'%(file_path,chrom_id)
data1 = pd.read_csv(filename1,header=None,sep='\t')
chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
len1 = stop-start
chrom_id1 = 'chr%s'%(chrom_id)
b = np.where(chrom_ori==chrom_id1)[0]
num_sample1 = len(b)
vec1 = np.zeros((num_sample1,n_level+4))
print(chrom_id,len(chrom),len(b))
cnt = 0
m_idx = len(start)-1
start_idx = 0
print("number of regions", len(b))
for i in b:
t_start, t_stop = start_ori[i], stop_ori[i] # position of zero region
position = [t_start,t_stop]
if start_idx<=m_idx:
b1, start_idx = utility_1.search_region_include(position, start, stop, m_idx, start_idx)
# print(count,t_start,t_stop,t_stop-t_start,start_idx,len(id3))
if len(b1)==0:
continue
t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
t_score[t_score>score_max] = score_max-1e-04
s1 = 0
s2 = np.sum(t_len1)
for j in range(0,n_level):
temp1 = (j-offset)*magnitude
b2 = np.where((t_score<temp1+magnitude)&(t_score>=temp1))[0]
# print(b2)
vec1[cnt,j] = np.sum(t_len1[b2])*1.0/s2
s1 = s1+temp1*vec1[cnt,j]
vec1[cnt,n_level:n_level+4] = [s1,np.median(t_score),np.max(t_score),np.min(t_score)]
cnt += 1
pre_b1 = b1
if cnt%1000==0:
print(chrom_id,cnt,len(b1),s2,vec1[cnt,-4:])
# break
# dict1 = dict()
# dict1['vec'], dict1['index'] = vec1,b
# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
fields = ['index']
for j in range(0,n_level):
temp1 = (j-offset)*magnitude
fields.append('%s-%s'%(temp1,temp1+magnitude))
fields.extend(range(0,4))
idx = serial_ori[b]
data1 = pd.DataFrame(data = np.hstack((idx[:,np.newaxis],vec1)),columns=fields)
data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
return vec1
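# Illustrative usage sketch (added for clarity; the file names and chromosome list
# below are assumptions, not part of the original script). read_phyloP_1 expects a
# tab-separated reference file with chrom/start/stop/serial columns plus one
# chr<ID>.phyloP100way.bedGraph file per chromosome under file_path:
# vec1 = read_phyloP_1('regions_ref.txt', header=0, file_path='./phyloP',
#                      chrom_vec=[21, 22], n_level=15, offset=10, magnitude=2)
# Each output row holds the per-bin fraction of bases followed by
# [mean, median, max, min] of the phyloP scores in that region.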
def read_motif_1(filename,output_filename=-1):
data1 = pd.read_csv(filename,sep='\t')
colnames = list(data1)
col1, col2, col3 = colnames[0], colnames[1], colnames[2]
chrom, start, stop = np.asarray(data1[col1]), np.asarray(data1[col2]), np.asarray(data1[col3])
region_len = stop-start
m1, m2, median_len = np.max(region_len), np.min(region_len), np.median(region_len)
b1 = np.where(region_len!=median_len)[0]
print(m1,m2,median_len,len(b1))
bin_size = median_len
motif_name = colnames[3:]
mtx1 = np.asarray(data1.loc[:,motif_name])
mtx1 = mtx1*1000.0/np.outer(region_len,np.ones(mtx1.shape[1]))
print('motif',len(motif_name))
print(mtx1.shape)
print(np.max(mtx1),np.min(mtx1),np.median(mtx1))
if output_filename!=-1:
fields = colnames
data1 = pd.DataFrame(columns=fields)
data1[colnames[0]], data1[colnames[1]], data1[colnames[2]] = chrom, start, stop
num1 = len(fields)-3
for i in range(0,num1):
data1[colnames[i+3]] = mtx1[:,i]
data1.to_csv(output_filename,header=True,index=False,sep='\t')
print(output_filename, data1.shape)
return mtx1, chrom, start, stop, colnames
def read_gc_1(ref_filename,header,filename,output_filename):
sel_idx = []
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
f_list = load_seq_altfeature(filename,sel_idx)
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
colnames = list(file1)
col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
num_sample = len(chrom_ori)
if num_sample!=f_list.shape[0]:
print('error!',num_sample,f_list.shape[0])
fields = ['chrom','start','stop','serial','GC','GC_N','GC_skew']
file2 = pd.DataFrame(columns=fields)
file2['chrom'], file2['start'], file2['stop'], file2['serial'] = chrom_ori, start_ori, stop_ori, serial_ori
for i in range(0,3):
file2[fields[i+4]] = f_list[:,i]
file2.to_csv(output_filename,index=False,sep='\t')
return f_list
def generate_serial(filename1,chrom,start,stop):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,23):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
print(chrom_vec)
# print(chrom)
print(len(chrom))
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
serial_start = 0
serial_vec = np.zeros(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
for chrom_id in chrom_vec:
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
b2 = np.where(chrom==chrom_id)[0]
if len(b1)>0:
size1 = int(np.ceil(t_size*1.0/bin_size))
serial = np.int64(start[b2]/bin_size)+serial_start
serial_vec[b2] = serial
print(chrom_id,b2,len(serial),serial_start,size1)
serial_start = serial_start+size1
else:
print("error!")
return
return np.int64(serial_vec)
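# Illustrative usage sketch (hypothetical file name): filename1 is a two-column
# chromosome-sizes table such as hg19.chrom.sizes; bins are numbered genome-wide,
# chromosome by chromosome, using the bin size inferred from start/stop.
# serial = generate_serial('hg19.chrom.sizes', chrom, start, stop)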
def generate_serial_local(filename1,chrom,start,stop,chrom_num):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,chrom_num+1):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
chrom_vec += ['chrM']
print(chrom_vec)
print(chrom)
print(len(chrom))
t_chrom = np.unique(chrom)
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
# serial_start = np.zeros(len(chrom))
serial_start = 0
serial_start_1 = dict()
serial_vec = np.zeros(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
for chrom_id in chrom_vec:
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
serial_start_1[chrom_id] = serial_start
size1 = int(np.ceil(t_size*1.0/bin_size))
serial_start = serial_start+size1
for chrom_id in t_chrom:
b2 = np.where(chrom==chrom_id)
serial = np.int64(start[b2]/bin_size)+serial_start_1[chrom_id]
serial_vec[b2] = serial
return np.int64(serial_vec)
def generate_serial_start(filename1,chrom,start,stop,chrom_num=19):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,chrom_num+1):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
print(chrom_vec)
print(chrom)
print(len(chrom))
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
serial_start = 0
serial_vec = -np.ones(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
start_vec = dict()
for chrom_id in chrom_vec:
start_vec[chrom_id] = serial_start
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
b2 = np.where(chrom==chrom_id)[0]
if len(b1)>0:
size1 = int(np.ceil(t_size*1.0/bin_size))
serial = np.int64(start[b2]/bin_size)+serial_start
serial_vec[b2] = serial
print(chrom_id,b2,len(serial),serial_start,size1)
serial_start = serial_start+size1
else:
print("error!")
return
return np.int64(serial_vec), start_vec
def shuffle_array(vec):
num1 = len(vec)
idx = np.random.permutation(num1)
vec = vec[idx]
return vec, idx
# input: estimated attention, type_id: training, validation, or test data
# output: ranking of attention
def select_region1_sub(filename,type_id):
data1 = pd.read_csv(filename,sep='\t')
colnames = list(data1)
# chrom start stop serial signal predicted_signal predicted_attention
chrom, start, serial = data1['chrom'], data1['start'], data1['serial']
chrom, start, serial = np.asarray(chrom), np.asarray(start), np.asarray(serial)
predicted_attention = data1['predicted_attention']
predicted_attention = np.asarray(predicted_attention)
ranking = stats.rankdata(predicted_attention,'average')/len(predicted_attention)
rank1 = np.zeros((len(predicted_attention),2))
rank1[:,0] = ranking
chrom_vec = np.unique(chrom)
for t_chrom in chrom_vec:
b1 = np.where(chrom==t_chrom)[0]
t_attention = predicted_attention[b1]
t_ranking = stats.rankdata(t_attention,'average')/len(t_attention)
rank1[b1,1] = t_ranking
data1['Q1'] = rank1[:,0] # rank across all the included chromosomes
data1['Q2'] = rank1[:,1] # rank by each chromosome
data1['typeId'] = np.int8(type_id*np.ones(len(rank1)))
return data1,chrom_vec
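# Illustrative usage sketch (hypothetical file name): the input file must contain
# chrom, start, stop, serial, signal, predicted_signal and predicted_attention
# columns; Q1/Q2 are the genome-wide and per-chromosome attention quantiles.
# data1, chrom_vec = select_region1_sub('predicted_attention_test.txt', type_id=1)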
# merge estimated attention from different training/test splits
# type_id1: chromosome order; type_id2: training: 0, test: 1, valid: 2
def select_region1_merge(filename_list,output_filename,type_id1=0,type_id2=1):
list1 = []
chrom_numList = []
# b1 = np.where((self.chrom!='chrX')&(self.chrom!='chrY'))[0]
# ref_chrom, ref_start, ref_serial = self.chrom[b1], self.start[b1], self.serial[b1]
# num_sameple = len(ref_chrom)
i = 0
serial1 = []
num1 = len(filename_list)
vec1 = list(range(num1))
if type_id1==1:
vec1 = list(range(num1-1,-1,-1))
for i in vec1:
filename1 = filename_list[i]
# data1: chrom, start, stop, serial, signal, predicted_signal, predicted_attention, Q1, Q2, typeId
# typeId: training: 0, test: 1, valid: 2
data1, chrom_vec = select_region1_sub(filename1,type_id2)
print(filename1,len(data1))
# list1.append(data1)
# if i==0:
# serial1 = np.asarray(data1['serial'])
t_serial = np.asarray(data1['serial'],dtype=np.int64)
t_serial2 = np.setdiff1d(t_serial,serial1)
serial1 = np.union1d(serial1,t_serial)
id1 = mapping_Idx(t_serial,t_serial2)
colnames = list(data1)
data1 = data1.loc[id1,colnames]
list1.append(data1)
chrom_numList.append(chrom_vec)
data2 = | pd.concat(list1, axis=0, join='outer', ignore_index=True,
keys=None, levels=None, names=None, verify_integrity=False, copy=True) | pandas.concat |
##############################################
## Author: <NAME> ##
## Date of update: 2018/05/25 ##
## Description: Data Mining Final Project ##
## - Model Tuning ##
##############################################
import numpy as np
import pandas as pd
import time
import csv
import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
#-----------------------#
# Main Function #
#-----------------------#
# @param: None
# @return: None
def main():
startTime = time.time()
# Feature Settings
dfFile = 'nba_preprocessed.csv'
dateStart = '2017-08-01'
dateEnd = '2018-04-13'
period = 5
featureSel = 3
# Model Settings
trialName = '00_model_CVGS_yr1_feature3_param2'
modelName = '_GBDT'
model = GradientBoostingClassifier()
tuned_parameters = {
'loss': ['exponential'],
'n_estimators': [600, 800, 1000],
'learning_rate': [0.1, 0.2, 0.3],
'max_depth': [3, 5, 10],
'subsample': [0.5],
'max_features': ['auto', 'log2', 'sqrt']
}
# Feature Extraction
X, Y = featureExtraction(dfFile, dateStart, dateEnd, period, featureSel)
# Number of random trials
NUM_TRIALS = 1
(max_score, best_estimator) = CrossValidationGridSearchNested(X, Y, NUM_TRIALS, 10, model, tuned_parameters, 'roc_auc')
# Write to .csv
with open('./00_model_param/' + trialName + modelName + '.csv','w') as myFile:
for key, value in zip(best_estimator.get_params().keys(), best_estimator.get_params().values()):
myFile.write(key + ',' + str(value) + '\n')
myFile.write('max_score' + ',' + str(max_score) + '\n')
myFile.write('Execution time =' + ',' + str(time.time() - startTime) + '\n')
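# Note: CrossValidationGridSearchNested is defined elsewhere in this project. A
# minimal sketch of what such a routine typically does is given below purely as an
# assumption about its behaviour, not as the actual implementation:
# def CrossValidationGridSearchNested(X, Y, n_trials, n_splits, model, params, scoring):
#     best_score, best_estimator = float('-inf'), None
#     for i in range(n_trials):
#         inner_cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=i)
#         clf = GridSearchCV(model, params, cv=inner_cv, scoring=scoring)
#         clf.fit(X, Y)
#         if clf.best_score_ > best_score:
#             best_score, best_estimator = clf.best_score_, clf.best_estimator_
#     return best_score, best_estimator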
#-----------------------#
# Sub-Functions #
#-----------------------#
# @param X: pandas.DataFrame
# @param featureSel: int
# @return X: pandas.DataFrame
def featureEng(X, featureSel=None):
# Feature Engineering
if not featureSel or featureSel == 0:
return X
if featureSel == 1:
X['PTS_DIFF'] = X['PTS_A'] - X['PTS_B']
elif featureSel == 2:
attriToDrop = ['PTS_A', 'PTS_B']
X = X.drop(columns=attriToDrop)
elif featureSel == 3:
X['PTS_DIFF'] = X['PTS_A'] - X['PTS_B']
attriToDrop = ['PTS_A', 'PTS_B']
X = X.drop(columns=attriToDrop)
elif featureSel == 4:
attriToDrop = [
'FGM_A', 'FGA_A', '3PM_A', '3PA_A', 'FTM_A', 'FTA_A', 'OREB_A', 'DREB_A', 'PF_A',
'FGM_B', 'FGA_B', '3PM_B', '3PA_B', 'FTM_B', 'FTA_B', 'OREB_B', 'DREB_B', 'PF_B'
]
X['PTS_DIFF'] = X['PTS_A'] - X['PTS_B']
X['STL+BLK_A'] = X['STL_A'] + X['BLK_A']
X['STL+BLK_B'] = X['STL_B'] + X['BLK_B']
attriToDrop += ['PTS_A', 'PTS_B', 'STL_A', 'STL_B', 'BLK_A', 'BLK_B']
X = X.drop(columns=attriToDrop)
return X
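# Illustrative example (values hypothetical): with featureSel == 3 the two point
# columns are replaced by their difference, e.g.
# X = pd.DataFrame({'PTS_A': [102, 95], 'PTS_B': [99, 101], 'AST_A': [20, 25]})
# featureEng(X, 3)  # -> columns ['AST_A', 'PTS_DIFF'] with PTS_DIFF = [3, -6]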
# @param dfFile: pandas.DataFrame ('nba_preprocessed.csv')
# @param dateStart, dateEnd: str in the format of 'YYYY-MM-DD'
# @param period: int
# @param featureSel: int (0, 1, 2, and 3 corresponds to feature0, 1, 2, and 3, respectively)
# @return X, Y: pandas.DataFrame
# featureExtraction() outputs X, Y for model training.
# Game date can be assigned
# Attribute to be dropped can be assigned
def featureExtraction(dfFile, dateStart='1000-01-01', dateEnd='2999-12-31', period=5, featureSel=None):
df = | pd.read_csv(dfFile) | pandas.read_csv |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: East Money - Shanghai/Shenzhen boards - concept boards
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import pandas as pd
def stock_board_concept_name_em() -> pd.DataFrame:
"""
    East Money - Shanghai/Shenzhen boards - concept boards - board names
    http://quote.eastmoney.com/center/boardlist.html#concept_board
    :return: concept board names
:rtype: pandas.DataFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
    temp_df.columns = [
        "排名",
        "最新价",
        "涨跌幅",
        "涨跌额",
        "换手率",
        "_",
        "板块代码",
        "板块名称",
        "_",
        "_",
        "_",
        "_",
        "总市值",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "上涨家数",
        "下跌家数",
        "_",
        "_",
        "领涨股票",
        "_",
        "_",
        "领涨股票-涨跌幅",
    ]
    temp_df = temp_df[
        [
            "排名",
            "板块名称",
            "板块代码",
            "最新价",
            "涨跌额",
            "涨跌幅",
            "总市值",
            "换手率",
            "上涨家数",
            "下跌家数",
            "领涨股票",
            "领涨股票-涨跌幅",
        ]
    ]
    temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
    temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
    temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
    temp_df["总市值"] = pd.to_numeric(temp_df["总市值"])
    temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
    temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"])
    temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"])
    temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"])
return temp_df
def stock_board_concept_hist_em(symbol: str = "数字货币", adjust: str = "") -> pd.DataFrame:
"""
    East Money - Shanghai/Shenzhen boards - concept boards - historical quotes
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :param symbol: board name
    :type symbol: str
    :param adjust: choice of {'': no adjustment, "qfq": forward adjusted, "hfq": backward adjusted}
    :type adjust: str
    :return: historical quotes
    :rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_concept_name_em()
    stock_board_code = stock_board_concept_em_map[
        stock_board_concept_em_map["板块名称"] == symbol
    ]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": "101",
"fqt": adjust_map[adjust],
"beg": "0",
"end": "20500101",
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
    temp_df.columns = [
        "日期",
        "开盘",
        "收盘",
        "最高",
        "最低",
        "成交量",
        "成交额",
        "振幅",
        "涨跌幅",
        "涨跌额",
        "换手率",
    ]
    temp_df = temp_df[
        [
            "日期",
            "开盘",
            "收盘",
            "最高",
            "最低",
            "涨跌幅",
            "涨跌额",
            "成交量",
            "成交额",
            "振幅",
            "换手率",
        ]
    ]
    temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
    temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
    temp_df["最高"] = pd.to_numeric(temp_df["最高"])
    temp_df["最低"] = pd.to_numeric(temp_df["最低"])
    temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
    temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
    temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
    temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
    temp_df["振幅"] = pd.t | o_numeric(temp_df["振幅"]) | pandas.to_numeric |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-n_it','--n_iteration',required=True)
parser.add_argument('-protein','--protein',required=True)
parser.add_argument('-file_path','--file_path',required=True)
parser.add_argument('-mdd','--morgan_directory',required=True)
io_args = parser.parse_args()
n_iteration = int(io_args.n_iteration)
protein = io_args.protein
file_path = io_args.file_path
mdd=io_args.morgan_directory
import pandas as pd
import time
import numpy as np
import glob
import os
from keras.models import model_from_json
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve,roc_curve,fbeta_score, precision_score, recall_score
from shutil import copy2
#protein_name = 'CAMKK2/mini_pd'
#file_path = '../'
#iteration_done = 1
t_mol = pd.read_csv(mdd+'/Mol_ct_file.csv',header=None)[[0]].sum()[0]/1000000
hyperparameters = pd.read_csv(file_path+'/'+protein+'/iteration_'+str(n_iteration)+'/'+'hyperparameter_morgan_with_freq_v3.csv',header=None)
hyperparameters.columns = ['Model_no','Over_sampling','Batch_size','Learning_rate','N_layers','N_units','dropout',
'weight','cutoff','ROC_AUC','Pr_0_9','tot_left_0_9_mil','auc_te','pr_te','re_te','tot_left_0_9_mil_te','tot_positives']
hyperparameters.tot_left_0_9_mil = hyperparameters.tot_left_0_9_mil/1000000
hyperparameters.tot_left_0_9_mil_te = hyperparameters.tot_left_0_9_mil_te/1000000
hyperparameters['re_vl/re_pr'] = 0.9/hyperparameters.re_te
tmp = hyperparameters.groupby('cutoff')
cf_values = {}
for mini_df in tmp:
print(mini_df[0])
print(mini_df[1]['re_vl/re_pr'].mean())
print(mini_df[1]['re_vl/re_pr'].std())
cf_values[mini_df[0]] = mini_df[1]['re_vl/re_pr'].std()
#print(mini_df[1][mini_df[1].tot_left_0_9_mil_te==mini_df[1].tot_left_0_9_mil_te.min()])
print(cf_values)
model_to_use_with_cf = []
ind_pr = []
for cf in cf_values:
if cf_values[cf]<0.01:
tmp = hyperparameters[hyperparameters.cutoff==cf]
thr = 0.9
while 1==1:
if len(tmp[tmp.re_te>=thr])>=3:
tmp = tmp[tmp.re_te>=thr]
break
else:
thr = thr - 0.01
#tmp = tmp[tmp.re_te>=0.895]
#if len(tmp)
tmp = tmp.sort_values('pr_te')[::-1]
try:
model_to_use_with_cf.append([cf,tmp.Model_no.iloc[0]])
ind_pr.append([cf,tmp.pr_te.iloc[0]])
except:
pass
else:
tmp = hyperparameters[hyperparameters.cutoff==cf]
thr = 0.9
while 1==1:
if len(tmp[tmp.re_te>=thr])>=3:
tmp = tmp[tmp.re_te>=thr]
break
else:
thr = thr - 0.01
#tmp = tmp[tmp.re_te>=0.895]
tmp = tmp.sort_values('pr_te')[::-1]
try:
model_to_use_with_cf.append([cf,tmp.Model_no.iloc[:3].values])
ind_pr.append([cf,tmp.pr_te.iloc[:3].values])
except:
pass
#v_temp = []
#for i in range(len(model_to_use_with_cf)):
# cf = model_to_use_with_cf[i][0]
# tmp = hyperparameters[hyperparameters.cutoff==cf]
# t_pos = tmp.tot_positives.unique()
# if t_pos>150:
# v_temp.append(model_to_use_with_cf[i])
#model_to_use_with_cf = v_temp
print(model_to_use_with_cf)
print(ind_pr)
all_model_files = {}
for f in glob.glob(file_path+'/'+protein+'/iteration_'+str(n_iteration)+'/all_models/*'):
all_model_files[f] = 1
for f in glob.glob(file_path+'/'+protein+'/iteration_'+str(n_iteration)+'/all_models/*'):
try:
mn = int(f.split('/')[-1].split('_')[1])
except:
mn = int(f.split('/')[-1].split('_')[1].split('.')[0])
for i in range(len(model_to_use_with_cf)):
try:
if mn in model_to_use_with_cf[i][-1]:
all_model_files.pop(f)
except:
if mn==model_to_use_with_cf[i][-1]:
all_model_files.pop(f)
for f in all_model_files.keys():
os.remove(f)
def get_all_x_data(fname,y):
train_set = np.zeros([1000000,1024])
train_id = []
with open(fname,'r') as ref:
no=0
for line in ref:
tmp=line.rstrip().split(',')
train_id.append(tmp[0])
on_bit_vector = tmp[1:]
for elem in on_bit_vector:
train_set[no,int(elem)] = 1
no+=1
train_set = train_set[:no,:]
train_pd = pd.DataFrame(data=train_set)
train_pd['ZINC_ID'] = train_id
if len(y.columns)!=2:
y.reset_index(level=0,inplace=True)
else:
print('already 2 columns: ',fname)
score_col = y.columns.difference(['ZINC_ID'])[0]
train_data = pd.merge(y,train_pd,how='inner',on=['ZINC_ID'])
X_train = train_data[train_data.columns.difference(['ZINC_ID',score_col])].values
y_train = train_data[[score_col]].values
return X_train,y_train
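# Illustrative usage sketch (hypothetical file name): fname is a CSV whose rows are
# "ZINC_ID,bit1,bit2,..." (indices of the on bits of a 1024-bit Morgan fingerprint)
# and y is a DataFrame keyed by ZINC_ID with one score/label column:
# X_valid_mat, y_valid_arr = get_all_x_data('valid_morgan_1024_updated.csv', y_valid)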
try:
valid_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/valid_morgan_1024_updated.csv',header=None,usecols=[0])
except:
valid_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/valid_morgan_1024_updated.csv',header=None,usecols=[0],engine='python')
try:
if 'ZINC' in valid_pd.index[0]:
valid_pd = pd.DataFrame(data=valid_pd.index)
except:
pass
valid_pd.columns= ['ZINC_ID']
valid_label = pd.read_csv(file_path+'/'+protein+'/iteration_1/validation_labels.txt',sep=',',header=0)
validation_data = pd.merge(valid_label,valid_pd,how='inner',on=['ZINC_ID'])
validation_data.set_index('ZINC_ID',inplace=True)
y_valid = validation_data
try:
test_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/test_morgan_1024_updated.csv',header=None,usecols=[0])
except:
test_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/test_morgan_1024_updated.csv',header=None,usecols=[0],engine='python')
try:
if 'ZINC' in test_pd.index[0]:
test_pd = pd.DataFrame(data=test_pd.index)
except:
pass
test_pd.columns= ['ZINC_ID']
test_label = pd.read_csv(file_path+'/'+protein+'/iteration_1/testing_labels.txt',sep=',',header=0)
testing_data = | pd.merge(test_label,test_pd,how='inner',on=['ZINC_ID']) | pandas.merge |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pickle
import shutil
import tempfile
from unittest import TestCase, skipIf
from senpy.models import Results, Entry, EmotionSet, Emotion, Plugins
from senpy import plugins
from senpy.plugins.postprocessing.emotion.centroids import CentroidConversion
from senpy.gsitk_compat import GSITK_AVAILABLE
import pandas as pd
class ShelfDummyPlugin(plugins.SentimentPlugin, plugins.ShelfMixin):
'''Dummy plugin for tests.'''
name = 'Shelf'
version = 0
author = 'the senpy community'
def activate(self, *args, **kwargs):
if 'counter' not in self.sh:
self.sh['counter'] = 0
self.save()
def deactivate(self, *args, **kwargs):
self.save()
def analyse(self, *args, **kwargs):
self.sh['counter'] = self.sh['counter'] + 1
e = Entry()
e.nif__isString = self.sh['counter']
r = Results()
r.entries.append(e)
return r
class PluginsTest(TestCase):
def tearDown(self):
if os.path.exists(self.shelf_dir):
shutil.rmtree(self.shelf_dir)
if os.path.isfile(self.shelf_file):
os.remove(self.shelf_file)
def setUp(self):
self.shelf_dir = tempfile.mkdtemp()
self.shelf_file = os.path.join(self.shelf_dir, "shelf")
def test_serialize(self):
'''A plugin should be serializable and de-serializable'''
dummy = ShelfDummyPlugin()
dummy.serialize()
def test_jsonld(self):
'''A plugin should be serializable and de-serializable'''
dummy = ShelfDummyPlugin()
dummy.jsonld()
def test_shelf_file(self):
a = ShelfDummyPlugin(
info={'name': 'default_shelve_file',
'description': 'Dummy plugin for tests',
'version': 'test'})
a.activate()
assert os.path.isfile(a.shelf_file)
os.remove(a.shelf_file)
def test_plugin_filter(self):
ps = Plugins()
for i in (plugins.SentimentPlugin,
plugins.EmotionPlugin,
plugins.Analyser):
p = i(name='Plugin_{}'.format(i.__name__),
description='TEST',
version=0,
author='NOBODY')
ps.plugins.append(p)
assert len(ps.plugins) == 3
cases = [('AnalysisPlugin', 3),
('SentimentPlugin', 1),
('EmotionPlugin', 1)]
for name, num in cases:
res = list(plugins.pfilter(ps.plugins, plugin_type=name))
assert len(res) == num
def test_shelf(self):
''' A shelf is created and the value is stored '''
newfile = self.shelf_file + "new"
a = ShelfDummyPlugin(info={
'name': 'shelve',
'description': 'Shelf plugin for tests',
'version': 'test',
'shelf_file': newfile
})
assert a.sh == {}
a.activate()
assert a.sh == {'counter': 0}
assert a.shelf_file == newfile
a.sh['a'] = 'fromA'
assert a.sh['a'] == 'fromA'
a.save()
sh = pickle.load(open(newfile, 'rb'))
assert sh['a'] == 'fromA'
def test_dummy_shelf(self):
with open(self.shelf_file, 'wb') as f:
pickle.dump({'counter': 99}, f)
a = ShelfDummyPlugin(info={
'name': 'DummyShelf',
'description': 'Dummy plugin for tests',
'shelf_file': self.shelf_file,
'version': 'test'
})
a.activate()
assert a.shelf_file == self.shelf_file
res1 = a.analyse(input=1)
assert res1.entries[0].nif__isString == 100
a.deactivate()
del a
with open(self.shelf_file, 'rb') as f:
sh = pickle.load(f)
assert sh['counter'] == 100
def test_corrupt_shelf(self):
''' Reusing the values of a previous shelf '''
emptyfile = os.path.join(self.shelf_dir, "emptyfile")
invalidfile = os.path.join(self.shelf_dir, "invalid_file")
with open(emptyfile, 'w+b'), open(invalidfile, 'w+b') as inf:
inf.write(b'ohno')
files = {emptyfile: ['empty file', (EOFError, IndexError)],
invalidfile: ['invalid file', (pickle.UnpicklingError, IndexError)]}
for fn in files:
with open(fn, 'rb') as f:
msg, error = files[fn]
a = ShelfDummyPlugin(info={
'name': 'test_corrupt_shelf_{}'.format(msg),
'description': 'Dummy plugin for tests',
'version': 'test',
'shelf_file': f.name
})
assert os.path.isfile(a.shelf_file)
print('Shelf file: %s' % a.shelf_file)
with self.assertRaises(error):
a.sh['a'] = 'fromA'
a.save()
del a._sh
assert os.path.isfile(a.shelf_file)
a.force_shelf = True
a.sh['a'] = 'fromA'
a.save()
b = pickle.load(f)
assert b['a'] == 'fromA'
def test_reuse_shelf(self):
''' Reusing the values of a previous shelf '''
a = ShelfDummyPlugin(info={
'name': 'shelve',
'description': 'Dummy plugin for tests',
'version': 'test',
'shelf_file': self.shelf_file
})
a.activate()
print('Shelf file: %s' % a.shelf_file)
a.sh['a'] = 'fromA'
a.save()
b = ShelfDummyPlugin(info={
'name': 'shelve',
'description': 'Dummy plugin for tests',
'version': 'test',
'shelf_file': self.shelf_file
})
b.activate()
assert b.sh['a'] == 'fromA'
b.sh['a'] = 'fromB'
assert b.sh['a'] == 'fromB'
def test_extra_params(self):
''' Should be able to set extra parameters'''
a = ShelfDummyPlugin(info={
'name': 'shelve',
'description': 'Dummy shelf plugin for tests',
'version': 'test',
'shelf_file': self.shelf_file,
'extra_params': {
'example': {
'aliases': ['example', 'ex'],
'required': True,
'default': 'nonsense'
}
}
})
assert 'example' in a.extra_params
def test_box(self):
class MyBox(plugins.Box):
''' Vague description'''
author = 'me'
version = 0
def to_features(self, entry, **kwargs):
return entry.text.split()
def predict_one(self, features, **kwargs):
return ['SIGN' in features]
def to_entry(self, features, entry, **kwargs):
print('Features for to_entry:', features)
if features[0]:
entry.myAnnotation = 'DETECTED'
return entry
test_cases = [
{
'input': "nothing here",
'expected': {'myAnnotation': 'DETECTED'},
'should_fail': True
}, {
'input': "SIGN",
'expected': {'myAnnotation': 'DETECTED'}
}]
MyBox().test()
def test_sentimentbox(self):
class SentimentBox(plugins.SentimentBox):
''' Vague description'''
author = 'me'
version = 0
def predict_one(self, features, **kwargs):
text = ' '.join(features)
if ':)' in text:
return [1, 0, 0]
return [0, 0, 1]
test_cases = [
{
'input': 'a happy face :)',
'polarity': 'marl:Positive'
}, {
'input': "Nothing",
'polarity': 'marl:Negative'
}]
SentimentBox().test()
def test_conversion_centroids(self):
info = {
"name": "CentroidTest",
"description": "Centroid test",
"version": 0,
"centroids": {
"c1": {"V1": 0.5,
"V2": 0.5},
"c2": {"V1": -0.5,
"V2": 0.5},
"c3": {"V1": -0.5,
"V2": -0.5},
"c4": {"V1": 0.5,
"V2": -0.5}},
"aliases": {
"V1": "X-dimension",
"V2": "Y-dimension"
},
"centroids_direction": ["emoml:big6", "emoml:fsre-dimensions"]
}
c = CentroidConversion(info)
print(c.serialize())
es1 = EmotionSet()
e1 = Emotion()
e1.onyx__hasEmotionCategory = "c1"
es1.onyx__hasEmotion.append(e1)
res = c._forward_conversion(es1)
assert res["X-dimension"] == 0.5
assert res["Y-dimension"] == 0.5
print(res)
e2 = Emotion()
e2.onyx__hasEmotionCategory = "c2"
es1.onyx__hasEmotion.append(e2)
res = c._forward_conversion(es1)
assert res["X-dimension"] == 0
assert res["Y-dimension"] == 1
print(res)
e = Emotion()
e["X-dimension"] = -0.2
e["Y-dimension"] = -0.3
res = c._backwards_conversion(e)
assert res["onyx:hasEmotionCategory"] == "c3"
print(res)
e = Emotion()
e["X-dimension"] = -0.2
e["Y-dimension"] = 0.3
res = c._backwards_conversion(e)
assert res["onyx:hasEmotionCategory"] == "c2"
def _test_evaluation(self):
testdata = []
for i in range(50):
testdata.append(["good", 1])
for i in range(50):
testdata.append(["bad", -1])
dataset = | pd.DataFrame(testdata, columns=['text', 'polarity']) | pandas.DataFrame |
import datetime
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from numpy.testing import assert_allclose
from pvlib.location import Location
from pvlib import tracking
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
apparent_zenith = pd.Series([10])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': -60, 'aoi': 0,
'surface_azimuth': 90, 'surface_tilt': 60},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_index_mismatch():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90,180])
with pytest.raises(ValueError):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = | pd.Series([135]) | pandas.Series |
##########################################################
# Author: <NAME>
# LinkedIn: https://www.linkedin.com/in/raghavsikaria/
# Github: https://github.com/raghavsikaria
# Last Update: 23-6-2020
# Project: Project-Rajasuyya
# Description: Contains all Data processing utilities
# exposed as static functions
# Code Sources & References:
# 1) Neural Networks in Action for Time Series Forecasting
# by <NAME> and can be found here:
# https://colab.research.google.com/drive/1PYj_6Y8W275ficMkmZdjQBOY7P0T2B3g#scrollTo=OkAmaZYuP20w
##########################################################
# Library imports
import pandas as pd
import numpy as np
from numpy import array
from sklearn.preprocessing import StandardScaler, MinMaxScaler
class DataCleaningAndProcessingUtils:
"""Contains all Data processing utilities exposed as static functions.
Checkout Colab Notebook by Kriti Mahajan posted in the Description. Much of
the code has been recycled here and exposed as re-usable functions.
"""
@staticmethod
def read_csv_data(path: str, index_column: str = None) -> 'DataFrame':
"""Reads a CSV file from the path given, converts & returns a DataFrame."""
df = | pd.read_csv(path,index_col = index_column,parse_dates=True) | pandas.read_csv |
import pandas as pd
from bayespy.network import Discrete
import bayespy.network
import numpy as np
from collections import Counter
import bayespy.data
import bayespy.jni
from bayespy.jni import jp
class _AutoInsight:
def __init__(self, network, target, logger):
self._network = network
self._logger = logger
self._target = target
self._target_state = bayespy.network.get_state(network, target.variable, target.state)
self._target = target
(self._inf_engine, _, _) = bayespy.model.InferenceEngine(network).create(retract=False)
def calculate(self, evidence=[], sort_by=['difference']):
variables = jp.java.util.Arrays.asList(
[v for v in self._network.getVariables() if v.getName() != self._target.variable])
ai = bayespy.jni.bayesServerAnalysis().AutoInsight
if len(evidence) > 0:
e = bayespy.model.Evidence(self._network, self._inf_engine)
evidence_obj = e.apply(evidence)
auto_insight_output = ai.calculate(self._target_state, variables,
bayespy.model.InferenceEngine.get_inference_factory(),
evidence_obj)
else:
auto_insight_output = ai.calculate(self._target_state, variables,
bayespy.model.InferenceEngine.get_inference_factory())
results = []
for variable in auto_insight_output.getVariableOutputs():
variable_name = variable.getVariable().getName()
if variable_name == "Cluster":
continue
for state in variable.getStateOutputs():
results.append({'variable': variable_name, 'state': state.getState().getName(),
'probability': state.getProbability().floatValue(),
'probability_given_target': state.getProbabilityGivenTarget().floatValue(),
'probability_target_given_this': np.nan if state.getProbabilityTargetGivenThis() is None else state.getProbabilityTargetGivenThis().floatValue(),
'difference': state.getDifference().floatValue(),
'lift': np.nan if state.getLift() is None else state.getLift().floatValue()})
df = pd.DataFrame(results)
return df.sort_values(by=sort_by, ascending=False).reset_index().drop('index', axis=1)
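# Illustrative usage sketch (variable and state names are assumptions): calculate()
# returns one row per (variable, state) with its probability, probability given the
# target, probability of the target given the state, difference and lift.
# ai = _AutoInsight(network, Discrete('Target', 'True'), logger)
# df = ai.calculate(evidence=[Discrete('Gender', 'female')], sort_by=['difference'])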
class AutoInsight:
def __init__(self, template, target, logger, comparison_models=3):
self._network_template = template
self._logger = logger
self._data_store = template.get_network_factory().get_datastore()
self._model_cache = []
self._comparison_model_count = comparison_models
self._target = target
def _create_models(self):
if len(self._model_cache) > 0:
return self._model_cache
for i in range(self._comparison_model_count):
network = self._network_template.create()
model = bayespy.model.NetworkModel(network, self._data_store, self._logger)
model.train()
self._model_cache.append(_AutoInsight(network, self._target, self._logger))
return self._model_cache
def get_most_common_tuples(self, combination_length=2, top=20):
models = self._create_models()
group = 0
combinations = pd.DataFrame()
for model in models:
insight = model.calculate()
reader = bayespy.data.DataFrameReader(insight)
while reader.read():
rows = [reader.to_dict()]
evidence = [bayespy.network.Discrete(reader.variable, reader.state)]
for i in range(combination_length-1):
sub_insight = model.calculate(evidence=evidence)
top_row = sub_insight.iloc[0]
evidence.append(bayespy.network.Discrete(top_row.variable, top_row.state))
d = top_row.to_dict()
d.update({'group': group})
rows.append(d)
r = pd.DataFrame(rows)
r['max_difference'] = r.difference.sum()
r['evidence'] = ','.join([str(n) for n in evidence])
combinations = combinations.append(r)
group += 1
return combinations.groupby(by=['evidence']).mean().sort_values(by=['max_difference'], ascending=False)\
.reset_index().drop(['index', 'group'], axis=1).head(top)
def get_descriptive_combinations(self, top=10):
models = self._create_models()
combinations = pd.DataFrame()
for i, model in enumerate(models):
rows = []
evidence = []
for j in range(10):
step = model.calculate(evidence=evidence)
row = step.iloc[0]
evidence.append(bayespy.network.Discrete(row.variable, row.state))
d = row.to_dict()
d.update({'group': i})
rows.append(d)
if row.difference < 0.05:
break
r = pd.DataFrame(rows)
r['max_difference'] = r.difference.sum()
r['evidence'] = ','.join([str(n) for n in evidence])
combinations = combinations.append(r)
return combinations.sort_values(by=['max_difference']).reset_index()
def get_exclusive_states(self, top=10):
models = self._create_models()
rows = | pd.DataFrame() | pandas.DataFrame |
"""
Module to extract, prepare, and produce feature vectors for quarterly stock data.
"""
from datetime import datetime as dt
import itertools
import re
from collections import Counter
from operator import itemgetter
import pandas as pd
import numpy as np
from py2store import lazyprop
from py2store import StrTupleDict
from py2store import groupby
from i2.deco import postprocess
from invest.misc.yf_prep import DbDf
qy_parser = StrTupleDict('{quarter}Q{year}',
{'quarter': '\d', 'year': '\d\d\d\d'},
process_info_dict={'quarter': int, 'year': int}
)
month_of_quarter = {1: 1, 2: 4, 3: 7, 4: 10}
not_w_p = re.compile('\W')
def normalize_str(string):
return not_w_p.sub('_', string.lower())
def indices_counts(ser: pd.Series):
return Counter(itertools.chain.from_iterable(x.index.values for x in ser))
def number_of_quarters_counts(ser: pd.Series):
return Counter(len(x) for x in ser)
def trans_quarter(string):
"""Transform from (not lexicographic friendly) {quarter}Q{year} to a datetime object.
>>> trans_quarter('4Q2019')
datetime.datetime(2019, 10, 1, 0, 0)
"""
quarter, year = qy_parser.str_to_tuple(string)
return dt(year=year, month=month_of_quarter[quarter], day=1)
def to_quarter_df(arr, cols=None):
ser = pd.Series({trans_quarter(x['Quarter']): x for x in arr})
df = pd.DataFrame.from_records(ser, index=ser.index)
if cols:
return df[cols]
else:
return df
def prep_quarterly_earnings(quarterly_earnings: pd.Series, cols=None):
sr = pd.Series({ticker: to_quarter_df(d, cols) for ticker, d in quarterly_earnings['records'].items()})
return sr.sort_index()
def prep_quarterly_from_df(df: pd.DataFrame):
def gen():
for ticker, row in df.sort_index(axis=1).iterrows():
row = row.dropna()
d = pd.DataFrame.from_records([x for x in row.values], index=row.index)
if 'Date' in d.columns:
del d['Date']
yield ticker, d.fillna(np.nan)
return pd.Series(dict(gen()))
def quarter_data_features(quarter_value_seq, n=2):
"""Quarterly data features. Assumes that quarter_value_seq has been sorted by time"""
s = np.array(quarter_value_seq) # copy and/or make into array
m0 = np.mean(s)
if n >= 2:
s = s / abs(m0) # normalize out m0 from s
m1 = np.mean(np.diff(s, 1))
if n == 3:
s = s / abs(m1) # normalize out m1 from s
m2 = np.mean(np.diff(s, 2))
return m0, m1, m2
elif n == 2:
return m0, m1
elif n == 1:
return (m0,)
else:
raise ValueError(f"n should be 1, 2 or 3 (was {n})")
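# Worked example: for quarter_value_seq = [1, 2, 3, 4] and n=2, m0 = mean = 2.5 and,
# after dividing by |m0|, m1 = mean(diff([0.4, 0.8, 1.2, 1.6])) = 0.4, so the
# function returns (2.5, 0.4); with n=1 it returns just (2.5,).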
def quarter_feature_gen(quarter_items, cols=None):
for ticker, df in quarter_items:
if cols is None:
_cols = df.columns
else:
_cols = cols
for k in _cols:
for i, v in enumerate(quarter_data_features(df[k])):
if v is not None and not np.isnan(v):
yield ticker, f"q_{normalize_str(k)}_{i}", v
# yield dict(ticker=ticker, **{f"q_{normalize_str(k)}_{i}": v})
def ticker_featname_featval_iterable_to_df(triple_iterable):
d = {k: dict(v) for k, v in groupby(triple_iterable, key=itemgetter(0), val=itemgetter(1, 2)).items()}
return | pd.DataFrame(d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 16:23:53 2018
"""
#scraping CME is soooo effortless
#just simple html parse tree
#how i love Chicago
import urllib.request as u
import pandas as pd
from bs4 import BeautifulSoup as bs
import os
os.chdir('H:/')
#
def scrape(category_name,commodity_name):
#i use proxy handler cuz my uni network runs on its proxy
#and i cannot authenticate python through the proxy
#so i use empty proxy to bypass the authentication
proxy_handler = u.ProxyHandler({})
opener = u.build_opener(proxy_handler)
#cme officially forbids scraping
#so a header must be used for disguise as an internet browser
#the developers say no to scraping, it appears to be so
#but actually they turn a blind eye to us, thx
#i need different types of commodity
#so i need to format the website for each commodity
req=u.Request('http://www.cmegroup.com/trading/metals/%s/%s.html'%(
category_name,commodity_name),headers={'User-Agent': 'Mozilla/5.0'})
response=opener.open(req)
result=response.read()
soup=bs(result,'html.parser')
return soup
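# Illustrative usage sketch (requires network access; the category/commodity pair
# must exist under the CME metals URL scheme formatted above):
# soup = scrape('precious', 'gold')   # parsed quotes page for COMEX gold futures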
#
def etl(category_name,commodity_name):
try:
page=scrape(category_name,commodity_name)
print(commodity_name)
except Exception as e:
print(e)
#i need date, prior settle price and volume
#it is essential to view source of the website first
#then use beautiful soup to search specific class
p1=page.find_all('span',class_='cmeNoWrap')
p2=page.find_all('td',class_=['statusOK','statusNull','statusAlert'])
p3=page.find_all('td',class_="cmeTableRight")
a=[]
b=[]
c=[]
for i in p1:
a.append(i.text)
#somehow prior settle is hard to get
#we cannot find that specific tag
#we can search for the previous tag instead
#the find_next function of beautifulsoup allows us to get the next tag
#the previous tag of prior settle is change
for j in p2:
temp=j.find_next()
b.append(temp.text)
#the volume contains comma
for k in p3:
        c.append(float(k.text.replace(',','')))  # use the cell text, not the raw tag markup, before casting
df=pd.DataFrame()
df['expiration date']=a
df['prior settle']=b
df['volume']=c
df['name']=commodity_name
#for me, i wanna highlight the front month
#The front month is the month where the majority of volume and liquidity occurs
    df['front month']=df['volume']==max(df['volume'])
    return df
#
def main():
#scraping and etl
df1=etl('precious','silver')
df2=etl('precious','gold')
df3=etl('precious','palladium')
df4=etl('base','copper')
#concatenate then export
dd= | pd.concat([df1,df2,df3,df4]) | pandas.concat |
import pandas as pd
import pytest
from prereise.gather.solardata.helpers import get_plant_id_unique_location, to_reise
def test_plant_id_unique_location_type():
arg = (1, (1, 2, 3), {"a", "b", "c"})
for a in arg:
with pytest.raises(TypeError, match="plant must be a pandas.DataFrame"):
get_plant_id_unique_location(a)
def test_plant_id_unique_location_value():
arg = (
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}),
pd.DataFrame({"a": [1, 2, 3], "lon": [4, 5, 6]}),
pd.DataFrame({"lat": [1, 2, 3], "b": [4, 5, 6]}),
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).rename_axis("plant_id", axis=0),
pd.DataFrame({"lat": [1, 2], "b": [3, 4]}).rename_axis("plant_id", axis=0),
pd.DataFrame({"a": [1, 2], "lon": [3, 4]}).rename_axis("plant_id", axis=0),
)
for a in arg:
with pytest.raises(
ValueError,
match="data frame must have plant_id as index and lat and lon among columns",
):
get_plant_id_unique_location(a)
def test_plant_id_unique_location():
plant = pd.DataFrame(
{
"lat": [
46.6451,
35.0609,
45.8849,
37.7033,
37.7033,
35.0609,
46.6451,
46.6451,
35.0609,
46.6451,
37.7033,
45.8849,
35.0609,
46.6451,
],
"lon": [
-119.908,
-118.292,
-109.888,
-102.623,
-102.623,
-118.292,
-119.908,
-119.908,
-118.292,
-119.908,
-102.623,
-109.888,
-118.292,
-119.908,
],
}
).rename_axis("plant_id", axis=0)
result = get_plant_id_unique_location(plant)
expected = {
(-102.623, 37.7033): [3, 4, 10],
(-109.888, 45.8849): [2, 11],
(-118.292, 35.0609): [1, 5, 8, 12],
(-119.908, 46.6451): [0, 6, 7, 9, 13],
}
assert result.keys() == expected.keys()
for k in result:
assert result[k].values.tolist() == expected[k]
def test_to_reise_type():
arg = (1, (1, 2, 3), {"a", "b", "c"})
for a in arg:
with pytest.raises(TypeError, match="data must be a pandas.DataFrame"):
to_reise(a)
def test_to_reise_value():
arg = (
| pd.DataFrame({"a": [1, 2, 3], "Pout": [4, 5, 6]}) | pandas.DataFrame |
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation
import os
import glob
import json
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
basedir = "/Volumes/Seagate Expansion Drive/Dog_Dataset/Outdoor/"
def combine_bottlenecks(geo):
starting_columns = list(np.arange(2048).astype("str"))
starting_columns.append("Label")
combined_df = pd.DataFrame(columns=starting_columns)
mybasedir = basedir + geo + "/"
os.chdir(mybasedir)
print("Combining bottlenecks...")
for petid in os.listdir("./"):
if os.path.isdir(mybasedir+petid) and (petid not in "lost_and_found"):
os.chdir(mybasedir+petid)
for csvfile in glob.glob("*.csv"):
df = | pd.read_csv(csvfile) | pandas.read_csv |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
# Handling Nan values
def handel_nan(DATA, Median=False):
"""[summary]
DESCRIPTION :-
1. If no. of nan > 50% in a column it will remove that column.
2. int type coumn will be filled with Mean by Default.
3. Catagorical columns will be filled by mode.
PARAMETERS :-
DATA = Dataset DataFrame
Median = It will fill numeric columns with median value.
Returns :-
DataFrame with updated Columns
"""
data = DATA
for row in data:
da = data[row]
NAN = da.isna().sum()
if(NAN != 0):
if((NAN/len(da)) >= 0.5):
data.drop([row], inplace=True, axis=1)
else:
if(da.dtype == 'O'):
data[row] = data[row].fillna(da.mode()[0])
else:
if Median != True:
data[row] = data[row].fillna(da.mean())
else:
data[row] = data[row].fillna(da.median())
return data
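# Illustrative usage example (values hypothetical): numeric gaps are mean-filled
# (median when Median=True), object columns are mode-filled, and any column that is
# at least half NaN is dropped.
# df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': ['x', None, 'x']})
# handel_nan(df)   # 'a' -> [1.0, 2.0, 3.0], 'b' -> ['x', 'x', 'x']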
# Normalizing or Standardizing DataFrame
def handel_standardization(X_train, X_test = None,scale_range=(0,1)):
"""[summary]
DESCRIPTION :-
stand_data = handel_standardization( DATA )
OR
X_train ,X_test = handel_standardization (X_data , X_test)
PARAMETERS :-
X_train = Data or X_data
X_test = If you have seprate Test data you can standerdize both at the same time.
scale_range = it will scale the data between 0,1 by default
Returns:
If Input = X_train ,X_test
==> return Train and Test after Standardizing
If Input = single Dataset
==> return Dataset after Standardizing
"""
train = X_train
test = X_test
if test is not None:
data = train.copy()
Test = test.copy()
else:
data = train.copy()
Row = []
for row in data:
if(data[row].dtype != 'O'):
Row.append(row)
if(len(Row) != 0):
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=scale_range)
if test is not None:
dat = sc.fit_transform(data[Row])
Tes = sc.transform(Test[Row])
data[Row] = dat
Test[Row] = Tes
return (data, Test)
else:
dat = sc.fit_transform(data[Row])
data[Row] = dat
return data
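# Illustrative usage example (frames are hypothetical): only numeric columns are
# scaled; when a test frame is supplied the MinMaxScaler is fit on the first frame
# and then applied to both.
# X = pd.DataFrame({'age': [20, 30, 40], 'city': ['a', 'b', 'c']})
# handel_standardization(X)                        # 'age' -> [0.0, 0.5, 1.0]
# X_tr, X_te = handel_standardization(X_tr, X_te)  # fit on X_tr, transform both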
# Handling Catagorical Variables
def handel_Catagorical(Train_X, Test_Y=None, selected=None,remo_dupli=True):
"""[summary]
DESCRIPTION :-
x_train , x_test = handel_Catagorical(X_data , x_test)
OR
x_train , x_test = handel_Catagorical(X_data , x_test, selected = [Index of selected Columns --- OR -- Names of columns ])
OR
DATA = handel_Catagorical ( X_data)
PARAMETERS :-
Train_X = Data or X_data
Test_Y = If you have seprate Test data
selected (list ) = [0,4,5] i.e index OR ['feature1','feature2'] i.e names of columns,
if None is will process all the Catagorical columns
remo_dupli = will remove duplicated columns if any
Returns :-
Updated dateset with One hot encoded Catagorical variables.
"""
DATA_X = Train_X.copy()
if (Test_Y is not None):
DATA_Y = Test_Y.copy()
data = pd.concat([DATA_X, DATA_Y], axis=0)
else:
data = DATA_X
col = DATA_X.columns
if selected is not None:
column = []
if (type(selected[0]) == int):
for index in selected:
column.append(col[index])
elif (type(selected[0]) == str):
column = selected
else:
raise TypeError('Type Error!!')
for row in data:
if selected is not None:
if row in column:
da = data[row]
else:
continue
else:
da = data[row]
if (da.dtype == 'O'):
dummy = | pd.get_dummies(da) | pandas.get_dummies |
from datetime import datetime
import warnings
import pytest
import pandas as pd
import pyodbc
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, conversion, create
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
@pytest.fixture(scope="module")
def sample():
dataframe = pd.DataFrame(
{
"_varchar": [None, "b", "c", "4", "e"],
"_tinyint": [None, 2, 3, 4, 5],
"_smallint": [256, 2, 6, 4, 5], # tinyint max is 255
"_int": [32768, 2, 3, 4, 5], # smallint max is 32,767
"_bigint": [2147483648, 2, 3, None, 5], # int max size is 2,147,483,647
"_float": [1.111111, 2, 3, 4, 5], # any decicmal places
"_time": [str(datetime.now().time())]
* 5, # string in format HH:MM:SS.ffffff
"_datetime": [datetime.now()] * 4 + [pd.NaT],
"_empty": [None] * 5,
}
)
return dataframe
def test_table_errors(sql):
table_name = "##test_table_column"
with pytest.raises(KeyError):
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, primary_key_column="Z")
def test_table_column(sql):
table_name = "##test_table_column"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "A")
assert all(schema["sql_type"] == "varchar")
assert all(schema["is_nullable"] == True)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "string")
assert all(schema["odbc_type"] == pyodbc.SQL_VARCHAR)
assert all(schema["odbc_size"] == 0)
assert all(schema["odbc_precision"] == 0)
def test_table_pk(sql):
table_name = "##test_table_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "FLOAT"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_composite_pk(sql):
table_name = "##test_table_composite_pk"
columns = {"A": "TINYINT", "B": "VARCHAR(5)", "C": "FLOAT"}
primary_key_column = ["A", "B"]
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 3
assert all(schema.index == ["A", "B", "C"])
assert all(schema["sql_type"] == ["tinyint", "varchar", "float"])
assert all(schema["is_nullable"] == [False, False, True])
assert all(schema["ss_is_identity"] == False)
assert schema["pk_seq"].equals(
pd.Series([1, 2, pd.NA], index=["A", "B", "C"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, False, True])
assert all(schema["pandas_type"] == ["UInt8", "string", "float64"])
assert all(
schema["odbc_type"]
== [pyodbc.SQL_TINYINT, pyodbc.SQL_VARCHAR, pyodbc.SQL_FLOAT]
)
assert all(schema["odbc_size"] == [1, 0, 8])
assert all(schema["odbc_precision"] == [0, 0, 53])
def test_table_pk_input_error(sql):
with pytest.raises(ValueError):
table_name = "##test_table_pk_input_error"
columns = {"A": "TINYINT", "B": "VARCHAR(100)", "C": "DECIMAL(5,2)"}
primary_key_column = "A"
not_nullable = "B"
sql.create.table(
table_name,
columns,
not_nullable=not_nullable,
primary_key_column=primary_key_column,
sql_primary_key=True,
)
def test_table_sqlpk(sql):
table_name = "##test_table_sqlpk"
columns = {"A": "VARCHAR"}
sql.create.table(table_name, columns, sql_primary_key=True)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 2
assert all(schema.index == ["_pk", "A"])
assert all(schema["sql_type"] == ["int identity", "varchar"])
assert all(schema["is_nullable"] == [False, True])
assert all(schema["ss_is_identity"] == [True, False])
assert schema["pk_seq"].equals(
pd.Series([1, pd.NA], index=["_pk", "A"], dtype="Int64")
)
assert all(schema["pk_name"].isna() == [False, True])
assert all(schema["pandas_type"] == ["Int32", "string"])
assert all(schema["odbc_type"] == [pyodbc.SQL_INTEGER, pyodbc.SQL_VARCHAR])
assert all(schema["odbc_size"] == [4, 0])
assert all(schema["odbc_precision"] == [0, 0])
def test_table_from_dataframe_simple(sql):
table_name = "##test_table_from_dataframe_simple"
dataframe = pd.DataFrame({"ColumnA": [1]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
assert len(schema) == 1
assert all(schema.index == "ColumnA")
assert all(schema["sql_type"] == "tinyint")
assert all(schema["is_nullable"] == False)
assert all(schema["ss_is_identity"] == False)
assert all(schema["pk_seq"].isna())
assert all(schema["pk_name"].isna())
assert all(schema["pandas_type"] == "UInt8")
assert all(schema["odbc_type"] == pyodbc.SQL_TINYINT)
assert all(schema["odbc_size"] == 1)
assert all(schema["odbc_precision"] == 0)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result.equals(dataframe)
def test_table_from_dataframe_datestr(sql):
table_name = "##test_table_from_dataframe_datestr"
dataframe = pd.DataFrame({"ColumnA": ["06/22/2021"]})
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame({
'column_name': pd.Series(['ColumnA','_time_insert']),
'sql_type': pd.Series(['date','datetime2'], dtype='string'),
'is_nullable': pd.Series([False, True]),
'ss_is_identity': pd.Series([False, False]),
'pk_seq': pd.Series([None, None], dtype='Int64'),
'pk_name': pd.Series([None, None], dtype='string'),
'pandas_type': pd.Series(['datetime64[ns]', 'datetime64[ns]'], dtype='string'),
'odbc_type': pd.Series([pyodbc.SQL_TYPE_DATE, pyodbc.SQL_TYPE_TIMESTAMP], dtype='int64'),
'odbc_size': pd.Series([10, 27], dtype='int64'),
'odbc_precision': pd.Series([0, 7], dtype='int64'),
}).set_index(keys='column_name')
assert schema[expected.columns].equals(expected)
result = conversion.read_values(f'SELECT * FROM {table_name}', schema, sql.connection)
assert result[dataframe.columns].equals(dataframe)
def test_table_from_dataframe_errorpk(sql, sample):
with pytest.raises(ValueError):
table_name = "##test_table_from_dataframe_nopk"
sql.create.table_from_dataframe(table_name, sample, primary_key="ColumnName")
def test_table_from_dataframe_nopk(sql, sample):
table_name = "##test_table_from_dataframe_nopk"
with warnings.catch_warnings(record=True) as warn:
dataframe = sql.create.table_from_dataframe(
table_name, sample.copy(), primary_key=None
)
assert len(warn) == 1
assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
assert "Created table" in str(warn[0].message)
schema, _ = conversion.get_schema(sql.connection, table_name)
expected = pd.DataFrame(
{
"column_name": pd.Series(
[
"_varchar",
"_tinyint",
"_smallint",
"_int",
"_bigint",
"_float",
"_time",
"_datetime",
"_empty",
],
dtype="string",
),
"sql_type": pd.Series(
[
"varchar",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"time",
"datetime2",
"nvarchar",
],
dtype="string",
),
"is_nullable": pd.Series(
[True, True, False, False, True, False, False, True, True], dtype="bool"
),
"ss_is_identity": pd.Series([False] * 9, dtype="bool"),
"pk_seq": pd.Series([pd.NA] * 9, dtype="Int64"),
"pk_name": | pd.Series([pd.NA] * 9, dtype="string") | pandas.Series |
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
"ๆ่ชhttps://zhuanlan.zhihu.com/p/82737301"
import pandas as pd
import numpy as np
from tqdm import tqdm
import time
import logging
from sklearn.model_selection import StratifiedKFold
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras.optimizers import Adam
from sklearn.metrics import mean_absolute_error, accuracy_score, f1_score
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.callbacks import Callback
learning_rate = 5e-5
min_learning_rate = 1e-5
config_path = 'chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = 'chinese_L-12_H-768_A-12/vocab.txt'
MAX_LEN = 64
token_dict = {}
with open(dict_path, 'r', encoding='utf-8') as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
tokenizer = Tokenizer(token_dict)
train= pd.read_csv('baifendian_data/train.csv')
test=pd.read_csv('baifendian_data/dev_set.csv',sep='\t')
train_achievements = train['question1'].values
train_requirements = train['question2'].values
labels = train['label'].values
def label_process(x):
if x==0:
return [1,0]
else:
return [0,1]
train['label']=train['label'].apply(label_process)
labels_cat=list(train['label'].values)
labels_cat=np.array(labels_cat)
test_achievements = test['question1'].values
test_requirements = test['question2'].values
print(train.shape,test.shape)
class data_generator:
def __init__(self, data, batch_size=16):
self.data = data
self.batch_size = batch_size
self.steps = len(self.data[0]) // self.batch_size
if len(self.data[0]) % self.batch_size != 0:
self.steps += 1
def __len__(self):
return self.steps
def __iter__(self):
while True:
X1, X2, y = self.data
idxs = list(range(len(self.data[0])))
np.random.shuffle(idxs)
T, T_, Y = [], [], []
for c, i in enumerate(idxs):
achievements = X1[i]
requirements = X2[i]
t, t_ = tokenizer.encode(first=achievements, second=requirements, max_len=MAX_LEN)
T.append(t)
T_.append(t_)
Y.append(y[i])
if len(T) == self.batch_size or i == idxs[-1]:
T = np.array(T)
T_ = np.array(T_)
Y = np.array(Y)
yield [T, T_], Y
T, T_, Y = [], [], []
def apply_multiple(input_, layers):
if not len(layers) > 1:
raise ValueError('Layers list should contain more than 1 layer')
else:
agg_ = []
for layer in layers:
agg_.append(layer(input_))
out_ = Concatenate()(agg_)
return out_
def get_model():
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
for l in bert_model.layers:
l.trainable = True
T1 = Input(shape=(None,))
T2 = Input(shape=(None,))
T = bert_model([T1, T2])
T = Lambda(lambda x: x[:, 0])(T)
T= Dense(30, activation='selu')(T)
T = BatchNormalization()(T)
output = Dense(2, activation='softmax')(T)
model = Model([T1, T2], output)
model.compile(
loss='categorical_crossentropy',
        optimizer=Adam(1e-5),  # use a sufficiently small learning rate
metrics=['accuracy']
)
model.summary()
return model
class Evaluate(Callback):
def __init__(self, val_data, val_index):
self.score = []
self.best = 0.
self.early_stopping = 0
self.val_data = val_data
self.val_index = val_index
self.predict = []
self.lr = 0
self.passed = 0
def on_batch_begin(self, batch, logs=None):
"""็ฌฌไธไธชepoch็จๆฅwarmup๏ผ็ฌฌไบไธชepochๆๅญฆไน ็้ๅฐๆไฝ
"""
if self.passed < self.params['steps']:
self.lr = (self.passed + 1.) / self.params['steps'] * learning_rate
K.set_value(self.model.optimizer.lr, self.lr)
self.passed += 1
elif self.params['steps'] <= self.passed < self.params['steps'] * 2:
self.lr = (2 - (self.passed + 1.) / self.params['steps']) * (learning_rate - min_learning_rate)
self.lr += min_learning_rate
K.set_value(self.model.optimizer.lr, self.lr)
self.passed += 1
def on_epoch_end(self, epoch, logs=None):
score, acc, f1 = self.evaluate()
if score > self.best:
self.best = score
self.early_stopping = 0
model.save_weights('bert{}.w'.format(fold))
else:
self.early_stopping += 1
def evaluate(self):
self.predict = []
prob = []
val_x1, val_x2, val_y, val_cat = self.val_data
for i in tqdm(range(len(val_x1))):
achievements = val_x1[i]
requirements = val_x2[i]
t1, t1_ = tokenizer.encode(first=achievements, second=requirements)
T1, T1_ = np.array([t1]), np.array([t1_])
_prob = model.predict([T1, T1_])
oof_train[self.val_index[i]] = _prob[0]
self.predict.append(np.argmax(_prob, axis=1)[0]+1)
prob.append(_prob[0])
score = 1.0 / (1 + mean_absolute_error(val_y+1, self.predict))
acc = accuracy_score(val_y+1, self.predict)
f1 = f1_score(val_y+1, self.predict, average='macro')
return score, acc, f1
nfolds=10
skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=42)
def predict(data):
prob = []
val_x1, val_x2 = data
for i in tqdm(range(len(val_x1))):
achievements = val_x1[i]
requirements = val_x2[i]
t1, t1_ = tokenizer.encode(first=achievements, second=requirements)
T1, T1_ = np.array([t1]), np.array([t1_])
_prob = model.predict([T1, T1_])
prob.append(_prob[0])
return prob
oof_train = np.zeros((len(train), 2), dtype=np.float32)
oof_test = np.zeros((len(test), 2), dtype=np.float32)
for fold, (train_index, valid_index) in enumerate(skf.split(train_achievements, labels)):
x1 = train_achievements[train_index]
x2 = train_requirements[train_index]
y = labels_cat[train_index]
val_x1 = train_achievements[valid_index]
val_x2 = train_requirements[valid_index]
val_y = labels[valid_index]
val_cat = labels_cat[valid_index]
train_D = data_generator([x1, x2, y])
evaluator = Evaluate([val_x1, val_x2, val_y, val_cat], valid_index)
model = get_model()
model.fit_generator(train_D.__iter__(),
steps_per_epoch=len(train_D),
epochs=7,
callbacks=[evaluator]
)
model.load_weights('bert{}.w'.format(fold))
oof_test += predict([test_achievements, test_requirements])
K.clear_session()
oof_test /= nfolds
test=pd.DataFrame(oof_test)
test.to_csv('test_pred.csv',index=False)
print(test.head(), test.shape)
train=pd.DataFrame(oof_train)
train.to_csv('train_pred.csv',index=False)
pred = pd.read_csv('test_pred.csv')
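# A minimal, optional post-processing sketch (not part of the original script):
# turn the fold-averaged probabilities into hard 0/1 labels with argmax and
# save them. The file name 'submission.csv' and the column name 'label' are
# assumptions, not taken from any competition specification.
submission = pd.DataFrame({'label': np.argmax(pred.values, axis=1)})
submission.to_csv('submission.csv', index=False)
print(submission['label'].value_counts())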
# -*- coding: utf-8 -*-
"""
PURPOSE:
A series of tools to aid in the design of a detonation tube.
CREATED BY:
<NAME>
Oregon State University
CIRE and Propulsion Lab
<EMAIL>
"""
import warnings
import os
from math import sqrt
import pint
import pandas as pd
import numpy as np
import sympy as sp
import cantera as ct
from . import tools
from . import thermochem
class Bolt:
@classmethod
def calculate_stress_areas(
cls,
thread_size,
thread_class,
bolt_max_tensile,
plate_max_tensile,
engagement_length,
unit_registry
):
"""
Calculates internal and external thread stress areas using formulas in
Machinery's Handbook, 26th edition.
Parameters
----------
thread_size : str
Size of threads to be evaluated, e.g. '1/4-20' or '1 1/2-6'
thread_class : str
Class of threads to be evaluated, '2' or '3'. 'A' or 'B' are
automatically appended for internal/external threads
bolt_max_tensile : pint quantity
Pint quantity of bolt (ext. thread) tensile failure stress
plate_max_tensile : pint quantity
Pint quantity of plate (int. thread) tensile failure stress
engagement_length : pint quantity
Pint quantity of total thread engagement length
unit_registry : pint unit registry
Unit registry for managing units to prevent conflicts with parent
unit registry
Returns
-------
thread : dict
Dictionary with the following key/value pairs:
'plate area': stress area of internal threads within the plate
'screw area': stress area of external threads on the screw
'minimum engagement': minimum engagement length causing screw to
fail in tension rather than shear, thus preventing the plate
from stripping.
"""
quant = unit_registry.Quantity
tools.check_pint_quantity(
bolt_max_tensile,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
plate_max_tensile,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
engagement_length,
'length',
ensure_positive=True
)
# convert to local unit registry
bolt_max_tensile = quant(
bolt_max_tensile.magnitude,
bolt_max_tensile.units.format_babel()
)
plate_max_tensile = quant(
plate_max_tensile.magnitude,
plate_max_tensile.units.format_babel()
)
engagement_length = quant(
engagement_length.magnitude,
engagement_length.units.format_babel()
)
thread = dict()
# look up thread specs for stress area calculations
thread_specs = cls._import_thread_specs() # type: pd.DataFrame
k_n_max = quant(
thread_specs['internal']
['minor diameter max']
[thread_size]
[thread_class + 'B'],
'in'
)
e_s_min = quant(
thread_specs['external']
['pitch diameter min']
[thread_size]
[thread_class + 'A'],
'in'
)
e_n_max = quant(
thread_specs['internal']
['pitch diameter max']
[thread_size]
[thread_class + 'B'],
'in'
)
d_s_min = quant(
thread_specs['external']
['major diameter min']
[thread_size]
[thread_class + 'A'],
'in'
)
tpi = quant(
float(thread_size.split('-')[-1]),
'1/in'
)
basic_diameter = quant(
thread_specs['external']
['basic diameter']
[thread_size]
[thread_class + 'A'],
'in'
)
if bolt_max_tensile < quant(100000, 'psi'):
# calculate screw tensile area using eq. 9 (p. 1482) in Fasteners
# section of Machinery's Handbook 26 (also eq. 2a on p. 1490)
screw_area_tensile = np.pi / 4 * (
basic_diameter - 0.9742785 / tpi
) ** 2
else:
# calculate screw tensile area using eq. 2b (p. 1490) in Fasteners
# section of Machinery's Handbook 26
screw_area_tensile = np.pi * (
e_s_min / 2 -
0.16238 / tpi
) ** 2
# calculate screw shear area using eq. 5 (p. 1491) in Fasteners section
# of Machinery's Handbook 26
screw_area_shear = (
np.pi * tpi * engagement_length * k_n_max *
(1. / (2 * tpi) + 0.57735 * (e_s_min - k_n_max))
)
# choose correct area
if screw_area_shear < screw_area_tensile:
warnings.warn(
'Screws fail in shear, not tension.' +
' Plate may be damaged.' +
' Consider increasing bolt engagement length',
Warning
)
thread['screw area'] = screw_area_shear
else:
thread['screw area'] = screw_area_tensile
# calculate plate shear area using eq. 6 (p. 1491) in Fasteners section
# of Machinery's Handbook 26
thread['plate area'] = (
np.pi * tpi * engagement_length * d_s_min *
(1. / (2 * tpi) + 0.57735 * (d_s_min - e_n_max))
)
# calculate minimum engagement scale factor using eq. 3 (p. 1490) in
# Fasteners section of Machinery's Handbook 26
j_factor = (
(screw_area_shear * bolt_max_tensile) /
(thread['plate area'] * plate_max_tensile)
)
# calculate minimum thread engagement (corrected for material
# differences) using eqs. 1 and 4 (pp. 1490-1491) in Fasteners section
# of Machinery's Handbook 26
thread['minimum engagement'] = (
2 * screw_area_tensile / (
k_n_max * np.pi * (
1. / 2 + 0.57735 * tpi * (e_s_min - k_n_max)
)
)
) * j_factor
return thread
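    # A minimal usage sketch, kept as a comment so importing this module has no
    # side effects. The thread size '1/4-20' and class '2' are the examples from
    # the docstring above; the material strengths and engagement length are
    # illustrative assumptions only:
    #
    #   import pint
    #   ureg = pint.UnitRegistry()
    #   thread = Bolt.calculate_stress_areas(
    #       thread_size='1/4-20',
    #       thread_class='2',
    #       bolt_max_tensile=ureg.Quantity(120000, 'psi'),
    #       plate_max_tensile=ureg.Quantity(30000, 'psi'),
    #       engagement_length=ureg.Quantity(0.5, 'in'),
    #       unit_registry=ureg,
    #   )
    #   thread['screw area'], thread['plate area'], thread['minimum engagement']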
@staticmethod
def _import_thread_specs():
"""
Imports thread specifications from .csv files
Returns
-------
thread_specs : dict
[internal thread specs, external thread specs]. Both sets of thread
specifications are multi-indexed with (thread size, thread class).
"""
file_directory = os.path.join(
os.path.dirname(
os.path.relpath(__file__)
),
'lookup_data'
)
file_names = [
'ANSI_inch_internal_thread.csv',
'ANSI_inch_external_thread.csv'
]
file_locations = [
os.path.relpath(
os.path.join(
file_directory,
name
)
)
for name in file_names
]
thread_specs = {
key: pd.read_csv(location, index_col=(0, 1)) for location, key in
zip(file_locations, ['internal', 'external'])
}
return thread_specs
@classmethod
def get_thread_property(
cls,
thread_property,
thread_size,
thread_class,
unit_registry
):
"""
Finds a thread property, such as minor diameter, using a dataframe from
import_thread_specs(). import_thread_specs is not directly called here
to save time by not reading from disk every time a property is
requested.
Parameters
----------
thread_property : str
Property that is desired, such as 'minor diameter'
thread_size : str
Thread size for desired property, such as '1/4-20' or '1 1/2-6'
thread_class : str
Thread class: '2B' or '3B' for internal threads, '2A' or '3A' for
external threads
unit_registry : pint unit registry
Unit registry for managing units to prevent conflicts with parent
unit registry
Returns
-------
pint.UnitRegistry().Quantity
Property requested, as a pint quantity with units of inches
"""
quant = unit_registry.Quantity
thread_specs = cls._import_thread_specs()
# determine if internal or external
if 'A' in thread_class and ('2' in thread_class or '3' in thread_class):
thread_specs = thread_specs['external']
elif 'B' in thread_class and ('2' in thread_class
or '3' in thread_class):
thread_specs = thread_specs['internal']
else:
raise ValueError('\nbad thread class')
# ensure property is in the specs dataframe
if thread_property not in thread_specs.keys():
raise KeyError('\nThread property \'' +
thread_property +
'\' not found. Available specs: ' +
"'" + "', '".join(thread_specs.keys()) + "'")
# ensure thread size is in the specs dataframe
if thread_size not in thread_specs.index:
raise KeyError('\nThread size \'' +
thread_size +
'\' not found')
# retrieve the property
return quant(thread_specs[thread_property][thread_size][thread_class],
'in')
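# A hedged usage sketch for Bolt.get_thread_property, commented out so that the
# module stays side-effect free on import. The property name, thread size, and
# thread class below mirror values used in this module's own lookups and
# docstrings; anything else must exist in the bundled ANSI thread CSV tables.
#
#   import pint
#   ureg = pint.UnitRegistry()
#   k_n_max = Bolt.get_thread_property('minor diameter max', '1/4-20', '2B', ureg)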
class DDT:
@staticmethod
def calculate_spiral_diameter(
pipe_id,
blockage_ratio
):
"""
Calculates the diameter of a Shchelkin spiral corresponding to a given
blockage ratio within a pipe of given inner diameter.
Parameters
----------
pipe_id : pint quantity
Length scale representing the inner diameter of the pipe used for
the detonation tube
blockage_ratio : float
percentage (float between 0 and 1)
Returns
-------
spiral_diameter : pint quantity
Shchelkin spiral diameter inside a tube of pipe_id inner diameter
giving a blockage ratio of blockage_ratio %. Units are the same as
pipe_id.
"""
# ensure blockage ratio is a float
try:
blockage_ratio = float(blockage_ratio)
except ValueError:
raise ValueError('\nNon-numeric blockage ratio.')
# ensure blockage ratio is on 0<BR<1
if not 0 < blockage_ratio < 1:
raise ValueError('\nBlockage ratio outside of 0<BR<1')
tools.check_pint_quantity(
pipe_id,
'length',
ensure_positive=True
)
# calculate Shchelkin spiral diameter
spiral_diameter = pipe_id / 2 * (1 - sqrt(1 - blockage_ratio))
return spiral_diameter
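    # Worked example (as a comment, to avoid executing on import): for a pipe
    # with a 2 inch inner diameter and a desired blockage ratio of 0.45,
    #
    #   import pint
    #   ureg = pint.UnitRegistry()
    #   d_spiral = DDT.calculate_spiral_diameter(ureg.Quantity(2, 'in'), 0.45)
    #
    # evaluates the relation d = D/2 * (1 - sqrt(1 - BR)) used above. The pipe
    # size and blockage ratio here are illustrative assumptions.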
@staticmethod
def calculate_blockage_ratio(
tube_inner_diameter,
blockage_diameter
):
"""
Calculates the blockage ratio of a Shchelkin spiral within a detonation
tube.
Parameters
----------
tube_inner_diameter : pint quantity
Length scale corresponding to the ID of the detonation tube
blockage_diameter : pint quantity
Length scale corresponding to the OD of a Shchelkin spiral
Returns
-------
blockage_ratio : float
Ratio of blocked to open area (between 0 and 1)
"""
# check dimensionality and >=0
tools.check_pint_quantity(
tube_inner_diameter,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
blockage_diameter,
'length',
ensure_positive=True
)
# make sure units cancel
blockage_diameter = blockage_diameter.to_base_units()
tube_inner_diameter = tube_inner_diameter.to_base_units()
# ensure blockage diameter < tube diameter
if tube_inner_diameter.magnitude == 0:
raise ValueError('\ntube ID cannot be 0')
elif blockage_diameter >= tube_inner_diameter:
raise ValueError('\nblockage diameter >= tube diameter')
# calculate blockage ratio
blockage_ratio = (1 - (1 - 2 * blockage_diameter.magnitude /
tube_inner_diameter.magnitude) ** 2)
return blockage_ratio
@staticmethod
def calculate_run_up(
blockage_ratio,
tube_diameter,
initial_temperature,
initial_pressure,
species_dict,
mechanism,
unit_registry,
phase_specification=''
):
"""
Calculates the runup distance needed for a detonation to develop from a
deflagration for a given blockage ratio, tube diameter, and mixture.
This is accomplished using equations collected by Ciccarelli and
Dorofeev [1] for blockage ratios <= 0.75. If the desired blockage ratio
is less than 0.3, the mixture viscosity is needed, and the
phase_specification option may be necessary depending on the mechanism.
[1] <NAME> and <NAME>, โFlame acceleration and transition to
detonation in ducts,โ Prog. Energy Combust. Sci., vol. 34, no. 4, pp.
499โ550, Aug. 2008.
Parameters
----------
blockage_ratio : float
Ratio of the cross-sectional area of the detonation tube and a
periodic blockage used to cause DDT
tube_diameter : pint quantity
Internal diameter of the detonation tube
initial_temperature : pint quantity
Mixture initial temperature
initial_pressure : pint quantity
Mixture initial pressure
species_dict : dict
Dictionary containing the species in the mixture as keys, with total
moles or mole fractions as values
mechanism : str
Mechanism file name for Cantera
unit_registry : pint unit registry
Unit registry for managing units to prevent conflicts with parent
unit registry
phase_specification : str
(Optional) Phase specification within the mechanism file used to
evaluate thermophysical properties. If Gri30.cti is used with no
phase specification, viscosity calculations will fail, resulting in
an error for all blockage ratios less than 0.3.
Returns
-------
runup_distance : pint quantity
Predicted DDT distance, with the same units as the tube diameter
"""
if blockage_ratio <= 0 or blockage_ratio > 0.75:
raise ValueError('\nBlockage ratio outside of correlation range')
tools.check_pint_quantity(
tube_diameter,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
initial_temperature,
'temperature',
ensure_positive=True
)
tools.check_pint_quantity(
initial_pressure,
'pressure',
ensure_positive=True
)
# handle units
quant = unit_registry.Quantity
tube_diameter = quant(
tube_diameter.magnitude,
tube_diameter.units.format_babel()
)
# calculate laminar flamespeed
laminar_fs = thermochem.calculate_laminar_flamespeed(
initial_temperature,
initial_pressure,
species_dict,
mechanism
)
laminar_fs = quant(
laminar_fs.magnitude, laminar_fs.units.format_babel()
)
# calculate density ratio across the deflagration assuming adiabatic
# flame
density = np.zeros(2)
working_gas = ct.Solution(mechanism, phase_specification)
working_gas.TPX = [
initial_temperature.to('K').magnitude,
initial_pressure.to('Pa').magnitude,
species_dict
]
density[0] = working_gas.density
working_gas.equilibrate('HP')
density[1] = 1 / working_gas.density
density_ratio = np.prod(density)
# find sound speed in products at adiabatic flame temperature
sound_speed = thermochem.get_eq_sound_speed(
quant(working_gas.T, 'K'),
quant(working_gas.P, 'Pa'),
species_dict,
mechanism
)
sound_speed = quant(
sound_speed.magnitude,
sound_speed.units.format_babel()
)
def eq4_1():
"""
Calculate runup distance for blockage ratios <= 0.1 using equation
4.1 from <NAME> and <NAME>, โFlame acceleration and
transition to detonation in ducts,โ Prog. Energy Combust. Sci.,
vol. 34, no. 4, pp. 499โ550, Aug. 2008.
"""
# define constants
kappa = 0.4
kk = 5.5
cc = 0.2
mm = -0.18
eta = 2.1
# calculate laminar flame thickness, delta
working_gas.TPX = [
initial_temperature.to('K').magnitude,
initial_pressure.to('Pa').magnitude,
species_dict
]
rho = quant(working_gas.density_mass, 'kg/m^3')
mu = quant(working_gas.viscosity, 'Pa*s')
nu = mu / rho
delta = (nu / laminar_fs).to_base_units()
# calculate gamma
gamma = (
sound_speed /
(eta * (density_ratio - 1) ** 2 * laminar_fs) *
(delta / tube_diameter) ** (1. / 3)
) ** (1 / (2 * mm + 7. / 3))
# calculate runup distance
d_over_h = (2. / (1 - np.sqrt(1 - blockage_ratio)))
runup = (
gamma / cc *
(1 / kappa * np.log(gamma * d_over_h) + kk) *
tube_diameter
)
return runup.to(tube_diameter.units.format_babel())
def eq4_4():
"""
Calculate runup for blockage ratios between 0.3 and 0.75 using
equation 4.4 in <NAME> and <NAME>, โFlame acceleration
and transition to detonation in ducts,โ Prog. Energy Combust. Sci.,
vol. 34, no. 4, pp. 499โ550, Aug. 2008.
"""
# define constants
aa = 2.
bb = 1.5
# calculate left and right hand sides of eq 4.4
lhs = (
2 * 10 * laminar_fs * (density_ratio - 1) /
(sound_speed * tube_diameter)
)
rhs = (
aa * (1 - blockage_ratio) /
(1 + bb * blockage_ratio)
)
runup = rhs / lhs
return runup.to(tube_diameter.units.format_babel())
# use appropriate equation to calculate runup distance
if 0.3 <= blockage_ratio <= 0.75:
runup_distance = eq4_4()
elif 0.1 >= blockage_ratio:
runup_distance = eq4_1()
else:
interp_distances = np.array([
eq4_1().magnitude,
eq4_4().magnitude
])
runup_distance = np.interp(
blockage_ratio,
np.array([0.1, 0.3]),
interp_distances
)
runup_distance = quant(
runup_distance,
tube_diameter.units.format_babel()
)
return runup_distance
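# A hedged usage sketch for DDT.calculate_run_up, commented out because it needs
# Cantera and a mechanism file at runtime. The stoichiometric CH4/air mixture
# and 'gri30.cti' mechanism mirror the defaults of the Tube class below; the
# tube diameter and blockage ratio are illustrative assumptions.
#
#   import pint
#   ureg = pint.UnitRegistry()
#   runup = DDT.calculate_run_up(
#       blockage_ratio=0.45,
#       tube_diameter=ureg.Quantity(6, 'in'),
#       initial_temperature=ureg.Quantity(300, 'K'),
#       initial_pressure=ureg.Quantity(1, 'atm'),
#       species_dict={'CH4': 1, 'O2': 2, 'N2': 7.52},
#       mechanism='gri30.cti',
#       unit_registry=ureg,
#   )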
class Window:
@classmethod
def safety_factor(
cls,
length,
width,
thickness,
pressure,
rupture_modulus
):
"""
This function calculates the safety factor of a clamped rectangular
window given window dimensions, design pressure, and material rupture
modulus
Parameters
----------
length : pint quantity with length units
Window unsupported (viewing) length
width : pint quantity with length units
Window unsupported (viewing) width
thickness : pint quantity with length units
Window thickness
pressure : pint quantity with pressure units
Design pressure differential across window at which factor of
safety is to be calculated
rupture_modulus : pint quantity with pressure units
Rupture modulus of desired window material.
Returns
-------
safety_factor : float
Window factor of safety
"""
tools.check_pint_quantity(
length,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
width,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
thickness,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
pressure,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
rupture_modulus,
'pressure',
ensure_positive=True
)
safety_factor = cls.solver(
length=length.to_base_units().magnitude,
width=width.to_base_units().magnitude,
thickness=thickness.to_base_units().magnitude,
pressure=pressure.to_base_units().magnitude,
rupture_modulus=rupture_modulus.to_base_units().magnitude
)
return safety_factor
@classmethod
def minimum_thickness(
cls,
length,
width,
safety_factor,
pressure,
rupture_modulus,
unit_registry
):
"""
This function calculates the thickness of a clamped rectangular window
which gives the desired safety factor.
Parameters
----------
length : pint quantity with length units
Window unsupported (viewing) length
width : pint quantity with length units
Window unsupported (viewing) width
safety_factor : float
Safety factor
pressure : pint quantity with pressure units
Design pressure differential across window at which factor of
safety is to be calculated
rupture_modulus : pint quantity with pressure units
Rupture modulus of desired window material.
unit_registry : pint unit registry
Keeps output consistent with parent registry, avoiding conflicts
Returns
-------
thickness : pint quantity
Window thickness
"""
quant = unit_registry.Quantity
tools.check_pint_quantity(
length,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
width,
'length',
ensure_positive=True
)
tools.check_pint_quantity(
pressure,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
rupture_modulus,
'pressure',
ensure_positive=True
)
# Ensure safety factor is numeric and > 1
try:
if safety_factor < 1:
raise ValueError('\nWindow safety factor < 1')
except TypeError:
raise TypeError('\nNon-numeric window safety factor')
thickness = cls.solver(
length=length.to_base_units().magnitude,
width=width.to_base_units().magnitude,
safety_factor=safety_factor,
pressure=pressure.to_base_units().magnitude,
rupture_modulus=rupture_modulus.to_base_units().magnitude
)
return quant(
thickness,
width.to_base_units().units).to(width.units.format_babel())
@staticmethod
def solver(
**kwargs
):
"""
This function uses sympy to solve for a missing window measurement.
Inputs are five keyword arguments, with the following possible values:
length
width
thickness
pressure
rupture_modulus
safety_factor
All of these arguments should be floats, and dimensions should be
consistent (handling should be done in other functions, such as
        calculate_window_sf()).
Equation from:
https://www.crystran.co.uk/userfiles/files/
design-of-pressure-windows.pdf
Parameters
----------
kwargs
Returns
-------
missing value as a float, or NaN if the result is imaginary
"""
# Ensure that 5 keyword arguments were given
        if len(kwargs) != 5:
raise ValueError('\nIncorrect number of arguments sent to solver')
# Ensure all keyword arguments are correct
good_arguments = [
'length',
'width',
'thickness',
'pressure',
'rupture_modulus',
'safety_factor'
]
bad_args = []
for arg in kwargs:
if arg not in good_arguments:
bad_args.append(arg)
if len(bad_args) > 0:
error_string = '\nBad keyword argument:'
for arg in bad_args:
error_string += '\n' + arg
raise ValueError(error_string)
# Define equation to be solved
k_factor = 0.75 # clamped window factor
argument_symbols = {
'length': 'var_l',
'width': 'var_w',
'thickness': 'var_t',
'pressure': 'var_p',
'rupture_modulus': 'var_m',
'safety_factor': 'var_sf'
}
var_l = sp.Symbol('var_l')
var_w = sp.Symbol('var_w')
var_t = sp.Symbol('var_t')
var_p = sp.Symbol('var_p')
var_m = sp.Symbol('var_m')
var_sf = sp.Symbol('var_sf')
expr = (
var_l *
var_w *
sp.sqrt(
(
var_p *
k_factor *
var_sf /
(
2 *
var_m *
(
var_l ** 2 +
var_w ** 2
)
)
)
) - var_t
)
# Solve equation
for arg in kwargs:
expr = expr.subs(argument_symbols[arg], kwargs[arg])
solution = sp.solve(expr)[0]
if solution.is_real:
return float(solution)
else:
warnings.warn('Window inputs resulted in imaginary solution.')
return np.NaN
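    # A worked sketch of the clamped-window relation handled by solver(),
    # commented out to keep imports side-effect free. Passing any five of the
    # six keywords returns the missing one; here thickness is solved for. The
    # numbers are illustrative assumptions in SI base units:
    #
    #   t = Window.solver(
    #       length=0.1,            # m
    #       width=0.05,            # m
    #       safety_factor=4,
    #       pressure=2e6,          # Pa
    #       rupture_modulus=5e7,   # Pa
    #   )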
@staticmethod
def calculate_bolt_sfs(
max_pressure,
window_area,
num_bolts,
thread_size,
thread_class,
bolt_max_tensile,
plate_max_tensile,
engagement_length,
unit_registry
):
"""
Calculates bolt and plate safety factors for viewing window bolts
Parameters
----------
max_pressure : pint quantity
Pint quantity of tube maximum pressure (absolute)
window_area : pint quantity
Pint quantity of window area exposed to high pressure environment
num_bolts : int
Number of bolts used to secure each viewing window
thread_size : str
Size of threads to be evaluated, e.g. '1/4-20' or '1 1/2-6'
thread_class : str
Class of threads to be evaluated, '2' or '3'. 'A' or 'B' are
automatically appended for internal/external threads
bolt_max_tensile : pint quantity
Pint quantity of bolt (ext. thread) tensile failure stress
plate_max_tensile : pint quantity
Pint quantity of plate (int. thread) tensile failure stress
engagement_length : pint quantity
Pint quantity of total thread engagement length
unit_registry : pint unit registry
Keeps output consistent with parent registry, avoiding conflicts
Returns
-------
safety_factors : dict
Dictionary with keys of 'bolt' and 'plate', giving factors of safety
for window bolts and the plate that they are screwed into.
"""
quant = unit_registry.Quantity
tools.check_pint_quantity(
max_pressure,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
window_area,
'area',
ensure_positive=True
)
tools.check_pint_quantity(
bolt_max_tensile,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
plate_max_tensile,
'pressure',
ensure_positive=True
)
tools.check_pint_quantity(
engagement_length,
'length',
ensure_positive=True
)
# convert all quantities to local unit registry
max_pressure = quant(
max_pressure.magnitude,
max_pressure.units.format_babel()
)
window_area = quant(
window_area.magnitude,
window_area.units.format_babel()
)
bolt_max_tensile = quant(
bolt_max_tensile.magnitude,
bolt_max_tensile.units.format_babel()
)
plate_max_tensile = quant(
plate_max_tensile.magnitude,
plate_max_tensile.units.format_babel()
)
engagement_length = quant(
engagement_length.magnitude,
engagement_length.units.format_babel()
)
# get total force per bolt
window_force = (
(max_pressure - quant(1, 'atm')) * window_area / num_bolts
)
# get stress areas
thread = Bolt.calculate_stress_areas(
thread_size,
thread_class,
bolt_max_tensile,
plate_max_tensile,
engagement_length,
unit_registry
)
screw_area = thread['screw area']
screw_area = quant(
screw_area.magnitude,
screw_area.units.format_babel()
)
plate_area = thread['plate area']
plate_area = quant(
plate_area.magnitude,
plate_area.units.format_babel()
)
# calculate safety factors
safety_factors = dict()
safety_factors['bolt'] = (
bolt_max_tensile / (window_force / screw_area)
).to_base_units()
safety_factors['plate'] = (
plate_max_tensile / (window_force / plate_area)
).to_base_units()
return safety_factors
class Tube:
_all_quantities = {
'material',
'schedule',
'nominal_size',
'welded',
'initial_pressure',
'initial_temperature',
'max_pressure',
'max_stress',
'dynamic_load_factor',
'dimensions',
'fuel',
'oxidizer',
'diluent',
'equivalence_ratio',
'dilution_fraction',
'dilution_mode',
'mechanism',
'safety_factor',
'flange_class',
'cj_speed',
'verbose',
'show_warnings'
}
def __init__(
self,
*,
material='316L',
schedule='80',
nominal_size='6',
welded=False,
max_stress=None,
initial_temperature=(20, 'degC'),
max_pressure=None,
mechanism='gri30.cti',
fuel='CH4',
oxidizer='O2:1, N2:3.76',
diluent='N2',
equivalence_ratio=1,
dilution_fraction=0,
dilution_mode='mole',
safety_factor=4,
verbose=False,
show_warnings=True,
autocalc_initial=False,
use_multiprocessing=False
):
"""
Parameters
----------
"""
self._initializing = True
# decide whether to allow automatic calculations
self._calculate_stress = max_stress is not None
self._calculate_max_pressure = max_pressure is not None
self._autocalc_initial = bool(autocalc_initial)
# build local unit registry
self._units = self._UnitSystem()
# initiate hidden dict of properties
self._properties = dict()
# define all non-input quantities as None
inputs = locals()
for item in self._all_quantities:
self._properties[item] = None
# decide on use of multiprocessing (requires __main__)
self._use_multiprocessing = bool(use_multiprocessing)
# determine whether or not the tube is welded
self._properties['welded'] = bool(welded)
# check materials list to make sure it's good
# define and collect tube materials and groups
self._collect_tube_materials()
self._get_material_groups()
self._check_materials_list()
self._collect_material_limits()
self._get_flange_limits_from_csv()
self.material = material
self._pipe_schedules_import()
self._mechanisms = tools.find_mechanisms()
# determine whether or not to report progress or issues to the user
self.verbose = bool(verbose)
self._show_warnings = bool(show_warnings)
# initialize dimensions object and set nominal size and schedule
self._properties['dimensions'] = self._Dimensions()
self.nominal_size = nominal_size
self.schedule = schedule
# set initial temperature to 20 C if not defined
# if initial_temperature is None:
# self._properties[
# 'initial_temperature'
# ] = self._units.quant(20, 'degC')
# else:
self.initial_temperature = initial_temperature
# set max stress
if max_stress is not None:
self._properties['max_stress'] = max_stress
# keep the user's input
self._calculate_stress = False
else:
# allow max stress to be recalculated
self._calculate_stress = True
# set safety factor
self.safety_factor = safety_factor
# set max pressure
if max_pressure is not None:
self.max_pressure = max_pressure
# allow max pressure to be recalculated
self._calculate_max_pressure = False
else:
# keep the user's input
self._calculate_max_pressure = True
# set mechanism and reactant mixture
if mechanism is not None:
self.mechanism = mechanism
self.fuel = fuel
self.oxidizer = oxidizer
self.diluent = diluent
self.equivalence_ratio = equivalence_ratio
self.dilution_mode = dilution_mode
self.dilution_fraction = dilution_fraction
self._initializing = False
# start auto-calculation chain
if self._calculate_stress:
self.calculate_max_stress()
elif self._calculate_max_pressure:
self.calculate_max_pressure()
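    # A minimal construction sketch, left as a comment because building a Tube
    # reads material/pipe-schedule lookup data and can trigger Cantera
    # calculations. Every keyword shown is simply the default from the
    # signature above:
    #
    #   tube = Tube(
    #       material='316L',
    #       schedule='80',
    #       nominal_size='6',
    #       welded=False,
    #       fuel='CH4',
    #       oxidizer='O2:1, N2:3.76',
    #       equivalence_ratio=1,
    #       safety_factor=4,
    #   )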
class _UnitSystem:
def __init__(
self
):
self.ureg = pint.UnitRegistry()
self.quant = self.ureg.Quantity
class _Dimensions:
def __init__(self):
self.inner_diameter = None
self.outer_diameter = None
self.wall_thickness = None
def _pipe_schedules_import(self):
# collect pipe schedules
file_directory = os.path.join(
os.path.dirname(
os.path.relpath(__file__)
),
'lookup_data'
)
file_name = 'pipe_schedules.csv'
file_location = os.path.relpath(
os.path.join(
file_directory,
file_name
)
)
        self._schedules = pd.read_csv(file_location, index_col=0)
import unittest
import pathlib
import os
import pandas as pd
from enda.contracts import Contracts
from enda.timeseries import TimeSeries
class TestContracts(unittest.TestCase):
EXAMPLE_A_DIR = os.path.join(pathlib.Path(__file__).parent.absolute(), "example_a")
CONTRACTS_PATH = os.path.join(EXAMPLE_A_DIR, "contracts.csv")
def test_read_contracts_from_file(self):
contracts = Contracts.read_contracts_from_file(TestContracts.CONTRACTS_PATH)
self.assertEqual((7, 12), contracts.shape)
def test_check_contracts_dates(self):
contracts = Contracts.read_contracts_from_file(
TestContracts.CONTRACTS_PATH,
date_start_col="date_start",
date_end_exclusive_col="date_end_exclusive",
date_format="%Y-%m-%d"
)
# check that it fails if the given date_start_col is not there
with self.assertRaises(ValueError):
Contracts.check_contracts_dates(
contracts,
date_start_col="dummy",
date_end_exclusive_col="date_end_exclusive"
)
# check that it fails if one contract ends before it starts
c = contracts.copy(deep=True)
# set a wrong date_end_exclusive for the first contract
c.loc[0, "date_end_exclusive"] = pd.to_datetime("2020-09-16")
with self.assertRaises(ValueError):
Contracts.check_contracts_dates(
c,
date_start_col="dummy",
date_end_exclusive_col="date_end_exclusive"
)
@staticmethod
def get_simple_portfolio_by_day():
contracts = Contracts.read_contracts_from_file(TestContracts.CONTRACTS_PATH)
contracts["contracts_count"] = 1 # add a variable to count the number of contracts for each row
# count the running total, each day, of some columns
portfolio_by_day = Contracts.compute_portfolio_by_day(
contracts,
columns_to_sum=["contracts_count", "subscribed_power_kva", "estimated_annual_consumption_kwh"],
date_start_col="date_start",
date_end_exclusive_col="date_end_exclusive"
)
return portfolio_by_day
def test_compute_portfolio_by_day_1(self):
"""" test with a single group """
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
# print(portfolio_by_day)
self.assertEqual((11, 3), portfolio_by_day.shape)
self.assertEqual(4, portfolio_by_day.loc["2020-09-26", "contracts_count"])
self.assertEqual(30, portfolio_by_day.loc["2020-09-26", "subscribed_power_kva"])
self.assertEqual(5, portfolio_by_day["contracts_count"].max())
self.assertEqual(48, portfolio_by_day["subscribed_power_kva"].max())
def test_compute_portfolio_by_day_2(self):
"""" test with 2 groups , and a single measure to sum"""
contracts = Contracts.read_contracts_from_file(TestContracts.CONTRACTS_PATH)
contracts_sm = contracts[contracts["smart_metered"]]
pf_sm = Contracts.compute_portfolio_by_day(contracts_sm, columns_to_sum=["subscribed_power_kva"])
contracts_slp = contracts[~contracts["smart_metered"]]
pf_slp = Contracts.compute_portfolio_by_day(contracts_slp, columns_to_sum=["subscribed_power_kva"])
# print(pf_sm, pf_slp)
self.assertEqual(pf_sm.shape, (5, 1))
self.assertEqual(pf_slp.shape, (11, 1))
self.assertEqual(18, pf_sm.loc["2020-09-20", "subscribed_power_kva"])
self.assertEqual(27, pf_slp.loc["2020-09-26", "subscribed_power_kva"])
def test_get_portfolio_between_dates_1(self):
""" test with a portfolio by day """
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
self.assertEqual(pd.to_datetime("2020-09-16"), portfolio_by_day.index.min())
self.assertEqual(pd.to_datetime("2020-09-26"), portfolio_by_day.index.max())
pf = Contracts.get_portfolio_between_dates(
portfolio_by_day,
start_datetime=pd.to_datetime("2020-09-10"),
end_datetime_exclusive=pd.to_datetime("2020-09-30")
)
# print(pf["contracts_count"])
self.assertEqual(pd.to_datetime("2020-09-10"), pf.index.min())
self.assertEqual(pd.to_datetime("2020-09-29"), pf.index.max())
self.assertEqual(0, pf.loc["2020-09-12", "contracts_count"])
self.assertEqual(4, pf.loc["2020-09-28", "contracts_count"])
def test_get_portfolio_between_dates_2(self):
""" test with a portfolio by 15min step """
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
tz_str = 'Europe/Berlin'
pf = TimeSeries.interpolate_daily_to_sub_daily_data(
portfolio_by_day,
freq='15min',
tz=tz_str
)
# print(pf)
self.assertEqual(pd.to_datetime("2020-09-16 00:00:00+02:00"), pf.index.min())
self.assertEqual(pd.to_datetime("2020-09-26 23:45:00+02:00"), pf.index.max())
self.assertIsInstance(pf.index, pd.DatetimeIndex)
self.assertEqual(tz_str, str(pf.index[0].tzinfo))
pf2 = Contracts.get_portfolio_between_dates(
pf,
start_datetime=pd.to_datetime("2020-09-10 00:00:00+02:00").tz_convert(tz_str),
end_datetime_exclusive=pd.to_datetime("2020-09-30 00:00:00+02:00").tz_convert(tz_str)
)
# print(pf2)
self.assertEqual(pd.to_datetime("2020-09-10 00:00:00+02:00"), pf2.index.min())
self.assertEqual(pd.to_datetime("2020-09-29 23:45:00+02:00"), pf2.index.max())
self.assertEqual(0, pf2.loc["2020-09-12 10:30:00+02:00", "contracts_count"])
self.assertEqual(4, pf2.loc["2020-09-27 05:15:00+02:00", "contracts_count"])
def test_forecast_portfolio_linear_1(self):
""" Test on a portfolio_by_day"""
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
# print(portfolio_by_day['subscribed_power_kva'])
# linear forecast using the 11 days gives an increasing trend
forecast_by_day_a = Contracts.forecast_portfolio_linear(
portfolio_by_day,
start_forecast_date=pd.to_datetime("2020-09-27"),
end_forecast_date_exclusive=pd.to_datetime("2020-09-30"),
freq='D'
)
# print(forecast_by_day_a['subscribed_power_kva'])
self.assertEqual((3, 3), forecast_by_day_a.shape)
self.assertEqual(pd.to_datetime("2020-09-29"), forecast_by_day_a.index.max())
self.assertGreaterEqual(forecast_by_day_a.loc["2020-09-27", "subscribed_power_kva"], 40)
self.assertGreaterEqual(forecast_by_day_a.loc["2020-09-29", "subscribed_power_kva"], 40)
# linear forecast using only the last 7 days gives a decreasing trend
forecast_by_day_b = Contracts.forecast_portfolio_linear(
portfolio_by_day[portfolio_by_day.index >= '2020-09-20'],
start_forecast_date=pd.to_datetime("2020-09-27"),
end_forecast_date_exclusive=pd.to_datetime("2020-10-02"),
freq='D'
)
# print(forecast_by_day_b['subscribed_power_kva'])
self.assertEqual((5, 3), forecast_by_day_b.shape)
self.assertEqual(pd.to_datetime("2020-10-01"), forecast_by_day_b.index.max())
self.assertLessEqual(forecast_by_day_b.loc["2020-09-27", "subscribed_power_kva"], 40)
self.assertLessEqual(forecast_by_day_b.loc["2020-09-29", "subscribed_power_kva"], 40)
def test_forecast_portfolio_linear_2(self):
"""Test on a portfolio at freq=7min"""
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
portfolio_by_20min = TimeSeries.interpolate_daily_to_sub_daily_data(portfolio_by_day,
freq='20min', tz='Europe/Paris')
# print(portfolio_by_20min)
# linear forecast_by_10min, give it a portfolio_by_20min to train
forecast_by_10min = Contracts.forecast_portfolio_linear(
portfolio_by_20min,
start_forecast_date=pd.to_datetime("2020-09-27 00:00:00+02:00").tz_convert("Europe/Paris"),
end_forecast_date_exclusive=pd.to_datetime("2020-09-30 00:00:00+02:00").tz_convert("Europe/Paris"),
freq='10min',
tzinfo='Europe/Paris'
)
# print(forecast_by_10min)
self.assertEqual((432, 3), forecast_by_10min.shape)
self.assertEqual("Europe/Paris", str(forecast_by_10min.index.tzinfo))
self.assertGreaterEqual(forecast_by_10min.loc["2020-09-27 00:00:00+02:00", "subscribed_power_kva"], 40)
self.assertGreaterEqual(forecast_by_10min.loc["2020-09-29 00:00:00+02:00", "subscribed_power_kva"], 40)
def test_forecast_portfolio_holt_1(self):
""" Test on a portfolio_by_day"""
portfolio_by_day = TestContracts.get_simple_portfolio_by_day()
# print(portfolio_by_day)
forecast_by_day = Contracts.forecast_portfolio_holt(
portfolio_by_day,
            start_forecast_date=pd.to_datetime("2020-09-27"),
import csv
import pandas as pd
import threading
from helpers.movie_helper import get_one_movie_resource_pt, get_one_movie_resource_en
def merge_links_movies():
# First we merge links and movies to have access to external TMDB API
links = pd.read_csv("../movie_data/links.csv", dtype=str)
movies = pd.read_csv("../movie_data/movies.csv", dtype=str)
result = pd.merge(links, movies, how="inner")
# There are 107 missing tmdbIds and we need to drop those movies
na = result[result['tmdbId'].isna()]
na[['movieId']].to_csv("../movie_data/remove_by_movieId.csv", index=False)
result = result.dropna()
# We are dropping 5061 movies with genre '(no genres listed)'
no_genres_index = result[result['genres'] == '(no genres listed)'].index
no_genre = result[result['genres'] == '(no genres listed)']
not_found = pd.read_csv("../movie_data/movies_not_found.csv", dtype=str)
removed = pd.concat([no_genre[['tmdbId']], not_found], ignore_index=True)
removed.to_csv("../movie_data/remove_by_tmdbId.csv", index=False)
result.drop(no_genres_index, inplace=True)
result.to_csv("../movie_data/dataset.csv", index=False)
def check_missing_movies():
complete = pd.read_csv("../movie_data/movie_details_complete.csv", dtype=str, lineterminator='\n')[['tmdbId']]
dataset = pd.read_csv("../movie_data/movie_details_1.csv", dtype=str)[['tmdbId']]
# not_found = pd.read_csv("../movie_data/movies_not_found.csv", dtype=str)
missing1 = pd.concat([dataset, complete]).drop_duplicates(keep=False)
# missing2 = pd.concat([not_found, missing1]).drop_duplicates(keep=False)
missing1.to_csv("../movie_data/missing_on_count.csv", index=False)
def merge_detail_files():
files = []
for i in range(1, 59):
file = pd.read_csv(f"../movie_details/movie_details_{i}.csv", dtype=str)
files.append(file)
    missing = pd.read_csv("../movie_details/movie_details_missing.csv", dtype=str)
import scipy as sp
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
import beatmap as bt
__all__ = [
"plot_isotherm_data",
"plot_ssa_heatmap",
"plot_err_heatmap",
"plot_bet",
"plot_isotherm_data",
"plot_isotherm_combo",
"plot_bet_combo",
]
figure_title_size = 18
legend_label_size = 14
legend_title_size = 16
axis_label_size = 16
axis_title_size = 16
def plot_isotherm_data(isotherm_data):
r"""Plot BET experimental isotherm data"""
source = pd.DataFrame(
{"P/Po": isotherm_data.iso_df.relp, "n (mol/g)": isotherm_data.iso_df.n}
)
temp = (
alt.Chart(source)
.mark_point(filled=True)
.encode(
y=alt.Y("n (mol/g)", axis=alt.Axis(format="~e", tickCount=len(source) / 4)),
x=alt.X("P/Po", axis=alt.Axis(format=".1")),
tooltip=["n (mol/g)", "P/Po"],
)
.configure_mark(opacity=0.7)
.configure_axis(
labelFontSize=axis_label_size, titleFontSize=axis_title_size, grid=True
)
.configure_point(size=100)
.properties(title="Experimental isotherm data", height=500, width=500)
.configure_title(fontSize=figure_title_size)
.interactive()
)
st.altair_chart(temp, use_container_width=True)
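# A hedged usage note: plot_isotherm_data only reads `iso_df.relp` and
# `iso_df.n`, so within a Streamlit app any object exposing that interface can
# be passed (names below are indicative only):
#
#   isotherm_data = ...  # object whose `iso_df` DataFrame has 'relp' and 'n' columns
#   plot_isotherm_data(isotherm_data)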
def plot_ssa_heatmap(bet_results, mask_results):
r"""Plot SSA heatmap"""
x, y = np.meshgrid(bet_results.iso_df.relp, bet_results.iso_df.relp)
temp = np.round(bet_results.ssa.copy(), 2)
temp[mask_results.mask] = 0
dmin = np.amin(temp[~mask_results.mask])
dmax = np.amax(temp[~mask_results.mask])
source = pd.DataFrame(
{
"Start relative pressure": x.ravel(),
"End relative pressure": y.ravel(),
"SSA": temp.ravel(),
}
)
hmap = (
alt.Chart(source)
.mark_rect(stroke="gray", strokeWidth=0.5)
.encode(
x=alt.X(
"Start relative pressure:O",
sort=alt.EncodingSortField(
"Start relative pressure", order="ascending",
),
axis=alt.Axis(
tickMinStep=2, tickCount=10, labelSeparation=5, format=",.2r"
),
),
y=alt.Y(
"End relative pressure:O",
sort=alt.EncodingSortField("End relative pressure", order="descending"),
axis=alt.Axis(
tickMinStep=2, tickCount=10, labelSeparation=5, format=",.2r"
),
),
color=alt.Color(
"SSA:Q", scale=alt.Scale(domain=[dmin, dmax], scheme="Greens")
),
tooltip=["SSA", "Start relative pressure", "End relative pressure"],
)
.configure_view(strokeWidth=0)
.configure_scale(bandPaddingInner=0.15)
.configure_axis(
labelFontSize=axis_label_size,
titleFontSize=axis_title_size,
domainColor="white",
)
.properties(title="Specific surface area [m^2/g]", height=600, width=670)
.configure_title(fontSize=figure_title_size)
.configure_legend(
padding=10,
strokeColor="white",
cornerRadius=10,
labelFontSize=legend_label_size,
titleFontSize=legend_title_size,
gradientLength=250,
tickCount=5,
offset=40,
)
.interactive()
)
st.altair_chart(hmap, use_container_width=True)
def plot_err_heatmap(bet_results, mask_results):
r"""Plot Error heatmap"""
x, y = np.meshgrid(bet_results.iso_df.relp, bet_results.iso_df.relp)
temp = np.round(bet_results.err.copy(), 2)
temp[mask_results.mask] = 0
dmin = np.amin(temp[~mask_results.mask])
dmax = np.amax(temp[~mask_results.mask])
source = pd.DataFrame(
{
"Start relative pressure": x.ravel(),
"End relative pressure": y.ravel(),
"Error": temp.ravel(),
}
)
hmap = (
alt.Chart(source)
.mark_rect(stroke="gray", strokeWidth=0.5)
.encode(
x=alt.X(
"Start relative pressure:O",
sort=alt.EncodingSortField(
"Start relative pressure", order="ascending",
),
axis=alt.Axis(
tickMinStep=2, tickCount=10, labelSeparation=5, format=",.2r"
),
),
y=alt.Y(
"End relative pressure:O",
sort=alt.EncodingSortField("End relative pressure", order="descending"),
axis=alt.Axis(
tickMinStep=2, tickCount=10, labelSeparation=5, format=",.2r"
),
),
color=alt.Color(
"Error:Q", scale=alt.Scale(domain=[dmin, dmax], scheme="Greys")
),
tooltip=["Error", "Start relative pressure", "End relative pressure"],
)
.configure_view(strokeWidth=0)
.configure_scale(bandPaddingInner=0.15)
.configure_axis(
labelFontSize=axis_label_size,
titleFontSize=axis_title_size,
domainColor="white",
)
.properties(title="Error", height=600, width=670,)
.configure_title(fontSize=figure_title_size)
.configure_legend(
padding=10,
strokeColor="white",
cornerRadius=10,
labelFontSize=legend_label_size,
titleFontSize=legend_title_size,
gradientLength=250,
tickCount=5,
offset=40,
)
.interactive()
)
st.altair_chart(hmap, use_container_width=True)
def plot_bet(bet_results, mask_results, ssa_answer):
r""""""
mask = mask_results.mask
df = bet_results.iso_df
ssa = np.ma.array(bet_results.ssa, mask=mask)
index = bt.utils.index_of_value(ssa, ssa_answer)
start = int(index[1])
stop = int(index[0])
slope, intercept, r_val, p_value, std_err = sp.stats.linregress(
df.relp[start : stop + 1], df.bet[start : stop + 1]
)
liney = np.zeros(2)
liney[0] = slope * (df.relp[start] - 0.01) + intercept
liney[1] = slope * (df.relp[stop] + 0.01) + intercept
linex = np.zeros(2)
linex[0] = df.relp[start] - 0.01
linex[1] = df.relp[stop] + 0.01
linreg_dict = {
" ": ["Slope", "Intercept", "r"],
"Trendline": [slope, intercept, r_val],
}
linreg_table = pd.DataFrame(data=linreg_dict)
line_source = pd.DataFrame({"P/Po": linex, "1/(n(P/Po-1))": liney})
line = (
alt.Chart(line_source)
.mark_line()
.encode(
y=alt.Y("1/(n(P/Po-1))", axis=alt.Axis(grid=False)),
x=alt.X("P/Po", axis=alt.Axis(format=".2", grid=False)),
)
.properties(title="BET plot", height=500, width=500)
)
line = (
alt.layer(line)
.configure_axis(
labelFontSize=axis_label_size, titleFontSize=axis_title_size, grid=True
)
.configure_title(fontSize=figure_title_size)
.configure_point(size=100)
)
data_source = pd.DataFrame(
{
"P/Po": df.relp[start : stop + 1],
"1/(n(P/Po-1))": np.round(df.bet[start : stop + 1], 2),
" ": len(df.bet[start : stop + 1])
* [
"Min. Error \
Experimental Data"
],
}
)
data = (
alt.Chart(data_source)
.mark_point(filled=True)
.encode(
y=alt.Y("1/(n(P/Po-1))", axis=alt.Axis(grid=False)),
x=alt.X("P/Po", axis=alt.Axis(format=".2", grid=False)),
tooltip=["1/(n(P/Po-1))", "P/Po"],
)
.interactive()
)
st.altair_chart(line + data, use_container_width=True)
return linreg_table
def plot_isotherm_combo(bet_results, mask_results, ssa_answer):
r"""Plot BET experimental isotherm data"""
mask = mask_results.mask
df = bet_results.iso_df
nm = np.ma.array(bet_results.nm, mask=mask)
c = np.ma.array(bet_results.c, mask=mask)
ssa = np.ma.array(bet_results.ssa, mask=mask)
index = bt.utils.index_of_value(ssa, ssa_answer)
start = int(index[0])
stop = int(index[1])
c_value = c[start, stop]
nnm = nm[start, stop]
ppo = np.arange(0, 0.9001, 0.001)
synth = 1 / (1 - ppo) - 1 / (1 + (c_value - 1) * ppo)
expnnm = df.n / nnm
expnnm_min_used = expnnm[stop : start + 1]
ppo_expnnm_min_used = df.relp[stop : start + 1]
model_source = pd.DataFrame(
{"P/Po": ppo, "n/nm": synth, " ": len(synth) * ["Model Isotherm"]}
)
model = (
alt.Chart(model_source)
.mark_line()
.encode(
y=alt.Y("n/nm", axis=alt.Axis(grid=False)),
x=alt.X("P/Po", axis=alt.Axis(format=".2", grid=False)),
color=" ",
)
.properties(title="Experimental data and model isotherm", height=480, width=622)
)
experimental_source = pd.DataFrame(
{
"P/Po": bet_results.iso_df.relp,
"n/nm": np.round(expnnm, 2),
" ": len(expnnm) * ["Experimental Data"],
}
)
experimental = (
alt.Chart(experimental_source)
.mark_point()
.encode(
y=alt.Y("n/nm", axis=alt.Axis(grid=False)),
x=alt.X("P/Po", axis=alt.Axis(format=".2", grid=False)),
opacity=" ",
tooltip=["n/nm", "P/Po"],
)
.interactive()
)
used_source = pd.DataFrame(
{
"P/Po": ppo_expnnm_min_used,
"n/nm": expnnm_min_used,
" ": len(expnnm_min_used) * ["Min. Error Experimental Data"],
}
)
experimental_used = (
alt.Chart(used_source)
.mark_point(filled=True)
.encode(
y=alt.Y("n/nm", axis=alt.Axis(grid=False)),
x=alt.X("P/Po", axis=alt.Axis(format=".2", grid=False)),
shape=" ",
)
)
chart = (
alt.layer(model, experimental, experimental_used)
.configure_axis(
labelFontSize=axis_label_size, titleFontSize=axis_title_size, grid=True
)
.configure_title(fontSize=figure_title_size)
.configure_legend(labelFontSize=legend_label_size)
)
st.altair_chart(chart)
def plot_bet_combo(bet_results, mask_results):
r""""""
mask = mask_results.mask
df = bet_results.iso_df
err = np.ma.array(bet_results.err, mask=mask)
err_max, err_max_idx, err_min, err_min_idx = bt.utils.max_min(err)
min_start = int(err_min_idx[1])
min_stop = int(err_min_idx[0])
max_start = int(err_max_idx[1])
max_stop = int(err_max_idx[0])
slope, intercept, r_val, p_value, std_err = sp.stats.linregress(
df.relp[min_start : min_stop + 1], df.bet[min_start : min_stop + 1]
)
min_liney = np.zeros(2)
min_liney[0] = slope * (df.relp[min_start] - 0.01) + intercept
min_liney[1] = slope * (df.relp[min_stop] + 0.01) + intercept
min_linex = np.zeros(2)
min_linex[0] = df.relp[min_start] - 0.01
min_linex[1] = df.relp[min_stop] + 0.01
slope_max, intercept_max, r_val_max, p_value_max, std_err_max = sp.stats.linregress(
df.relp[max_start : max_stop + 1], df.bet[max_start : max_stop + 1]
)
max_liney = np.zeros(2)
max_liney[0] = slope_max * (df.relp[max_start] - 0.01) + intercept_max
max_liney[1] = slope_max * (df.relp[max_stop] + 0.01) + intercept_max
max_linex = np.zeros(2)
max_linex[0] = df.relp[max_start] - 0.01
max_linex[1] = df.relp[max_stop] + 0.01
linreg_dict = {
" ": ["Slope", "Intercept", "r"],
"Min. Error Trendline": [slope, intercept, r_val],
"Max. Error Trendline": [slope_max, intercept_max, r_val_max],
}
    linreg_table = pd.DataFrame(data=linreg_dict)
__author__ = "<NAME>"
__copyright__ = "BMW Group"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from tsa import Logger
import sys
import numpy as np
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import argparse
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from statsmodels.tsa.seasonal import seasonal_decompose
from copy import copy, deepcopy
from scipy import stats
class UVariateTimeSeriesClass(object):
"""
Uni-variate time series class
Attributes:
_ts_df_cols - internal column names for dataframe that will be input to model
ts_df - time series data frame
freq - frequency of time series, possibilities ['S', 'min', 'H', 'D', 'W', 'M']
p_train - float value defining which part of data is to be used as training data. Note, value of 1.0 would mean
all data will be used as training data,
hence no test data will be generated.
timeformat - time format if time series data needs to be brought into datetime
#
_mode - defines the mode as 'test' or 'forecast'
_train_dt - training data
_test_dt - test data
model_fit - fitted model
fittedvalues - computed fitted values
residuals - residuals
rmse - RMSE on test set (test data and the forecast on test data)
upper_whisker_res - upper whisker for residuals
lower_conf_int - upper confidence interval
upper_conf_int - lower confidence interval
        forecast - computed forecasted values
residuals_forecast - residuals between forecasted and real values. Note, this variable exist only if test data
existed
Methods:
ts_transform() - transforms time series using log10 or box-cox
ts_resample() - resamples time series at the chosen frequency freq
_plot_residuals() - residual plots helper function
ts_test() - evaluates fitted model on the test data, if this one has been generated
ts_forecast() - forecasts time series and plots the results
_plot_forecast() - helper function for plotting forecasted time-series
        ts_decompose() - decomposes time series into seasonal, trend and residual components and plots the results
plot_decompose() - plots the results of ts_decompose()
Helper methods:
_prepare_fit() - prepares ts_fit of child class. Supposed to be called by a child class
_residuals() - helper function for calculating residuals. Supposed to be called by a child class
_check_ts_test() - checks for test. Supposed to be called by a child class
_check_ts_forecast() - checks for forecast. Supposed to be called by a child class
"""
def __init__(self, ts_df, time_format="%Y-%m-%d %H:%M:%S", freq='D', p_train=1.0, **kwds):
"""
Initializes the object UVariateTimeSeriesForecaster
"""
self._ts_df_cols = ['ds', 'y']
self.ts_df = ts_df
self.time_format = time_format
self.freq = freq
self.p_train = p_train
self.transform = None
self._boxcox_lmbda = None
self._mode = ''
self._train_dt = None
self._test_dt = None
self.model_fit = None
self.fittedvalues = None
self.residuals = None
self.rmse = None
self.upper_whisker_res = None
self.lower_conf_int = None
self.upper_conf_int = None
self.forecast = None
self.residuals_forecast = None
self.seasonal = None
self.trend = None
self.baseline = None
self._uvts_cls_logger = Logger('uvts_cls')
# Assertion Tests
try:
assert self.freq in ['S', 'min', 'H', 'D', 'W', 'M']
except AssertionError:
self._uvts_cls_logger.warning("freq should be in ['S', 'min', 'H', 'D', W', 'M']. "
"Assuming daily frequency!")
self.freq = 'D'
try:
self.p_train = float(self.p_train)
assert self.p_train > 0
except AssertionError:
self._uvts_cls_logger.error("p_train defines part of data on which you would train your model."
"This value cannot be less than or equal to zero!")
self._uvts_cls_logger.exception("Exception occurred, p_train")
except ValueError:
self._uvts_cls_logger.error("p_train must be convertible to float type!")
self._uvts_cls_logger.exception("Exception occurred, p_train")
else:
if int(self.p_train) < 1:
self._mode = 'test'
else:
self._mode = 'forecast'
try:
            assert not pd.DataFrame(self.ts_df).empty