from __future__ import annotations
import time
from typing import Any
import numpy as np
import numpy.typing as npt
import polars as pl
import pywt
from scipy import signal
from amplo.automl.feature_processing._base import (
PERFECT_SCORE,
BaseFeatureExtractor,
assert_double_index,
)
from amplo.automl.feature_processing.score_watcher import ScoreWatcher
from amplo.base.exceptions import NotFittedError
from amplo.utils.util import check_dtypes
__all__ = ["WaveletExtractor"]
class WaveletExtractor(BaseFeatureExtractor):
"""This class extracts wavelets, which carry frequency information.
Contrary to FFT, wavelets provide a trade-off between temporal and frequency info.
This class only works for multi-indexed classification data. This would also be
applicable to regression problems, as the data dimension is unchanged.
Parameters
----------
target : str | None, optional
Target column that must be present in data, by default None
mode : str | None, optional
Model mode: {"classification", "regression"}, by default "classification"
wavelets : list[str] | None, optional
List of wavelets to choose from.
If None, defaults to ["cmor1.5-1.0", "gaus4", "gaus7", "cgau2", "cgau6", "mexh"].
Each string must be a valid wavelet name (see notes), by default None
strategy : str, optional
Fitting strategy for feature extraction, by default "smart"
If "exhaustive", iterates over all feature-wavelet combinations.
If "random", iterates on randomly shuffled feature-wavelet combinations and
performs pooling on a random subset of `self.pooling` until end of iterator or
timeout is reached.
If "smart", similar to "random" but (1) skips unpromising features or wavelets
and (2) uses promising poolings only.
timeout : int, optional
Timeout in seconds for fitting, by default 1800
verbose : int, optional
Verbosity for logger, by default 0
Notes
-----
Valid ``wavelet`` parameters can be found via:
>>> import pywt
>>> pywt.wavelist()
"""
def __init__(
self,
target: str | None = None,
mode: str | None = "classification",
wavelets: list[str] | None = None,
strategy: str = "smart",
timeout: int = 1800,
verbose: int = 0,
) -> None:
super().__init__(target=target, mode=mode, verbose=verbose)
# Assert classification or notset
if self.mode and self.mode != "classification":
raise NotImplementedError("Only mode 'classification' supported.")
# Check inputs and set defaults
check_dtypes(
("wavelets", wavelets, (type(None), list)),
("strategy", strategy, str),
("timeout", timeout, int),
)
if wavelets is None:
wavelets = ["cmor1.5-1.0", "gaus4", "gaus7", "cgau2", "cgau6", "mexh"]
else:
check_dtypes(*(("wavelets__item", item, str) for item in wavelets))
if strategy not in ("smart", "random", "exhaustive"):
raise ValueError("Strategy should be 'smart', 'random' or 'exhaustive'.")
if timeout <= 0:
raise ValueError(f"Timeout must be strictly positive but got: {timeout}")
# Set attributes
self.wavelets = wavelets
self.strategy = strategy
self.timeout = timeout
self.peak_freqs_: dict[str, npt.NDArray[Any]] = {}
self.start_time: float | None = None
self.col_watch: ScoreWatcher | None = None
self.wav_watch: ScoreWatcher | None = None
def fit(self, data: pl.DataFrame, index_cols: list[str]): # type: ignore[override]
"""
Fit the wavelet extractor on the data.
Notes
-----
It's a trade-off between speed and memory to decide whether we want to directly
transform or fit first.
When fitting first, the features can be directly overwritten.
When transforming directly, we don't have to run the wavelet transform twice.
The wavelet transform is rather expensive.
"""
self.fit_transform(data, index_cols)
return self
def fit_transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
self.logger.info("Fitting wavelet extractor.")
data, index_cols, _ = assert_double_index(data, index_cols)
# Select data
x = data.drop([self.target, *index_cols])
y = data[self.target]
# Set baseline
self.initialize_baseline(x, y)
assert self._baseline_score is not None
if self._baseline_score > PERFECT_SCORE:
self.logger.info("Features are good, we're skipping feature aggregation.")
self.is_fitted_ = True
self.skipped_ = True
return data
# Set score watchers
if self.strategy == "smart":
self.col_watch = ScoreWatcher(x.columns)
self.wav_watch = ScoreWatcher(self.wavelets)
# Initialize
self.set_peak_freqs(x)
features: list[pl.DataFrame] = []
self.start_time = time.time()
for col, wav in self.get_wavelet_combinations(x):
if self.should_skip_col_wav(col, wav):
continue
if time.time() - self.start_time > self.timeout:
self.logger.info("Timeout reached, skipping rest.")
break
self.logger.debug(f"Fitting: {wav}, {col}")
# Extract wavelets
wav_features = self.extract_wavelets(x, wav, col)
wav_scores: dict[str, float] = {}
for c in wav_features.columns:
wav_scores[c] = self.calc_feature_score(wav_features[c], y)
# Add score
if self.strategy == "smart" and self.col_watch and self.wav_watch:
self.col_watch.update(col, sum(wav_scores.values()), len(wav_scores))
self.wav_watch.update(wav, sum(wav_scores.values()), len(wav_scores))
# Check if good enough and add
selected_cols = self.select_scores(wav_scores)
features.append(wav_features.select(selected_cols))
self.add_features(selected_cols)
self.logger.debug(
f"Accepted {len(selected_cols)} / {len(wav_scores)} wavelet features "
f"for {col.ljust(100)} (baseline: {self._baseline_score} / score: "
f"{max(wav_scores.values())})"
)
self.is_fitted_ = True
self.logger.info(f"Accepted {len(features)} wavelet-transformed features.")
return pl.concat([data[index_cols], *features, y.to_frame()], how="horizontal")
def transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
if not self.is_fitted_:
raise NotFittedError
if self.skipped_:
return data
data, index_cols, _ = assert_double_index(data, index_cols)
# Get columns and wavelet info
x_out = []
for f in self.features_:
col, _, wav, scale = f.split("__")
x_out.append(self.extract_wavelet(data, wav, col, scale=float(scale)))
if self.target in data:
x_out.append(data[self.target])
return pl.concat([data[index_cols], pl.DataFrame(x_out)], how="horizontal")
def extract_wavelets(
self,
data: pl.DataFrame,
wavelet: str,
column: str,
scales: list[float] | None = None,
) -> pl.DataFrame:
"""Calculates a wavelet.
Parameters
----------
data : pl.DataFrame
wav : str
col : str
"""
if scales is None:
# Use the fact: scale = s2f_const / frequency
fs = 1.0
s2f_const = pywt.scale2frequency(wavelet, scale=1) * fs
scales = np.round(s2f_const / self.peak_freqs_[column], 2).tolist()
assert isinstance(scales, list)
# Transform and return
coeffs, _ = pywt.cwt(data[column].to_numpy(), scales=scales, wavelet=wavelet)
columns = [f"{column}__wav__{wavelet}__{scale}" for scale in scales]
return pl.from_numpy(coeffs.real, columns=columns, orient="col")
def extract_wavelet(
self, data: pl.DataFrame, wavelet: str, column: str, scale: int | float
) -> pl.Series:
"""Extracts a single wavelet"""
df = self.extract_wavelets(data, wavelet, column, [scale])
return df[df.columns[0]]
def should_skip_col_wav(self, col: str, wav: str) -> bool:
"""Checks whether current iteration of column / function should be skipped.
Parameters
----------
col : str
func : str
"""
# Check score watchers
if self.strategy == "smart":
if self.col_watch is None or self.wav_watch is None:
raise ValueError("Watchers are not set.")
if self.col_watch.should_skip(col) or self.wav_watch.should_skip(wav):
self.logger.debug(f"Scorewatcher skipped: {wav}, {col}")
return True
return False
def get_wavelet_combinations(self, data: pl.DataFrame) -> list[tuple[str, str]]:
"""Returns all column - wavelet combinations.
parameters
----------
data : pl.DataFrame
"""
rng = np.random.default_rng(236868)
col_wav_iterator: list[tuple[str, str]] = [
(col, wav)
for col in data.columns
for wav in self.wavelets
if self.peak_freqs_[col].size > 0
]
if self.strategy in ("random", "smart"):
rng.shuffle(col_wav_iterator)
return col_wav_iterator
def set_peak_freqs(self, data: pl.DataFrame, fs: float = 1.0) -> None:
"""Calculates the frequencies where the PSD has the highest magnitude.
Parameters
----------
data : pl.DataFrame
fs : float
Sampling frequency, by default 1.0
"""
self.peak_freqs_ = {}
for col in data.columns:
freqs, pxx = signal.welch(x=data[col], fs=fs)
if max(pxx) < 1e-3:
self.peak_freqs_[col] = np.array([])
continue
peak_idx, _ = signal.find_peaks(np.log(pxx), prominence=0.3, distance=10)
self.peak_freqs_[col] = freqs[peak_idx]
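# --- Hedged usage sketch (not part of the Amplo source) ----------------------
# Demonstrates the scale <-> frequency identity that extract_wavelets relies
# on: scale = scale2frequency(wavelet, 1) * fs / peak_frequency. The synthetic
# signal, tone frequency and wavelet choice below are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    import pywt

    fs = 1.0  # sampling frequency, as assumed by set_peak_freqs
    t = np.arange(1024)
    sig = np.sin(2 * np.pi * 0.05 * t)  # a 0.05 Hz tone

    wavelet = "mexh"
    s2f_const = pywt.scale2frequency(wavelet, scale=1) * fs
    scale = s2f_const / 0.05  # scale whose centre frequency is 0.05 Hz
    coeffs, freqs = pywt.cwt(sig, scales=[scale], wavelet=wavelet)
    print(coeffs.shape)  # (1, 1024): one row of coefficients per scale
    print(freqs)         # centre frequency per scale, ~0.05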
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/wavelet_extractor.py ===
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import numpy as np
import numpy.typing as npt
import polars as pl
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from typing_extensions import Self
from amplo.base import BaseTransformer, LoggingMixin
from amplo.utils.util import check_dtypes, unique_ordered_list
__all__ = [
"assert_double_index",
"check_data",
"BaseFeatureExtractor",
"PERFECT_SCORE",
]
PERFECT_SCORE = -1e-3
def assert_double_index(
data: pl.DataFrame, index_cols: list[str], allow_single: bool = False
) -> tuple[pl.DataFrame, list[str], bool]:
"""
Checks whether provided data has a valid double-index.
Parameters
----------
data : pl.DataFrame
Data to be checked.
index_cols : list[str]
Index column names. Only ['index'] and ['log', 'index'] are accepted.
allow_single : bool, optional
Add an index level if True and data is single-indexed, by default False
Returns
-------
data : pl.DataFrame
Checked data. May have an additional column due to index insertion.
index_cols : list[str]
Index column names. May have additional element due to index insertion.
was_double_index : bool
Evaluates to True when the input data was double-indexed already.
"""
# Init helpers
LOG, INDEX = "log", "index"
was_double_index = True
# Check index size and act if necessary
if index_cols == [INDEX]:
was_double_index = False
if not allow_single:
raise ValueError(
"Data must be double-indexed. Got a single-indexed instead."
)
# Add log column, raise if exists already
if LOG in data.columns:
raise RuntimeError(f"New index column '{LOG}' already exists.")
index_cols = [LOG, INDEX] # assure that new index is first level
# NOTE: 'pl.lit(0)' creates a new, '0'-filled column
data = data.with_column(pl.lit(0).cast(pl.Int8).alias(LOG))
elif index_cols != [LOG, INDEX]:
if allow_single:
raise ValueError(
f"Data must be single- or double-indexed by ['{INDEX}'] or "
f"['{LOG}', '{INDEX}'], respectively. Got {index_cols} instead."
)
raise ValueError(
f"Data must be double-indexed by ['{LOG}', '{INDEX}']. "
f"Got {index_cols} instead."
)
# Assert that indices exist
if not set(index_cols).issubset(data.columns):
raise ValueError("Index columns are not present in data.")
return data, index_cols, was_double_index
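# --- Hedged usage sketch (not part of the Amplo source) ----------------------
# assert_double_index passes ['log', 'index'] data through untouched; with
# allow_single=True it promotes ['index'] data by injecting a constant 'log'
# level (via the older polars 'with_column' API used above). Toy data below.
if __name__ == "__main__":
    df = pl.DataFrame({"index": [0, 1, 2], "value": [1.0, 2.0, 3.0]})
    df2, idx, was_double = assert_double_index(df, ["index"], allow_single=True)
    print(idx, was_double)  # ['log', 'index'] False
    print(df2.columns)      # ['index', 'value', 'log']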
def check_data(data: pl.DataFrame, allow_double_underscore: bool = False) -> None:
"""
Checks validity of data.
Parameters
----------
data : pl.DataFrame
Raises
------
ValueError
"""
check_dtypes(("data", data, pl.DataFrame))
# Various checks
if any("__" in str(col) for col in data.columns) and not allow_double_underscore:
raise ValueError("Column names cannot contain '__' (double underscores).")
if data.fill_nan(None).null_count().max(axis=1).max() > 0: # type: ignore[operator]
raise ValueError("Data contains NaN.")
if not all(
data[col].dtype
in (
pl.Float32,
pl.Float64,
pl.Int8,
pl.Int16,
pl.Int32,
pl.Int64,
pl.UInt8,
pl.UInt16,
pl.UInt32,
pl.UInt64,
)
for col in data.columns
):
raise ValueError("Data contains non-numeric data.")
if data.max(axis=1).max() > 1e12 or data.min(axis=1).min() < -1e12: # type: ignore[operator]
raise ValueError("Data contains extreme values.")
class BaseFeatureExtractor(BaseTransformer, LoggingMixin):
"""
Base class for feature extractors.
Fitted attributes:
Extracted feature names are stored in "features_".
Parameters
----------
target : str | None, optional
Target column that must be present in data, by default None
mode : str | None, optional
Model mode: {"classification", "regression"}, by default None
verbose : int, optional
Verbosity for logger, by default 0
"""
def __init__(self, target: str | None = None, mode: str | None = None, verbose=0):
BaseTransformer.__init__(self)
LoggingMixin.__init__(self, verbose=verbose)
self.target = target or ""
self.mode = mode or ""
self.features_: list[str] = []
self._validation_model = self.get_validation_model()
self._baseline_score: float = -np.inf
self.skipped_: bool = False
self.is_fitted_ = False
@abstractmethod
def fit(self, data: pl.DataFrame, index_cols: list[str]) -> Self: # type: ignore[override]
...
@abstractmethod
def transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
...
@abstractmethod
def fit_transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
...
def set_params(self, **params):
super().set_params(**params)
self._validation_model = self.get_validation_model()
return self
def set_features(self, features: list[str] | str) -> None:
"""
(Re-)set the features_ attribute.
Parameters
----------
features : typing.Iterable of str
"""
# Check input
if isinstance(features, str):
features = [features]
for x in features:
check_dtypes(("feature_item", x, str))
# Apply
self.features_ = sorted(features)
def add_features(self, features: list[str] | str | pl.DataFrame) -> None:
"""
Add items to the features_ attribute.
Parameters
----------
features : typing.Iterable of str
"""
# Check input
if isinstance(features, str):
self.features_.append(features)
elif isinstance(features, list):
self.features_.extend(features)
elif isinstance(features, pl.DataFrame):
self.features_.extend(features.columns)
else:
raise NotImplementedError
self.features_ = unique_ordered_list(self.features_)
def get_validation_model(self) -> DecisionTreeClassifier | DecisionTreeRegressor:
"""
Get the validation model for feature scoring.
"""
if self.mode == "classification":
return DecisionTreeClassifier(
max_depth=3,
class_weight="balanced",
random_state=19483,
)
elif self.mode == "regression":
self.logger.warning(
"There are known scoring issues for the DecisionTreeRegressor, as it is"
" inherently bad at extrapolation."
)
return DecisionTreeRegressor(
max_depth=3,
random_state=19483,
)
else:
raise AttributeError(f"Invalid mode: '{self.mode}'")
def initialize_baseline(self, x: pl.DataFrame, y: pl.Series):
"""
Initializes the baseline score of the given features.
Parameters
----------
x : pl.DataFrame
Feature data.
y : pl.Series
Target data.
"""
# Calculate feature score for each column, keep the max
self.logger.debug("Calculating baseline score for each column.")
baseline = float("-inf")
for col in x.columns:
col_score = self.calc_feature_score(x[col], y)
baseline = max(baseline, col_score)
self._baseline_score = baseline
if self._baseline_score > -1e-3:
self.logger.info(
"Baseline score large enough to skip feature extraction: "
f"{self._baseline_score}"
)
self.logger.debug(f"Initialized the baseline score to {self._baseline_score}")
def calc_feature_score(self, feature: pl.Series, y: pl.Series) -> float:
"""
Analyses and scores a feature.
Parameters
----------
feature : pl.Series
Feature to be analysed.
y : pl.Series
Target data (for scoring).
Returns
-------
score : float
Feature score (mean score over cross-validation folds).
"""
# (Re-)fit validation model.
# Note that we do not make a train-test split. In this case, it makes sense as
# we only fit a shallow tree (max_depth=3). Because of that the model cannot
# really overfit.
if self.mode == "classification":
if len(y.unique()) > 2:
self.logger.warning("We're not scoring features per class.")
return np.mean(
cross_val_score(
self._validation_model,
feature.to_numpy().reshape((-1, 1)),
y.to_numpy(),  # 1-D targets, as sklearn expects
scoring="neg_log_loss",
cv=2,
)
)
elif self.mode == "regression":
return np.mean(
cross_val_score(
self._validation_model,
feature.to_numpy().reshape((-1, 1)),
y.to_numpy(),  # 1-D targets, as sklearn expects
scoring="neg_mean_squared_error",
cv=2,
)
)
raise AttributeError("Invalid mode.")
def update_baseline(self, scores: npt.NDArray[Any] | float) -> None:
"""
Update the baseline scores.
Parameters
----------
scores : npt.NDArray[Any] | float
Scores where each column contains the scores for the given feature.
"""
if self._baseline_score is None:
raise ValueError("Baseline not yet set.")
if isinstance(scores, float):
self._baseline_score = max(self._baseline_score, scores)
else:
self._baseline_score = max(self._baseline_score, np.max(scores))
def accept_feature(self, scores: npt.NDArray[Any] | float) -> bool:
"""
Decides whether to accept a new feature.
Parameters
----------
scores : array of float
Scores for checking against baseline threshold.
Returns
-------
bool
Whether to accept the feature.
"""
if self._baseline_score is None:
self.logger.warning("No baseline score is set. Output will be False.")
return False
# If score is within 1% of baseline, accept.
# NOTE: these scores are negative (neg_log_loss & neg_mean_square_error)
if isinstance(scores, float):
return scores >= self.weight_scheduler * self._baseline_score
return any(scores >= self.weight_scheduler * self._baseline_score)
@property
def weight_scheduler(self) -> float:
"""
We want to be lenient with adding features in the beginning, and stricter
in the end to avoid adding too many features.
"""
CUTOFF = 50
# If scores are negative
if self._baseline_score < 0:
if len(self.features_) >= CUTOFF:
return 0.98
return 2 - np.log(len(self.features_) + 1) / np.log(CUTOFF + 1)
# And if scores are positive
if len(self.features_) >= CUTOFF:
return 1.02
return np.log(len(self.features_) + 1) / np.log(CUTOFF + 1)
def select_scores(
self, scores: dict[str, float], update_baseline=True
) -> list[str]:
"""
Scores and selects each feature column.
Parameters
----------
scores : dict[str, float]
Scores to be selected.
update_baseline : bool
Whether to update the baseline scores.
Returns
-------
list[str]
Names of the accepted features.
"""
check_dtypes(("scores", scores, dict))
if len(scores) == 0:
return []
accepted = []
for key, value in scores.items():
if self.accept_feature(value):
accepted.append(key)
if update_baseline:
self._baseline_score = max(self._baseline_score, max(scores.values()))
return accepted
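# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Traces the weight_scheduler leniency curve: with negative scores
# (neg_log_loss / neg_mean_squared_error), accept_feature requires
# score >= factor * baseline, and the factor tightens from 2.0 towards 0.98
# as more features get accepted (CUTOFF = 50).
if __name__ == "__main__":
    CUTOFF = 50
    for n_features in (0, 1, 10, 25, 49, 50, 100):
        if n_features >= CUTOFF:
            factor = 0.98
        else:
            factor = 2 - np.log(n_features + 1) / np.log(CUTOFF + 1)
        print(f"{n_features:>3} features accepted -> factor {factor:.3f}")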
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/_base.py ===
from __future__ import annotations
from typing import Callable
import polars as pl
from polars import internals as pli
__all__ = ["POOL_FUNCTIONS", "pl_pool"]
def root_mean_square(column: str) -> pli.Expr:
return pl.col(column).pow(2).mean().pow(0.5)
def sum_values(column: str) -> pli.Expr:
return pl.col(column).sum()
def abs_energy(column: str) -> pli.Expr:
return pl.col(column).dot(pl.col(column))
def abs_max(column: str) -> pli.Expr:
return pl.col(column).abs().max()
def n_mean_crossings(column: str) -> pli.Expr:
"""
Calculates the number of crossings of x on mean.
A crossing is defined as two sequential values where the first value is lower than
mean and the next is greater, or vice-versa.
"""
return ((pl.col(column) - pl.col(column).mean()).sign().diff() != 0).sum()
def abs_sum_of_changes(column: str) -> pli.Expr:
return pl.col(column).diff().abs().sum()
def mean_of_changes(column: str) -> pli.Expr:
return pl.col(column).diff().mean()
def abs_mean_of_changes(column: str) -> pli.Expr:
return pl.col(column).diff().abs().mean()
def cid_ce(column: str) -> pli.Expr:
"""Calculates an estimate for a time series complexity."""
parsed = ((pl.col(column) - pl.col(column).mean()) / pl.col(column).std()).diff()
return parsed.dot(parsed).sqrt()
def linear_trend(column: str) -> pli.Expr:
"""OLS coeff"""
ind = (pl.col("index") + 1).alias(column)
return (ind.max() * ind.dot(pl.col(column)) - pl.col(column).sum() * ind.sum()) / (
ind.max() * ind.pow(2).sum() - ind.sum().pow(2)
)
def linear_trend_bias(column: str) -> pli.Expr:
"""OLS bias"""
return pl.col(column).mean() - linear_trend(column) * (pl.col("index") + 1).mean()
def linear_trend_error(column: str) -> pli.Expr:
"""Errors of OLS fit"""
error = (
pl.col(column)
- linear_trend(column) * (pl.col("index") + 1)
- linear_trend_bias(column)
)
return error.pow(2).sum()
def peak_loc(column: str, n: int) -> pli.Expr:
"""Returns the value of peak n
The concatenation is necessary to handle missing peaks.
This results in a value of -1
"""
parsed = pl.col(column)
peaks = parsed.diff().sign().diff() == -2
return pl.arg_where(peaks.cumsum() == n).first().fill_null(0).apply(lambda x: x - 1)
def peak_val(column: str, n: int) -> pli.Expr:
"""Returns the location of peak n"""
parsed = pl.col(column)
return parsed.filter(pl.col("index") == peak_loc(column, n)).first().fill_null(-1.0)
# ----------------------------------------------------------------------
# Globals
POOL_FUNCTIONS: dict[str, Callable[..., pli.Expr]] = {
# --- Basics ---
"min": pl.min,
"max": pl.max,
"mean": pl.mean,
"std": pl.std,
"median": pl.median,
"variance": pl.var,
"kurtosis": pl.Expr.kurtosis,
"skew": pl.Expr.skew,
"root_mean_square": root_mean_square,
"sum_values": sum_values,
# --- Characteristics ---
"entropy": pl.Expr.entropy,
"abs_energy": abs_energy,
"abs_max": abs_max,
"linear_trend": linear_trend,
"linear_trend_error": linear_trend_error,
"n_mean_crossings": n_mean_crossings,
# --- Difference ---
"abs_sum_of_changes": abs_sum_of_changes,
"mean_of_changes": mean_of_changes,
"abs_mean_of_changes": abs_mean_of_changes,
"cid_ce": cid_ce,
"peak_1_loc": lambda x: peak_loc(x, 1),
"peak_2_loc": lambda x: peak_loc(x, 2),
"peak_3_loc": lambda x: peak_loc(x, 3),
"peak_1_val": lambda x: peak_val(x, 1),
"peak_2_val": lambda x: peak_val(x, 2),
"peak_3_val": lambda x: peak_val(x, 3),
}
_EXTENDED_POOL_FUNCTIONS: dict[str, Callable[..., pli.Expr]] = {
"first": pl.first,
**POOL_FUNCTIONS,
}
def pl_pool(
df: pl.DataFrame,
column_name: str,
window_size: int,
func_str: str,
) -> pl.DataFrame:
"""
Pools series data with given aggregation functions.
Parameters
----------
df : pl.DataFrame
Dataframe to convert, containing the log, index and given columns.
column_name : str
Column to be pooled.
window_size : int
Window size for pooling.
func_str : str
Name of the pooling function to be used.
Returns
-------
pl.DataFrame
Pooled data where the new column name joins the original series name with the
pooling function name. For example, pooling a column named "series" with the
"min" function yields a column named "series__pool=min".
"""
# Set defaults
LOG, INDEX = "log", "index"
# Input check
if column_name not in df.columns:
raise ValueError(f"Column '{column_name}' is missing.")
if LOG not in df.columns or INDEX not in df.columns:
raise ValueError(f"Index columns ('{LOG}' and/or '{INDEX}') are missing.")
# Set pooling function and polars expression
func = _EXTENDED_POOL_FUNCTIONS[func_str]
alias = f"{column_name}__pool={func_str}"
if func.__module__ == "polars.internals.expr.expr":
expr = func(pl.col(column_name)).alias(alias)
else:
expr = func(column_name).alias(alias)
# Pool, ensure column order and return
pooled_df = (
df.groupby_dynamic(INDEX, every=f"{window_size}i", by=LOG).agg(expr).fill_nan(0)
)
return pooled_df[[LOG, INDEX, alias]]
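# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Evaluates a few of the pooling expressions above on a toy frame; assumes the
# same (older) polars version this module imports 'polars.internals' from.
if __name__ == "__main__":
    df = pl.DataFrame({"x": [0.0, 1.0, -1.0, 2.0, -2.0, 0.5]})
    print(
        df.select(
            [
                root_mean_square("x").alias("rms"),
                abs_energy("x").alias("abs_energy"),
                n_mean_crossings("x").alias("n_mean_crossings"),
                cid_ce("x").alias("cid_ce"),
            ]
        )
    )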
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/pooling.py ===
from dataclasses import dataclass
import numpy as np
from amplo.utils import check_dtypes
__all__ = ["Score", "ScoreWatcher"]
@dataclass
class Score:
count: int
weight: int
value: float
class ScoreWatcher:
"""
Watcher for scores.
Parameters
----------
keys : list of str
Keys for the watcher.
Attributes
----------
watch : dict[str, Score]
Keeps track of the counter and score of each watcher key.
"""
INITIAL_SCORE: float = 1.0
def __init__(self, keys: list[str]):
check_dtypes(("keys", keys, list))
for item in keys:
check_dtypes(("key__item", item, str))
self.watch: dict[str, Score] = {
key: Score(0, 0, self.INITIAL_SCORE) for key in keys
}
def __getitem__(self, key: str) -> tuple[int, float]:
"""
Get the counter and score for the given key.
Parameters
----------
key : str
Key of the watcher.
Returns
-------
tuple[int, float]
Count and score value for the given key.
"""
return self.watch[key].count, self.watch[key].value
def __repr__(self):
"""
Readable string representation of the class.
"""
return f"{self.__class__.__name__}({sorted(self.watch)})"
def update(self, key: str, score: float, weight: int = 1) -> None:
"""
Update a key of the watcher.
Parameters
----------
key : str
Watcher key.
score : float
Scoring value.
weight : int
Weight of the score.
"""
check_dtypes(("key", key, str), ("weight", weight, int))
if np.isnan(score):
raise ValueError("Cannot enter a NaN score.")
# The first real update replaces the initial placeholder score; the shared
# increments below then set count/weight to (1, weight).
if self.watch[key].count == 0:
self.watch[key] = Score(0, 0, score)
else:
self.watch[key].value = (
self.watch[key].weight * self.watch[key].value + weight * score
) / (self.watch[key].weight + weight)
self.watch[key].count += 1
self.watch[key].weight += weight
def should_skip(self, key) -> bool:
"""Function which determines whether to skip an iteration
NOTE: the 3 sigma threshold has been empirically established, to find a balance
between skipping and trying combinations.
"""
if (
self.watch[key].count > 10
and self.watch[key].value < self.mean() - self.std() * 3
):
return True
return False
def mean(self) -> float:
"""
Calculate the mean of all scores.
Returns
-------
float
Mean of all scores.
"""
return sum(list(map(lambda x: x.value, self.watch.values()))) / len(self.watch)
def std(self) -> float:
"""
Calculate the standard deviation of all scores.
Returns
-------
float
Standard deviation of all scores.
"""
return np.std(list(map(lambda x: x.value, self.watch.values())))
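# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Walkthrough of the watcher: each update folds a score into a weighted running
# mean, and should_skip() only fires for keys with more than 10 updates that sit
# over 3 sigma below the mean of all keys. With a single outlier among N keys
# its z-score is bounded by sqrt(N - 1), so 20 keys are used here. Key names
# and scores are illustrative.
if __name__ == "__main__":
    keys = [f"col_{i}" for i in range(20)]
    watcher = ScoreWatcher(keys)
    for _ in range(12):  # should_skip requires count > 10
        for key in keys[:-1]:
            watcher.update(key, score=-0.1)  # consistently decent columns
        watcher.update(keys[-1], score=-50.0)  # one consistently poor column
    print(watcher["col_19"])              # (count, running score)
    print(watcher.should_skip("col_19"))  # True: far below the mean score
    print(watcher.should_skip("col_0"))   # False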
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/score_watcher.py ===
from __future__ import annotations
import numpy as np
import polars as pl
from shap import TreeExplainer
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from amplo.base import BaseTransformer, LoggingMixin
from amplo.base.exceptions import NotFittedError
from amplo.classification import CatBoostClassifier
from amplo.regression import CatBoostRegressor
from amplo.utils import check_dtypes
class FeatureSelector(BaseTransformer, LoggingMixin):
"""
Class that, given a dataset, analyses feature importance and creates feature
sets that only include informative features.
This is done in two ways:
- Using the mean decrease in Gini impurity of a random forest
- Using SHapley Additive exPlanations (SHAP values)
Per method, two feature sets are created: increment and threshold.
The increment set takes every feature that contributes more than `selection_increment`.
The threshold set takes the n highest features that combined carry a `selection_cutoff`
fraction of the total feature importance.
Parameters
----------
target : str
mode : str
selection_cutoff : float
selection_increment : float
feature_set : str
Can be set for transformation. Does not limit the selection process.
analyse_feature_sets : str, default='auto'
Either 'auto', 'all', 'gini' or 'shap'. Note that shap can be slow for large
sample sets.
verbose : int
"""
def __init__(
self,
target: str,
mode: str,
selection_cutoff: float = 0.85,
selection_increment: float = 0.005,
feature_set: str | None = None,
analyse_feature_sets: str = "auto",
verbose: int = 1,
) -> None:
BaseTransformer.__init__(self)
LoggingMixin.__init__(self, verbose=verbose)
check_dtypes(
("target", target, str),
("mode", mode, str),
("selection_cutoff", selection_cutoff, float),
("selection_increment", selection_increment, float),
)
self.target = target
self.mode = mode
self.selection_cutoff = selection_cutoff
self.selection_increment = selection_increment
self.analyse_feature_sets = analyse_feature_sets
self.feature_set: str | None = feature_set
self.feature_sets_: dict[str, list[str]] = {}
self.feature_importance_: dict[str, dict[str, float]] = {}
def fit(self, data: pl.DataFrame, index_cols: list[str]): # type: ignore[override]
"""Fits this feature selector.
If feature set is provided, it only selects using the corresponding method.
Parameters
----------
data : pl.DataFrame
Fitting data.
index_cols : list[str]
Column names of index.
"""
self.logger.info("Fitting feature selector.")
# Select x and y data
x = data.drop([self.target, *index_cols])
y = data[self.target]
if self.feature_set:
if "rf" in self.feature_set:
self.select_gini_impurity(x, y)
elif "shap" in self.feature_set:
self.select_shap(x, y)
else:
raise ValueError("Unknown provided feature set")
else:
if self.analyse_feature_sets in ("auto", "all", "gini"):
self.select_gini_impurity(x, y)
if self.analyse_feature_sets in ("all", "shap") or (
self.analyse_feature_sets == "auto" and len(data) < 50_000
):
self.select_shap(x, y)
self.is_fitted_ = True
return self
def transform( # type: ignore[override]
self, data: pl.DataFrame, index_cols: list[str], feature_set: str | None = None
) -> pl.DataFrame:
"""Transforms feature sets
Parameters
----------
data : pl.DataFrame
Transforming data.
index_cols : list[str]
Column names of index.
feature_set : str, optional
When not provided, the union of all feature sets is returned.
Returns
-------
pl.DataFrame
Transformed data.
"""
if not self.is_fitted_:
raise NotFittedError
# Update feature set
if feature_set:
self.feature_set = feature_set
elif not self.feature_set:
self.logger.warning("Feature set not set, returning all features.")
# Features_ is given from feature_set, so we can directly return
if self.target in data:
return data[[*index_cols, *self.features_, self.target]]
return data[[*index_cols, *self.features_]]
def fit_transform( # type: ignore[override]
self, data: pl.DataFrame, index_cols: list[str], feature_set: str | None = None
) -> pl.DataFrame:
"""
Fits and transforms.
Parameters
----------
data : pl.DataFrame
Fitting and transforming data.
index_cols : list[str]
Column names of index.
feature_set : str | None, optional
When not provided, the union of all feature sets is returned., by default None
Returns
-------
pl.DataFrame
Transformed data.
"""
return self.fit(data, index_cols).transform(data, index_cols, feature_set)
def select_gini_impurity(self, x: pl.DataFrame, y: pl.Series) -> None:
"""
Selects features based on the random forest feature importance.
Calculates the mean decrease in Gini impurity, averaged over multiple
features and multiple tree ensembles.
Parameters
----------
x : pl.DataFrame
y : pl.Series
"""
self.logger.info("Analysing feature importance: Gini impurity.")
# Set model
rs = np.random.RandomState(seed=236868)
if self.mode == "regression":
forest = RandomForestRegressor(random_state=rs)
elif self.mode in ("classification", "multiclass"):
forest = RandomForestClassifier(random_state=rs)
else:
raise ValueError("Invalid mode.")
forest.fit(x.to_pandas(), y.to_pandas())
# Get RF values
fi = forest.feature_importances_
fi /= fi.sum()
# Convert to dict
self.feature_importance_["rf"] = self.sort_dict(
{k: v for k, v in zip(x.columns, fi)}
)
# Make feature sets
self.make_threshold("rf")
self.make_increment("rf")
self.logger.info(
f"Selected {len(self.feature_sets_['rf_threshold'])} features with "
f"{self.selection_cutoff * 100:.2f}% RF treshold."
)
self.logger.info(
f"Selected {len(self.feature_sets_['rf_increment'])} features with "
f"{self.selection_increment * 100:.2f}% RF increment."
)
def select_shap(self, x: pl.DataFrame, y: pl.Series) -> None:
"""
Calculates SHAP (SHapley Additive exPlanations) values as a measure of
feature importance.
Parameters
----------
x : pl.DataFrame
y : pl.Series
"""
self.logger.info("Analysing feature importance: SHapley Additive exPlanations.")
# Set model
seed = 236868
base: CatBoostClassifier | CatBoostRegressor
if self.mode == "regression":
base = CatBoostRegressor(random_seed=seed)
elif self.mode in ("classification", "multiclass"):
base = CatBoostClassifier(random_seed=seed)
else:
raise ValueError("Invalid mode.")
base.fit(x.to_pandas(), y.to_pandas())
# Get Shap values
explainer = TreeExplainer(base.model)
shap = np.array(explainer.shap_values(x.to_pandas(), y.to_pandas()))
# Average over classes and samples and normalize
if shap.ndim == 3:
shap = np.mean(np.abs(shap), axis=0)
shap = np.mean(np.abs(shap), axis=0)
shap /= shap.sum() # normalize
# Convert to dict
self.feature_importance_["shap"] = self.sort_dict(
{k: v for k, v in zip(x.columns, shap)}
)
# Make feature sets
self.make_threshold("shap")
self.make_increment("shap")
self.logger.info(
f"Selected {len(self.feature_sets_['shap_threshold'])} features with "
f"{self.selection_cutoff * 100:.2f}% Shap treshold."
)
self.logger.info(
f"Selected {len(self.feature_sets_['shap_increment'])} features with "
f"{self.selection_increment * 100:.2f}% Shap increment."
)
@property
def features_(self) -> list[str]:
"""Returns the features of the current feature set"""
if self.feature_set is None:
return self.all_features
return self.feature_sets_[self.feature_set]
@property
def all_features(self) -> list[str]:
"""Returns the union of all feature sets"""
return list({f for s in self.feature_sets_.values() for f in s})
def sort_dict(self, dct: dict[str, float]) -> dict[str, float]:
"""Sorts a dictionary by ascending values."""
return dict(
sorted(
dct.items(),
key=lambda x: x[1],
reverse=True,
)
)
def make_threshold(self, feature_set: str) -> None:
"""Creates a feature set based on total information conservation."""
fi = self.feature_importance_[feature_set]
vals = np.array(list(fi.values()))
total_info = np.cumsum(vals) - vals
self.feature_sets_[f"{feature_set}_threshold"] = [
k
for i, (k, v) in enumerate(fi.items())
if total_info[i] <= self.selection_cutoff
]
def make_increment(self, feature_set: str) -> None:
"""Creates a feature set based on individual information carriage."""
self.feature_sets_[f"{feature_set}_increment"] = [
k
for k, v in self.feature_importance_[feature_set].items()
if v > self.selection_increment
]
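# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Replays the two selection rules on a hand-made importance dict: 'threshold'
# keeps the head of the sorted importances while the cumulative mass *before*
# each feature stays within selection_cutoff; 'increment' keeps every feature
# whose own importance exceeds selection_increment. Names and values are
# illustrative.
if __name__ == "__main__":
    fi = {"a": 0.5, "b": 0.3, "c": 0.15, "d": 0.05}  # sorted, sums to 1
    cutoff, increment = 0.85, 0.005
    vals = np.array(list(fi.values()))
    total_info = np.cumsum(vals) - vals  # importance mass before each feature
    print([k for i, k in enumerate(fi) if total_info[i] <= cutoff])  # ['a', 'b', 'c']
    print([k for k, v in fi.items() if v > increment])  # all four features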
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/feature_selection.py ===
from __future__ import annotations
import polars as pl
from amplo.automl.feature_processing._base import (
PERFECT_SCORE,
BaseFeatureExtractor,
assert_double_index,
check_data,
)
from amplo.automl.feature_processing.feature_aggregator import FeatureAggregator
from amplo.automl.feature_processing.wavelet_extractor import WaveletExtractor
from amplo.base.exceptions import NotFittedError
from amplo.utils.util import check_dtypes, unique_ordered_list
__all__ = ["TemporalFeatureExtractor"]
class TemporalFeatureExtractor(BaseFeatureExtractor):
"""
Feature extractor for temporal data.
This is simply a combination of the aggregation of the wavelet extractor with the
aggregation of the input (raw) features.
Parameters
----------
target : str | None, optional
Target column that must be present in data, by default None
mode : str | None, optional
Model mode: {"classification", "regression"}, by default None
window_size : int, optional, default: None
Determines how many data rows will be collected and summarized by pooling.
If None, will determine a reasonable window size for the data at hand.
fit_raw : bool, default: True
Whether to include pooling from raw features to extract features.
fit_wavelets : bool | list[str] | None, optional
Whether to search for pooled wavelet features, by default None.
If False, wavelets aren't used.
If True or None, defaults to ["cmor1.5-1.0", "gaus4", "gaus7", "cgau2", "cgau6", "mexh"].
A custom list of wavelets can also be provided;
each string must be a valid wavelet name (see notes).
strategy : {"exhaustive", "random", "smart"}, default: "smart"
Fitting strategy for feature extraction.
If "exhaustive", use brute-force method.
If "random", iterates on randomly shuffled feature-wavelet combinations and
performs pooling on a random subset of `self.pooling` until end of iterator or
timeout is reached.
If "smart", similar to "random" but (1) skips unpromising features or wavelets
and (2) uses promising poolings only.
timeout : int, default: 1800
Timeout in seconds for fitting. Has no effect when `strategy` is "exhaustive".
verbose : int, optional
Verbosity for logger, by default 0
Notes
-----
Valid ``wavelet`` parameters can be found via:
>>> import pywt
>>> pywt.wavelist()
"""
def __init__(
self,
target: str | None = None,
mode: str | None = None,
window_size: int | None = None,
fit_raw: bool = True,
fit_wavelets: bool | list[str] | None = None,
strategy: str = "smart",
timeout: int = 1800,
verbose: int = 0,
):
super().__init__(target=target, mode=mode, verbose=verbose)
# Assert classification or notset
if self.mode and self.mode != "classification":
raise NotImplementedError("Only mode 'classification' supported.")
# Check inputs and set defaults
check_dtypes(
("window_size", window_size, (type(None), int)),
("fit_raw", fit_raw, bool),
("fit_wavelets", fit_wavelets, (type(None), bool, list)),
("strategy", strategy, str),
("timeout", timeout, int),
)
wavelets: list[str] | None
if fit_wavelets is False:
wavelets = [] # disable fitting wavelets
elif fit_wavelets is True:
wavelets = None
else:
wavelets = fit_wavelets
# Integrity checks
if strategy not in ("exhaustive", "random", "smart"):
raise ValueError(f"Invalid value for `strategy`: {strategy}")
if timeout <= 0:
raise ValueError(f"`timeout` must be strictly positive but got: {timeout}")
if not fit_raw and wavelets == []:  # NOTE: 'None' means default wavelets
raise ValueError(
"Disabling all fitting functions is useless. Enable at least one feature extractor."
)
# Set attributes
self.window_size = window_size
self.fit_raw = fit_raw
self.fit_wavelets = wavelets
self.strategy = strategy
self.timeout = timeout
self.is_fitted_ = False
# Subclasses
self.wavelet_extractor = WaveletExtractor(
target=target,
mode=mode,
wavelets=wavelets,
strategy=strategy,
timeout=timeout,
verbose=verbose,
)
self.wavelet_aggregator = FeatureAggregator(
target=target,
mode=mode,
window_size=window_size,
strategy=strategy,
verbose=verbose,
)
self.raw_aggregator = FeatureAggregator(
target=target,
mode=mode,
window_size=window_size,
strategy=strategy,
verbose=verbose,
)
def fit(self, data: pl.DataFrame, index_cols: list[str]): # type: ignore[override]
# We implement fit_transform because we anyhow transform the data. Therefore,
# when using fit_transform we don't have to do redundant transformations.
self.fit_transform(data, index_cols)
return self
def fit_transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
"""Fits and transforms."""
# Input checks
self.logger.info("Fitting temporal feature extractor.")
check_data(data)
# Assert double-index
assert_double_index(data, index_cols)
# Split data
x = data.drop([self.target, *index_cols])
y = data[self.target]
# Initialize fitting
self.initialize_baseline(x, y)
assert self._baseline_score is not None
if self._baseline_score > PERFECT_SCORE:
self.logger.info("Features are good, we're skipping feature aggregation.")
self.is_fitted_ = True
self.skipped_ = True
return data
# Fit-transform
raw_agg_data = self.raw_aggregator.fit_transform(data, index_cols)
wav_data = self.wavelet_extractor.fit_transform(data, index_cols)
wav_agg_data = self.wavelet_aggregator.fit_transform(wav_data, index_cols)
data_out = pl.concat(
[raw_agg_data, wav_agg_data.drop([*index_cols, self.target])],
how="horizontal",
)
self.set_features(
self.wavelet_aggregator.features_ + self.raw_aggregator.features_
)
self.is_fitted_ = True
return data_out[[*index_cols, *self.features_, self.target]]
def transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
"""Transforms."""
self.logger.info("Transforming data.")
if not self.is_fitted_:
raise NotFittedError
if self.skipped_:
return data
# Input checks
data, index_cols, got_double_index = assert_double_index(
data, index_cols, allow_single=True
)
check_data(data)
# Apply transformations
data_out = pl.concat(
[
self.raw_aggregator.transform(data, index_cols),
self.wavelet_aggregator.transform(
self.wavelet_extractor.transform(data, index_cols), index_cols
).drop([*index_cols, self.target]),
],
how="horizontal",
)
# Restore input indexing
if not got_double_index:
data.drop_in_place(index_cols.pop(0))
return data_out[[*index_cols, *self.features_, self.target]]
def set_features(self, features: str | list[str]):
"""Updates the features of the aggregators nad extractor.
Parameters
----------
features : list[str]
"""
if isinstance(features, str):
features = [features]
self.features_ = features
self.raw_aggregator.set_features([f for f in features if "__wav__" not in f])
self.wavelet_aggregator.set_features([f for f in features if "__wav__" in f])
self.wavelet_extractor.set_features(
unique_ordered_list(
[f.split("__pool")[0] for f in features if "__wav__" in f]
)
)
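# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Shows how set_features routes feature names by naming convention: names
# without '__wav__' go to the raw aggregator, names with '__wav__' go to the
# wavelet aggregator, and the wavelet extractor receives the pre-pooling
# wavelet names. Feature names below are illustrative.
if __name__ == "__main__":
    features = [
        "sensor_a__pool=mean",
        "sensor_a__wav__mexh__3.5__pool=abs_max",
        "sensor_b__wav__gaus4__1.25__pool=abs_energy",
    ]
    print([f for f in features if "__wav__" not in f])  # raw aggregator
    print([f for f in features if "__wav__" in f])      # wavelet aggregator
    print([f.split("__pool")[0] for f in features if "__wav__" in f])
    # wavelet extractor: ['sensor_a__wav__mexh__3.5', 'sensor_b__wav__gaus4__1.25']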
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/temporal_feature_extractor.py ===
from __future__ import annotations
import polars as pl
from tqdm import tqdm
from amplo.automl.feature_processing._base import (
BaseFeatureExtractor,
assert_double_index,
check_data,
)
from amplo.automl.feature_processing.pooling import POOL_FUNCTIONS, pl_pool
from amplo.automl.feature_processing.score_watcher import ScoreWatcher
from amplo.base.exceptions import NotFittedError
from amplo.utils import check_dtypes
__all__ = ["FeatureAggregator"]
class FeatureAggregator(BaseFeatureExtractor):
"""Aggregates a timeseries into a single sample using various pooling functions
Returns only features deemed worthy, and never the original features.
NOTE: Only for multi-index classification problems.
Parameters
----------
target : str | None, optional
Target column that must be present in data, by default None
mode : str | None, optional
Model mode: {"classification", "regression"}, by default "classification"
window_size : int, optional, default: None
Determines how many data rows will be collected and summarized by pooling.
If None, will determine a reasonable window size for the data at hand.
strategy : {"exhaustive", "random", "smart"}, default: "smart"
Fitting strategy for feature extraction.
verbose : int, optional
Verbosity for logger, by default 0
"""
ALL_POOL_FUNC_STR = list(POOL_FUNCTIONS)
def __init__(
self,
target: str | None = None,
mode: str | None = "classification",
window_size: int | None = None,
strategy: str = "smart",
verbose: int = 1,
):
super().__init__(target=target, mode=mode, verbose=verbose)
# Assert classification or notset
if self.mode and self.mode != "classification":
raise NotImplementedError("Only mode 'classification' supported.")
# Check inputs and set defaults
check_dtypes(
("window_size", window_size, (type(None), int)),
("strategy", strategy, str),
)
# Set attributes
self.window_size = window_size
self.strategy = strategy
# Subclasses
self.col_watch: ScoreWatcher | None = None
self.pool_watch: ScoreWatcher | None = None
def fit(self, data: pl.DataFrame, index_cols: list[str]): # type: ignore[override]
self.fit_transform(data, index_cols)
return self
def fit_transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
"""Fits pool functions and aggregates"""
self.logger.info("Fitting feature aggregator.")
check_data(data, allow_double_underscore=True)
data, index_cols, _ = assert_double_index(data, index_cols)
# Select data
x = data.drop([self.target, *index_cols])
y = data[self.target]
index = data.select(index_cols)
# Initialize
self.set_window_size(index)
self.initialize_baseline(x, y)
assert self.window_size is not None
assert self._baseline_score is not None
# Set score watchers
if self.strategy == "smart":
self.col_watch = ScoreWatcher(x.columns)
self.pool_watch = ScoreWatcher(self.ALL_POOL_FUNC_STR)
# Initialize
pool_funcs = self.ALL_POOL_FUNC_STR
data_out = self.pool_target(data, index_cols)
y_pooled = data_out[self.target]
for col in tqdm(x.columns):
if col in (self.target, *index.columns):
continue
for func in pool_funcs:
if self.should_skip_col_func(col, func):
continue
self.logger.debug(f"Fitting: {func}, {col}")
# Pooling
feature = pl_pool(data, col, self.window_size, func)[:, -1]
score = self.calc_feature_score(feature, y=y_pooled)
# Update score watchers
if self.strategy == "smart" and self.col_watch and self.pool_watch:
self.col_watch.update(col, score, 1)
self.pool_watch.update(func, score, 1)
# Accept feature
accepted = self.accept_feature(score)
if accepted:
data_out = data_out.with_column(feature)
self.add_features(feature.name)
# Update baseline
self.logger.debug(
f"{func.ljust(25)} {col.ljust(75)} accepted: {accepted} "
f"{score} / {self._baseline_score}"
)
self.update_baseline(score)
self.is_fitted_ = True
self.logger.info(f"Accepted {data_out.shape[1] - 3} aggregated features.")
return data_out
def transform(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame: # type: ignore[override]
"""Aggregates data"""
if not self.is_fitted_:
raise NotFittedError
assert self.window_size
data, index_cols, _ = assert_double_index(data, index_cols)
# Initialize - include pooled target if provided in input data
if self.target not in data:
data = data.with_column(pl.lit(0).alias(self.target))
data_out = self.pool_target(data, index_cols)
data_out.drop_in_place(self.target)
else:
data_out = self.pool_target(data, index_cols)
# Pooling
for feature in self.features_:
col, pool = feature.split("__pool=")
data_out = data_out.with_column(
pl_pool(data, col, self.window_size, pool)[:, -1]
)
self.logger.info("Transformed features.")
return data_out
def should_skip_col_func(self, col: str, func: str) -> bool:
"""Checks whether current iteration of column / function should be skipped.
Parameters
----------
col : str
func : str
"""
# Check score watchers
if self.strategy == "smart":
if self.col_watch is None or self.pool_watch is None:
raise ValueError("Watchers are not set.")
if self.col_watch.should_skip(col) or self.pool_watch.should_skip(func):
self.logger.debug(f"Scorewatcher skipped: {func}, {col}")
return True
return False
def pool_target(self, data: pl.DataFrame, index_cols: list[str]) -> pl.DataFrame:
"""
Pools target data with given window size.
Parameters
----------
data : pl.DataFrame
Data to be pooled. Columns 'log', 'index' and target are required.
index_cols : list[str] | None
Column names of the double-index. By default ['log', 'index'].
Returns
-------
pl.DataFrame
Pooled target data.
"""
data, index_cols, _ = assert_double_index(data, index_cols)
# Transform and rename back to self.target
assert self.window_size is not None
out = pl_pool(data, self.target, self.window_size, "first")
return out.rename({f"{self.target}__pool=first": self.target})
def set_window_size(self, index: pl.DataFrame) -> None:
"""
Sets the window size in case not provided.
Notes
-----
We'll pick the window size such that, on average, each log yields about 5 windows.
The window size CANNOT be small, as many small windows significantly slow down
the window calculations.
Parameters
----------
index : pl.DataFrame
Index of data to be fitted.
"""
if self.window_size is not None:
self.logger.debug(f"Window size (from args): {self.window_size}.")
return
# Count log sizes
col_1, col_2 = index.columns
counts = index.groupby(col_1).count()["count"]
counts_min: int = counts.min() # type: ignore[assignment]
counts_max: int = counts.max() # type: ignore[assignment]
counts_mean: float = counts.mean()
ws = int(min(counts_min, counts_mean // 5))
# Ensure that the window size is an integer and at least 50.
# We're doing FFT; fewer than 50 samples per window makes no sense.
self.window_size = max(ws, 50)
self.logger.debug(f"Set window size to {self.window_size}.")
if counts_max // self.window_size > 100:
self.logger.warning("Data with >100 windows will result in slow pooling.")
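# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Replays the window-size heuristic from set_window_size on toy per-log row
# counts: target roughly 5 windows per log on average, never exceed the
# shortest log, and clamp to a minimum of 50 samples per window.
if __name__ == "__main__":
    log_lengths = [400, 600, 1000]  # illustrative per-log row counts
    counts_min = min(log_lengths)
    counts_mean = sum(log_lengths) / len(log_lengths)
    ws = int(min(counts_min, counts_mean // 5))
    print(max(ws, 50))  # min(400, 133) -> 133, already above the 50 floor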
# === Amplo | /Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/feature_aggregator.py ===
from __future__ import annotations
import re
from itertools import combinations
from warnings import warn
import pandas as pd
import polars as pl
from amplo.automl.feature_processing._base import BaseFeatureExtractor, check_data
from amplo.automl.feature_processing.feature_selection import FeatureSelector
from amplo.automl.feature_processing.nop_feature_extractor import NopFeatureExtractor
from amplo.automl.feature_processing.static_feature_extractor import (
StaticFeatureExtractor,
)
from amplo.automl.feature_processing.temporal_feature_extractor import (
TemporalFeatureExtractor,
)
from amplo.base import BaseTransformer, LoggingMixin
from amplo.base.exceptions import NotFittedError
from amplo.utils import check_dtypes
from amplo.utils.data import pandas_to_polars, polars_to_pandas
__all__ = [
"find_collinear_columns",
"translate_features",
"get_required_columns",
"FeatureProcessor",
]
def find_collinear_columns(
data: pl.DataFrame, information_threshold: float = 0.9
) -> list[str]:
"""
Finds collinear features and returns them.
Calculates the Pearson Correlation coefficient for all input features.
Features that exceed the information threshold are considered linearly
co-dependent, i.e. describable by: y = a * x + b. As these features add
little to no information, they will be removed.
Parameters
----------
data : pl.DataFrame
Data to search for collinear features.
information_threshold : float
Percentage value that defines the threshold for a ``collinear`` feature.
Returns
-------
list of str
List of collinear feature columns.
"""
check_dtypes(
("data", data, pl.DataFrame),
("information_threshold", information_threshold, float),
)
# Set helpers
SPLITTER = "<->"
# Calculate correlation within columns
data_demeaned = data.fill_nan(None).with_columns(pl.all() - pl.all().mean())
ss = data_demeaned.with_columns(pl.all().pow(2).sum().pow(0.5))[0]
correlation = data_demeaned.select(
[
((pl.col(coli) * pl.col(colj)).sum() / (ss[coli] * ss[colj]))
.abs()
.alias(f"{coli}{SPLITTER}{colj}")
# 'combinations' iterates through all combinations of the column names
# without having twice the same column and independent of the order
for coli, colj in combinations(data.columns, 2)
]
)
# Filter out every column which exceeds the information threshold
# NOTE: 'column', 'column_0' and 'field_{i}' are default names by polars
collinear_columns = (
# convert the dataframe to a series (kind of)
correlation.transpose(include_header=True, header_name="column")
# filter by information threshold
.filter(pl.col("column_0") > information_threshold)["column"]
# extract the column name (apply split and take the right hand side)
.str.split_exact(SPLITTER, 2).struct.field("field_1")
# convert to list
.to_list()
)
# Sort and remove potential duplicates
collinear_columns_ = sorted(set(map(str, collinear_columns)))
return collinear_columns_
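# --- Hedged sketch (not part of the Amplo source) -----------------------------
# find_collinear_columns on a toy frame: 'b' is an exact linear function of 'a',
# so their absolute Pearson correlation (1.0) exceeds the threshold and 'b'
# (the right-hand side of the pair) is reported. Assumes the same (older)
# polars API used above.
if __name__ == "__main__":
    df = pl.DataFrame(
        {
            "a": [1.0, 2.0, 3.0, 4.0],
            "b": [2.0, 4.0, 6.0, 8.0],  # b = 2 * a -> collinear with a
            "c": [1.0, -1.0, 1.0, -1.0],
        }
    )
    print(find_collinear_columns(df, information_threshold=0.9))  # ['b']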
def translate_features(feature_cols: list[str]) -> dict[str, list[str]]:
"""
Translates (extracted) features and tells its underlying original feature.
Parameters
----------
feature_cols : list of str
Feature columns to be translated.
Returns
-------
dict of {str: list of str}
Dictionary with `feature_cols` as keys and their underlying original features
as values.
"""
for item in feature_cols:
check_dtypes(("feature_cols__item", item, str))
translation = {}
for feature in feature_cols:
# Raw features
if "__" not in feature:
t = [feature]
# From StaticFeatureExtractor
elif re.search("__(mul|div|x|d)__", feature):
f1, _, f2 = feature.split("__")
t = [f1, f2]
elif re.search("^(sin|cos|inv)__", feature):
_, f = feature.split("__")
t = [f]
# From TemporalFeatureExtractor
elif re.search("^((?!__).)*__pool=.+", feature): # `__` appears only once
f, _ = feature.split("__")
t = [f]
elif re.search(".+__wav__.+__pool=.+", feature):
f, _ = feature.split("__", maxsplit=1)
t = [f]
else:
raise ValueError(f"Could not translate feature: {feature}")
translation[feature] = t
return translation
def get_required_columns(feature_cols: list[str]) -> list[str]:
"""
Returns all required columns that are required for the given features.
Parameters
----------
feature_cols : list of str
Feature columns to be translated.
Returns
-------
list[str]
All required data columns for the given features.
"""
required_cols = []
for translation in translate_features(feature_cols).values():
required_cols.extend(translation)
return sorted(set(required_cols))
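# --- Hedged sketch (not part of the Amplo source) -----------------------------
# Traces translate_features over one name of each naming scheme it parses;
# the feature names below are illustrative.
if __name__ == "__main__":
    demo = [
        "raw_col",                           # raw feature
        "f1__mul__f2",                       # StaticFeatureExtractor cross
        "sin__f1",                           # StaticFeatureExtractor transform
        "f1__pool=mean",                     # pooled raw feature
        "f1__wav__mexh__3.5__pool=abs_max",  # pooled wavelet feature
    ]
    for feature, sources in translate_features(demo).items():
        print(f"{feature} -> {sources}")
    # raw_col -> ['raw_col'], f1__mul__f2 -> ['f1', 'f2'], the rest -> ['f1']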
class FeatureProcessor(BaseTransformer, LoggingMixin):
"""
Feature processor module to extract and select features.
Parameters
----------
target : str
Target column that must be present in data.
mode : "classification", "regression"
Model mode.
use_wavelets : bool, optional
Whether the temporal feature extractor may fit wavelet features, by default True
is_temporal : bool, optional
Whether the data should be treated as temporal data or not.
If none is provided, is_temporal will be set to true when fit data is
multi-indexed, false otherwise.
extract_features : bool
Whether to extract features or just remove correlating columns.
collinear_threshold : float
Information threshold for collinear features.
analyse_feature_sets : {"auto", "all", "gini", "shap"}, default: "auto"
Which feature sets to analyse.
If "auto", gini (and shap) will be analysed.
If "all", gini and shap will be analysed.
If "gini" or "shap", gini or shap will be analysed, respectively.
selection_cutoff : float
Upper feature importance threshold for threshold feature selection.
selection_increment : float
Lower feature importance threshold for increment feature selection.
verbose : int
Verbosity for logger.
**extractor_kwargs : typing.Any
Additional keyword arguments for feature extractor.
Currently, only the `TemporalFeatureExtractor` module supports this parameter.
"""
def __init__(
self,
target: str = "",
mode: str = "",
use_wavelets: bool = True,
is_temporal: bool | None = None,
extract_features: bool = True,
collinear_threshold: float = 0.99,
analyse_feature_sets: str = "auto",
selection_cutoff: float = 0.85,
selection_increment: float = 0.005,
verbose: int = 1,
**extractor_kwargs,
):
BaseTransformer.__init__(self)
LoggingMixin.__init__(self, verbose=verbose)
self.target = target
self.mode = mode
check_dtypes(
("is_temporal", is_temporal, (bool, type(None))),
("extract_features", extract_features, bool),
("collinear_threshold", collinear_threshold, float),
("analyse_feature_sets", analyse_feature_sets, (str, type(None))),
("selection_cutoff", selection_cutoff, float),
("selection_increment", selection_increment, float),
)
for value, name in (
(collinear_threshold, "collinear_threshold"),
(selection_cutoff, "selection_cutoff"),
(selection_increment, "selection_increment"),
):
if not 0 < value < 1:
raise ValueError(f"Invalid argument {name} = {value} ∉ (0, 1).")
# Set attributes
self.feature_extractor: BaseFeatureExtractor
self.feature_selector = FeatureSelector(
target, mode, selection_cutoff, selection_increment
)
self.is_temporal = is_temporal
self.use_wavelets = use_wavelets
self.extract_features = extract_features
self.collinear_threshold = collinear_threshold
self.analyse_feature_sets = analyse_feature_sets
self.selection_cutoff = selection_cutoff
self.selection_increment = selection_increment
self.extractor_kwargs = extractor_kwargs
self.collinear_cols_: list[str] = []
def fit(self, data: pd.DataFrame):
"""
Fits this feature processor (extractor & selector).
Parameters
----------
data : pd.DataFrame
Input data
"""
# NOTE: We anyhow have to transform the data. Therefore, when calling
# 'fit_transform' we do no redundant transformations.
self.fit_transform(data)
return self
def fit_transform(
self, data: pd.DataFrame, feature_set: str | None = None
) -> pd.DataFrame:
"""
Fits and transforms this feature processor.
Parameters
----------
data : pd.DataFrame
Input data
feature_set : str | None, optional
Choose specific feature set, by default None
Returns
-------
pd.DataFrame
Transformed data
"""
self.logger.info("Fitting data.")
# Convert to polars
self.logger.debug("Convert pandas data to polars.")
pl_data, index_renaming = pandas_to_polars(data)
index_cols = list(index_renaming)
# Check
check_data(pl_data)
# Remove collinear columns
pl_data = self._remove_collinear(pl_data, index_cols)
# Fit and transform feature extractor.
self._set_feature_extractor(index_cols)
pl_data = self.feature_extractor.fit_transform(pl_data, index_cols)
# Analyse feature importance and feature sets
pl_data = self.feature_selector.fit_transform(pl_data, index_cols, feature_set)
self.feature_extractor.set_features(self.features_)
# Convert back to pandas and restore index
self.logger.debug("Convert polars data back do pandas.")
data = polars_to_pandas(pl_data, index_renaming)
self.is_fitted_ = True
return data
def transform(
self, data: pd.DataFrame, feature_set: str | None = None
) -> pd.DataFrame:
"""
Transform data and return it.
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
self.is_fitted_
Parameters
----------
data : pd.DataFrame
feature_set : str, optional
Desired feature set.
When feature_set is None, all features will be returned.
Returns
-------
pandas.DataFrame
"""
self.logger.info("Transforming data.")
if not self.is_fitted_:
raise NotFittedError
# Convert to polars
self.logger.debug("Convert pandas data to polars.")
pl_data, index_renaming = pandas_to_polars(data)
index_cols = list(index_renaming)
# Check
check_data(pl_data)
# Set features for transformation
if feature_set and feature_set in self.feature_sets_:
self.set_feature_set(feature_set)
elif feature_set:
raise ValueError(f"Feature set does not exist: {feature_set}")
# Transform
pl_data = self._impute_missing_columns(pl_data)
pl_data = self.feature_extractor.transform(pl_data, index_cols)
pl_data = self.feature_selector.transform(pl_data, index_cols)
# Convert back to pandas and restore index
self.logger.debug("Convert polars data back do pandas.")
data = polars_to_pandas(pl_data, index_renaming)
return data
def _set_feature_extractor(self, index_cols: list[str]):
"""
Checks the is_temporal attribute. If not set, infers it from the index: double-indexed data is treated as temporal.
Parameters
----------
index_cols : list[str]
Column names of the indices.
"""
self.logger.debug("Setting feature extractor...")
# Set is_temporal
if len(index_cols) not in (1, 2):
self.logger.warning("Index is neither single- nor double-indexed.")
if self.is_temporal is None:
self.is_temporal = len(index_cols) == 2
self.logger.debug(
f"Data is {'single' if self.is_temporal else 'double'}-indexed. "
f"Setting 'is_temporal' attribute to {self.is_temporal}."
)
# Set feature extractor
if not self.extract_features:
self.feature_extractor = NopFeatureExtractor(
target=self.target, mode=self.mode, verbose=self.verbose
)
elif self.is_temporal:
self.feature_extractor = TemporalFeatureExtractor(
target=self.target,
mode=self.mode,
fit_wavelets=self.use_wavelets,
verbose=self.verbose,
**self.extractor_kwargs,
)
else:
self.feature_extractor = StaticFeatureExtractor(
target=self.target,
mode=self.mode,
verbose=self.verbose,
)
self.logger.debug(f"Chose {type(self.feature_extractor).__name__}.")
def _remove_collinear(
self, data: pl.DataFrame, index_cols: list[str]
) -> pl.DataFrame:
"""
Removes collinear columns from the data.
Fitted attributes:
Collinear columns are stored in "collinear_cols_".
Parameters
----------
data : pl.DataFrame
Data to examine.
index_cols : list[str]
Column names of the index.
"""
self.logger.info("Analysing columns of interest.")
self.collinear_cols_ = find_collinear_columns(
data.drop(index_cols), self.collinear_threshold
)
self.logger.info(f"Removed {len(self.collinear_cols_)} columns.")
return data.drop(self.collinear_cols_)
def _impute_missing_columns(self, data: pl.DataFrame) -> pl.DataFrame:
"""
Imputes missing columns when not present for transforming.
Parameters
----------
data : pl.DataFrame
Data to check and impute when necessary.
Returns
-------
pl.DataFrame
Imputed data.
"""
if not self.is_fitted_:
raise NotFittedError
# Identify required columns
required_cols = [
col
for columns in translate_features(self.features_).values()
for col in columns
]
required_cols = list(set(required_cols))
# Find missing columns and impute
missing_cols = [col for col in required_cols if col not in data.columns]
if missing_cols:
warn(
f"Imputing {len(missing_cols)} missing columns, namely: {missing_cols}"
)
data = data.with_columns([pl.lit(0).alias(col) for col in missing_cols])
return data
@property
def features_(self) -> list[str]:
"""Returns extracted & selected features"""
return self.feature_selector.features_
@property
def feature_importance_(self) -> dict[str, dict[str, float]]:
"""
Format:
{
"rf": {
"feature_1": 0.98,
...
},
...
}
"""
return self.feature_selector.feature_importance_
@property
def feature_set_(self) -> str | None:
return self.feature_selector.feature_set
@property
def feature_sets_(self) -> dict[str, list[str]]:
"""
Format:
{
"rf": ["feature_1", ...],
"rfi": ["feature_2", ...]
}
"""
return self.feature_selector.feature_sets_
def set_feature_set(self, feature_set: str) -> None:
"""Updates the feature set of the feature selector & extractor"""
self.feature_selector.feature_set = feature_set
if self.feature_extractor:
self.feature_extractor.set_features(self.features_)
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/automl/feature_processing/feature_processing.py
|
feature_processing.py
|
from __future__ import annotations
import logging
import re
import warnings
from collections.abc import Generator
from typing import Any
__all__ = [
"hist_search",
"clean_feature_name",
"check_dtypes",
"unique_ordered_list",
]
def unique_ordered_list(seq: list[Any]):
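"""
Return the sequence without duplicates, preserving first-seen order.
Examples
--------
>>> unique_ordered_list([3, 1, 3, 2, 1])
[3, 1, 2]
"""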
seen = {}
result = []
for item in seq:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result
def hist_search(array, value):
"""
Binary search that finds the index in order to fulfill
``array[index] <= value < array[index + 1]``
Parameters
----------
array : array of float
value : float
Returns
-------
int
Bin index of the value
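Examples
--------
>>> hist_search([0.0, 1.0, 2.0], 1.5)
1
>>> hist_search([0.0, 1.0, 2.0], 2.5)
-1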
"""
# Return -1 when no bin exists
if value < array[0] or value >= array[-1]:
logging.debug(
f"No bin (index) found for value {value}. "
f"Array(Min: {array[0]}, "
"Max: {array[-1]})"
)
return -1
# Initialize min and max bin index
low = 0
high = len(array) - 1
# Bin search
countdown = 30
while countdown > 0:
# Count down
countdown -= 1
# Set middle bin index
middle = low + (high - low) // 2
if low == middle == high - 1: # stop criterion
return middle
if value < array[middle]: # array[low] <= value < array[middle]
high = middle
elif value >= array[middle]: # array[middle] <= value < array[high]
low = middle
warnings.warn("Operation took too long. Returning -1 (no match).", RuntimeWarning)
return -1
def clean_feature_name(feature_name: str | int) -> str:
"""
Clean feature names and append "feature_" when it's a digit.
Parameters
----------
feature_name : str or int
Feature name to be cleaned.
Returns
-------
cleaned_feature_name : str
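Examples
--------
>>> clean_feature_name("Temperature (°C)")
'temperature_c'
>>> clean_feature_name(123)
'feature_123'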
"""
# Handle digits
if isinstance(feature_name, int) or str(feature_name).isdigit():
feature_name = f"feature_{feature_name}"
# Remove non-numeric and non-alphabetic characters.
# Assert single underscores and remove underscores in prefix and suffix.
return re.sub("[^a-z0-9]+", "_", feature_name.lower()).strip("_")
def check_dtype(name: str, value: Any, typ: type | tuple[type, ...]) -> None:
"""
Checks a single dtype.
Parameters
----------
name : str
Parameter name, required for properly raising the error.
value : Any
Parameter value to be checked.
typ : type
Required parameter type.
Returns
-------
None
Examples
--------
>>> check_dtype("var1", 123, int)
Raises
------
TypeError
If given type constraint is not fulfilled.
"""
if not isinstance(value, typ):
msg = f"Invalid dtype for argument '{name}': {type(value).__name__}"
raise TypeError(msg)
_DTCheckType = tuple[str, Any, "type | tuple[type, ...]"]
def check_dtypes(
*checks: _DTCheckType | Generator[_DTCheckType, None, None] | list[_DTCheckType]
) -> None:
"""
Checks all dtypes.
Parameters
----------
checks : _DTCheckType | Generator[_DTCheckType, None, None] | list[_DTCheckType]
Tuples, generators or lists of (name, parameter, allowed types) to be checked.
Returns
-------
None
Examples
--------
Check a single parameter:
>>> check_dtypes(("var1", 123, int))
Check multiple:
>>> check_dtypes(("var1", 123, int), ("var2", 1.0, (int, float))) # tuples
>>> check_dtypes(("var", var, str) for var in ["a", "b"]) # generator
>>> check_dtypes([("var", var, str) for var in ["a", "b"]]) # list
Raises
------
TypeError
If any given type constraint is not fulfilled.
"""
for check in checks:
if isinstance(check, (list, Generator)):
check_dtypes(*check)
else:
check_dtype(*check)
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/utils/util.py
|
util.py
|
from __future__ import annotations
import logging
from datetime import datetime
from pathlib import Path
import colorlog
__all__ = [
"add_file_handler",
"del_file_handlers",
"get_root_logger",
"del_root_logger",
]
# ------------------------------------------------------------------------------
# Filters
class TimeFilter(logging.Filter):
def filter(self, record):
# Check if previous logged
if not hasattr(self, "last"):
self.last = record.relativeCreated
# Calc & add delta
delta = datetime.fromtimestamp(
record.relativeCreated / 1000.0
) - datetime.fromtimestamp(self.last / 1000.0)
record.relative = f"{(delta.seconds + delta.microseconds / 1e6):.2f}"
# Update last
self.last = record.relativeCreated
return True
class NameFilter(logging.Filter):
"""
Logging filter that ignores child names of loggers that inherit from ``AmploML``.
"""
def filter(self, record):
split_name = record.name.split(".", 1)
if split_name[0] == "AmploML":
record.name = split_name[0]
return True
def _add_filters(handler: logging.Handler) -> None:
handler.addFilter(TimeFilter())
handler.addFilter(NameFilter())
# ------------------------------------------------------------------------------
# Loggers
_ROOT_LOGGER: logging.Logger | None = None
def _create_logger() -> logging.Logger:
"""
Creates a new logger that also captures warnings from `warnings.warn()`.
Returns
-------
logging.Logger
New logger instance.
"""
# Get custom logger
logger = logging.getLogger("AmploML")
logger.setLevel("INFO")
# Set console handler
console_formatter = colorlog.ColoredFormatter(
"%(white)s%(asctime)s %(blue)s[%(name)s]%(log_color)s[%(levelname)s] "
"%(white)s%(message)s %(light_black)s<%(filename)s:%(lineno)d> (%(relative)ss)",
datefmt="%H:%M",
)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.NOTSET)
console_handler.setFormatter(console_formatter)
_add_filters(console_handler)
logger.addHandler(console_handler)
# Capture warnings from `warnings.warn(...)`
logging.captureWarnings(True)
py_warnings_logger = logging.getLogger("py.warnings")
warnings_formatter = colorlog.ColoredFormatter(
"%(white)s%(asctime)s %(blue)s[%(name)s]%(log_color)s[%(levelname)s] "
"%(white)s%(message)s",
datefmt="%H:%M",
)
warnings_handler = logging.StreamHandler()
warnings_handler.setLevel(logging.WARNING)
warnings_handler.setFormatter(warnings_formatter)
warnings_handler.terminator = "" # suppress unnecessary newline
_add_filters(warnings_handler)
py_warnings_logger.addHandler(warnings_handler)
return logger
def add_file_handler(file_path: str | Path) -> None:
"""
Add a file handler to the root logger.
Parameters
----------
file_path : str or Path
Path where the logger should write to.
Raises
------
AttributeError
When the root logger is not properly initialized (None).
"""
global _ROOT_LOGGER
if not isinstance(_ROOT_LOGGER, logging.Logger):
raise AttributeError(
"The root logger is not initialized properly. "
"Did you call `get_root_logger()`? "
f"Root logger: {_ROOT_LOGGER}"
)
# Set file handler
file_formatter = logging.Formatter(
"%(asctime)s [%(name)s][%(levelname)s] %(message)s "
"<%(filename)s:%(lineno)d> (%(relative)ss)",
datefmt="%H:%M",
)
file_handler = logging.FileHandler(file_path, mode="a")
file_handler.setLevel(logging.NOTSET)
file_handler.setFormatter(file_formatter)
_add_filters(file_handler)
_ROOT_LOGGER.addHandler(file_handler)
def del_file_handlers() -> None:
"""
Delete all file handlers in the root logger.
Raises
------
AttributeError
When the root logger is not properly initialized (None).
"""
global _ROOT_LOGGER
if not isinstance(_ROOT_LOGGER, logging.Logger):
raise AttributeError(
"The root logger is not initialized properly. "
"Did you call `get_root_logger()`? "
f"Root logger: {_ROOT_LOGGER}"
)
for handler in _ROOT_LOGGER.handlers:
if isinstance(handler, logging.FileHandler):
_ROOT_LOGGER.removeHandler(handler)
def get_root_logger() -> logging.Logger:
"""
Get the root logger. If not yet done the logger will be initialized.
"""
global _ROOT_LOGGER
# Do not initialize the same logger multiple times
if isinstance(_ROOT_LOGGER, logging.Logger):
return _ROOT_LOGGER
# First time called -> initialize logger
_ROOT_LOGGER = _create_logger()
return _ROOT_LOGGER
def del_root_logger() -> None:
"""Reset the root logger and set it to None."""
global _ROOT_LOGGER
_ROOT_LOGGER = None
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/utils/logging.py
|
logging.py
|
from __future__ import annotations
import warnings
import pandas as pd
import polars as pl
from sklearn.feature_selection import r_regression # pearson coefficient
from sklearn.preprocessing import LabelEncoder
from amplo.utils.util import check_dtypes
__all__ = [
"influx_query_to_df",
"check_dataframe_quality",
"check_pearson_correlation",
"pandas_to_polars",
"polars_to_pandas",
]
def influx_query_to_df(result):
df = []
for table in result:
parsed_records = []
for record in table.records:
parsed_records.append((record.get_time(), record.get_value()))
df.append(
pd.DataFrame(parsed_records, columns=["ts", table.records[0].get_field()])
)
return pd.concat(df).set_index("ts").groupby(level=0).sum()
def check_dataframe_quality(data: pd.DataFrame) -> bool:
if data.isna().any().any():
warnings.warn("Data contains NaN.")
elif data.isnull().any().any():
warnings.warn("Data contains null.")
elif (data.dtypes == object).any().any():
warnings.warn("Data contains dtype 'object', which is ambiguous.")
elif (data.dtypes == str).any().any():
warnings.warn("Data contains dtype 'str', which is ambiguous.")
elif data.max().max() > 1e38 or data.min().min() < -1e38:
warnings.warn("Data contains values larger than float32 (1e38).")
else:
return True
return False
def check_pearson_correlation(features: pd.DataFrame, labels: pd.Series) -> bool:
if labels.dtype == "object":
labels = LabelEncoder().fit_transform(labels)
pearson_corr = r_regression(features, labels)
if abs(pearson_corr).mean() > 0.5:
return False
else:
return True
def pandas_to_polars(
data: pd.DataFrame, include_index: bool = True
) -> tuple[pl.DataFrame, dict[str, str]]:
"""
Convert pandas to polars DataFrame.
Notes
-----
Polars removes the indices when calling `pl.from_pandas(data)`. By default, this
function includes the index (include_index=True) and indicates it with the second
return argument (index_renaming).
Polars also does not allow duplicate column names. Note that moving indices to the
columns may introduce such duplicates.
Parameters
----------
data : pd.DataFrame
Pandas object to be converted
include_index : bool, optional
Whether to include the index for conversion, by default True
Returns
-------
pl_data : pl.DataFrame
Converted polars object.
index_renaming : dict[str, str]
Rename dictionary for the index names.
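Examples
--------
A sketch with a double-indexed frame (index names are illustrative):
>>> import pandas as pd
>>> pd_data = pd.DataFrame(
...     {"value": [1, 2]},
...     index=pd.MultiIndex.from_tuples([("a", 0), ("a", 1)], names=["file", "row"]),
... )
>>> pl_data, index_renaming = pandas_to_polars(pd_data)
>>> index_renaming
{'log': 'file', 'index': 'row'}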
"""
check_dtypes(
("data", data, pd.DataFrame),
("include_index", include_index, bool),
)
# Convert to polars without index
if not include_index:
return pl.from_pandas(data), {}
# Get original and set new (work) index names
orig_index_names = list(data.index.names)
if len(orig_index_names) == 1:
work_index_names = ["index"]
elif len(orig_index_names) == 2:
work_index_names = ["log", "index"]
else:
raise ValueError("Amplo supports only single- and double-indices.")
# Conversion
index_renaming = dict(zip(work_index_names, orig_index_names))
pl_data = pl.from_pandas(data.reset_index(names=work_index_names))
return pl_data, index_renaming
def polars_to_pandas(
data: pl.DataFrame, index_names: None | list[str] | dict[str, str] = None
) -> pd.DataFrame:
"""
Convert polars to pandas DataFrame.
Parameters
----------
data : pl.DataFrame
Polars object to be converted.
index_names : None | list[str] | dict[str, str], optional
As polars does not support indices, we recover it from its column(s).
If None, no index is recovered.
If list, given columns are recovered (w/o renaming them).
If dict, values() are assumed to contain column names to recover index from and
keys() will be the new names for it, by default None
Returns
-------
pd_data : pd.DataFrame
Converted pandas object.
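Examples
--------
Recovering a double index from its columns (index names are illustrative):
>>> import polars as pl
>>> pl_data = pl.DataFrame({"log": ["a", "a"], "index": [0, 1], "value": [1, 2]})
>>> pd_data = polars_to_pandas(pl_data, {"log": "file", "index": "row"})
>>> list(pd_data.index.names)
['file', 'row']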
"""
check_dtypes(
("data", data, pl.DataFrame),
("index_names", index_names, (type(None), dict, list)),
)
# Convert to pandas
pd_data = data.to_pandas()
# Restore index names
# Skip when `index_names` is empty. Otherwise `pd.set_index` would raise an error.
if index_names and isinstance(index_names, list):
orig_index_names = work_index_names = index_names
elif index_names and isinstance(index_names, dict):
orig_index_names = list(index_names.values())
work_index_names = list(index_names.keys())
else:
# Skip index restoral
return pd_data
# Restore index (optional)
pd_data.set_index(work_index_names, inplace=True)
pd_data.index.names = orig_index_names
return pd_data
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/utils/data.py
|
data.py
|
from __future__ import annotations
import base64
import importlib
import json
import pickle
from dataclasses import asdict as dc_asdict
from dataclasses import is_dataclass
from io import BytesIO, StringIO
from logging import Logger
from typing import Any
import numpy as np
import numpy.typing as npt
__all__ = [
"AMPLO_JSON_KEY",
"get_superclasses",
"AmploJSONEncoder",
"AmploJSONDecoder",
"dump",
"dumps",
"load",
"loads",
]
AMPLO_JSON_KEY = "__amplo_json_type__"
def get_superclasses(cls: type) -> set[type]:
superclasses = {cls}
for base_cls in cls.__bases__:
superclasses.add(base_cls)
superclasses.update(get_superclasses(base_cls))
return superclasses
class AmploJSONEncoder(json.JSONEncoder):
"""
JSON Encoder extension.
Parameters
----------
args : Any
Passed to 'super()' (json.JSONEncoder).
allow_pickle : bool, default=False
If True, will use pickle to encode any otherwise not JSON-dumpable object.
kwargs : Any
Passed to 'super()' (json.JSONEncoder).
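Examples
--------
Round-tripping a numpy array through the module-level `dumps`/`loads` helpers:
>>> import numpy as np
>>> loads(dumps(np.array([1, 2])))
array([1, 2])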
"""
def __init__(self, *args, allow_pickle: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self.allow_pickle = allow_pickle
def default(self, obj):
# ---
# Irreversible type casts
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
# ---
# Reversible type casts
enc_name = None
cls_name = obj.__class__.__name__.lower()
# Find matching encoding function name
# NOTE: Custom encodings shall be found here.
if hasattr(self, f"encode_{cls_name}"):
enc_name = cls_name
elif is_dataclass(obj):
enc_name = "dataclass"
elif hasattr(obj, "get_params") and hasattr(obj, "set_params"):
enc_name = "params_object"
elif self.allow_pickle:
enc_name = "pickled_object"
# Get custom encoder and encode
if enc_name is not None:
encoder = getattr(self, f"encode_{enc_name}")
encoded = encoder(obj)
encoded[AMPLO_JSON_KEY] = enc_name
return encoded
# Fallback: Let the base class default method raise the TypeError
else:
super().default(obj)
def encode_logger(self, obj: Logger) -> dict[str, str | int]:
return {"name": obj.name, "level": obj.level}
def encode_ndarray(self, obj: npt.NDArray[Any]) -> dict[str, str | list[Any]]:
return {
"dtype": str(obj.dtype),
"values": json.loads(self.encode(obj.tolist())),
}
def encode_dataclass(self, obj: Any) -> dict[str, str]:
return {
"module": obj.__module__,
"class": obj.__class__.__name__,
"fields": json.loads(self.encode(dc_asdict(obj))),
}
def encode_params_object(self, obj: Any) -> dict[str, Any]:
params = obj.get_params(deep=False)
settings = {k: v for k, v in vars(obj).items() if k not in params}
return {
"module": obj.__module__,
"class": obj.__class__.__name__,
"params": json.loads(self.encode(params)),
"settings": json.loads(self.encode(settings)),
}
def encode_pickled_object(self, obj: Any) -> dict[str, str]:
return {"pickle": base64.b64encode(pickle.dumps(obj)).decode("utf-8")}
class AmploJSONDecoder(json.JSONDecoder):
"""
JSON Decoder extension.
"""
def __init__(self, **kwargs):
kwargs["object_hook"] = self.object_hook
super().__init__(**kwargs)
def object_hook(self, obj):
try:
name = obj[AMPLO_JSON_KEY]
decoder = getattr(self, f"decode_{name}")
except (KeyError, AttributeError):
return obj
else:
return decoder(obj)
def decode_logger(self, enc: dict[str, str | int]) -> Logger:
assert isinstance(enc["name"], str)
return Logger(enc["name"], enc["level"])
def decode_ndarray(self, enc: dict[str, str | list[Any]]) -> npt.NDArray[Any]:
assert isinstance(enc["values"], list)
assert isinstance(enc["dtype"], str)
return np.array(enc["values"], dtype=np.dtype(enc["dtype"]))
def decode_dataclass(self, enc: dict[str, str | dict[str, Any]]) -> Any:
assert isinstance(enc["module"], str)
assert isinstance(enc["class"], str)
assert isinstance(enc["fields"], dict)
# Import class
module = importlib.import_module(enc["module"])
class_ = getattr(module, enc["class"])
# Create dataclass
return class_(**enc["fields"])
def decode_params_object(self, enc: dict[str, Any]) -> Any:
# Import class
module = importlib.import_module(enc["module"])
class_ = getattr(module, enc["class"])
if not (hasattr(class_, "get_params") and hasattr(class_, "set_params")):
raise ValueError("Expected to decode a class that implements get_params().")
# Translate legacy params and settings
for base_class in get_superclasses(class_):
if hasattr(base_class, "_legacy_names"):
legacy_names = getattr(base_class, "_legacy_names")
if callable(legacy_names) and isinstance(legacy_names(), dict):
for legacy_key, new_key in legacy_names().items():
if legacy_key in enc["params"]:
enc["params"][new_key] = enc["params"].pop(legacy_key)
elif legacy_key in enc["settings"]:
enc["settings"][new_key] = enc["settings"].pop(legacy_key)
# Encode object and inject params & settings
obj = class_(**enc["params"])
for key, value in enc["settings"].items():
setattr(obj, key, value)
return obj
def decode_pickled_object(self, obj: dict[str, str]) -> Any:
return pickle.loads(base64.b64decode(obj["pickle"]))
def dump(obj: Any, fp: StringIO, *, allow_pickle: bool = False, **kwargs) -> None:
return json.dump(obj, fp, cls=AmploJSONEncoder, **kwargs, allow_pickle=allow_pickle)
def dumps(obj: Any, *, allow_pickle: bool = False, **kwargs) -> str:
return json.dumps(obj, cls=AmploJSONEncoder, **kwargs, allow_pickle=allow_pickle)
def load(fp: BytesIO | StringIO, **kwargs) -> Any:
return json.load(fp, cls=AmploJSONDecoder, **kwargs)
def loads(s: str | bytes, **kwargs) -> Any:
return json.loads(s, cls=AmploJSONDecoder, **kwargs)
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/utils/json.py
|
json.py
|
from __future__ import annotations
import json
import os
import re
from logging import Logger
from pathlib import Path
from time import time
from typing import Any, Iterable
from warnings import warn
import numpy as np
import pandas as pd
from requests import HTTPError
from amplo.api.platform import AmploPlatformAPI
from amplo.api.storage import AzureBlobDataAPI
from amplo.utils.logging import get_root_logger
__all__ = [
"boolean_input",
"parse_json",
"NpEncoder",
"get_file_metadata",
"merge_logs",
]
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, pd.Series):
return obj.to_list()
if isinstance(obj, pd.DataFrame):
return obj.to_json()
return super().default(obj)
def boolean_input(question: str) -> bool:
x = input(question + " [y / n]")
if x.lower() == "n" or x.lower() == "no":
return False
elif x.lower() == "y" or x.lower() == "yes":
return True
else:
warn('Sorry, I did not understand. Please answer with "n" or "y"')
return boolean_input(question)
def parse_json(json_string: str | dict[Any, Any]) -> str | dict[Any, Any]:
if isinstance(json_string, dict):
return json_string
else:
try:
return json.loads(
json_string.replace("'", '"')
.replace("True", "true")
.replace("False", "false")
.replace("nan", "NaN")
.replace("None", "null")
)
except json.decoder.JSONDecodeError:
warn(f"Cannot validate, impassable JSON: {json_string}")
return json_string
def get_file_metadata(file_path: str | Path) -> dict[str, str | float]:
"""
Get file metadata from given path.
Parameters
----------
file_path : str or Path
File path.
Returns
-------
dict of {str: str or float}
File metadata.
Raises
------
FileNotFoundError
When the path does not exist.
IsADirectoryError
When the path resolves a directory, not a file.
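Examples
--------
A sketch with an illustrative path:
>>> get_file_metadata("data/log_1.csv")  # doctest: +SKIP
{'file_name': 'log_1.csv', 'full_path': '...', 'last_modified': ...}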
"""
from amplo.utils import check_dtypes
check_dtypes(("file_path", file_path, (str, Path)))
file_path = Path(file_path)
if not file_path.exists():
raise FileNotFoundError(f"File does not exist: '{file_path}'")
if not file_path.is_file():
raise IsADirectoryError(f"Path is not a file: '{file_path}'")
return {
"file_name": str(file_path.name),
"full_path": str(file_path.resolve()),
# "creation_time": os.path.getctime(str(file_path)),
"last_modified": os.path.getmtime(str(file_path)),
}
def _get_api_clients(
azure: tuple[str, str] | bool = False,
platform: tuple[str, str] | bool | None = None,
):
"""
Gathers the api clients for merge_logs
"""
from amplo.api.platform import AmploPlatformAPI
if not azure:
blob_api = None
else:
blob_api = AzureBlobDataAPI.from_os_env(
*azure if isinstance(azure, tuple) else (None, None)
)
# Mirror azure parameter when platform is not set
if platform is None:
platform = bool(azure)
# Get amplo platform client
if not platform:
platform_api = None
else:
platform_api = AmploPlatformAPI.from_os_env(
*platform if isinstance(platform, tuple) else (None, None)
)
return blob_api, platform_api
def _get_folders(
parent_folder: str | Path,
blob_api: AzureBlobDataAPI | None = None,
more_folders: list[str | Path] | None = None,
) -> list[Path]:
"""
Lists folders for merge_logs
"""
folders: list[Path]
if blob_api:
folders = [Path(f) for f in blob_api.ls_folders(parent_folder)]
else:
if not Path(parent_folder).exists():
raise ValueError(f"{parent_folder} directory does not exist.")
folders = [
folder for folder in Path(parent_folder).iterdir() if folder.is_dir()
]
# Add more_folders
if more_folders:
folders += [Path(f) for f in more_folders]
return [Path(f) for f in folders]
def _read_files_in_folders(
folders: Iterable[str | Path],
target: str,
blob_api: AzureBlobDataAPI | None = None,
logger: Logger | None = None,
) -> tuple[list[str], pd.DataFrame, dict[str, dict[str, Any]]]:
"""
Use pandas to read all non-hidden and non-empty files into a DataFrame.
Parameters
----------
folders : iterable of (str or Path)
Directory names.
target : str
Target column & directory name
blob_api : AzureBlobDataAPI or None, optional, default: None
If None, tries to read data from local folder, else from Azure
logger : Logger or None, optional, default: None
When provided, will log progress every 90 seconds.
Returns
-------
filenames : list of str
data : pd.DataFrame
All files of the folders merged into one multi-indexed DataFrame.
metadata : dict of {str : dict of {str : str or float}}
Warnings
--------
UserWarning
When any directory is empty, or has no supported file type.
"""
# Map folders to pathlib.Path object
folder_paths = [Path(f) for f in folders]
# Initialize
file_names, data, metadata = [], [], {}
last_time_logged = time()
for folder_count, folder in enumerate(sorted(folder_paths)):
# List all files
if blob_api:
files = [Path(f) for f in blob_api.ls_files(folder)]
else:
files = [f for f in folder.iterdir() if f.is_file()]
# Remove hidden files
hidden_files = [f for f in files if re.match(r"^\..*", f.name)]
files = list(set(files) - set(hidden_files))
# Remove empty files
if blob_api:
empty_files = [f for f in files if blob_api.get_size(f) == 0]
else:
empty_files = [f for f in files if f.stat().st_size == 0]
files = list(set(files) - set(empty_files))
# Sanity check
if not files:
warn(f"Directory is empty and thus skipped: '{folder}'")
continue
# Read files
for i, file_ in enumerate(sorted(files)):
# read_pandas() may raise an EmptyDataError when the file has no content.
# The try...except catches such errors and warns the user instead.
try:
if blob_api:
datum = blob_api.read_pandas(file_, low_memory=False)
metadatum = blob_api.get_metadata(file_)
else:
datum = pd.read_parquet(file_)
metadatum = get_file_metadata(file_)
except pd.errors.EmptyDataError:
warn(f"Empty file detected and thus skipped: '{file_}'")
continue
# Convert to dataframe
if logger:
logger.debug(
f"{file_} {len(datum)} samples, {len(datum.keys())} columns."
)
if isinstance(datum, pd.Series):
datum = datum.to_frame()
# Set multi-index
datum = datum.set_index(
pd.MultiIndex.from_product(
[[str(file_)], datum.index.values], names=["log", "index"]
)
)
# Add target
datum[target] = (folder.name == target) * 1
# Append
file_names.append(str(file_))
data.append(datum)
metadata[str(file_)] = metadatum
if logger and time() - last_time_logged > 90:
last_time_logged = time()
logger.info(f".. progress: {folder_count / len(folder_paths) * 100:.1f} %")
# Concatenate data
df = pd.concat(data, axis=0)
# Validate data
if target not in df:
raise ValueError("Target not in data.")
if df[target].nunique() != 2:
raise ValueError(f"Number of unique labels is {df[target].nunique()} != 2.")
return file_names, df, metadata
def _map_datalogs_to_file_names(
file_names: list[str],
platform_api: AmploPlatformAPI | None = None,
logger: Logger | None = None,
) -> dict[str, Any]:
"""
Get datalogs for every filename.
Parameters
----------
file_names : list of str
Files names to get datalogs from - if available.
platform_api : AmploPlatformAPI or None, optional, default: None
API to get datalogs from.
logger : Logger or None, optional, default: None
When provided, will log progress every 90 seconds.
Returns
-------
dict of {str : dict}
Datalogs for every filename, keyed by file name.
"""
if not platform_api:
return {}
# It is assumed that the 6th and 5th path positions of the (first) filename,
# counting from right to left, contain the team and machine name, respectively.
# E.g., "Team/Machine/data/Category/Issue/log_file.csv"
# Remove path prefixes, otherwise datalogs will not be found
file_names = ["/".join(str(fname).split("/")[-6:]) for fname in file_names]
# Extract team and machine
try:
team, machine = file_names[0].split("/")[-6:-4]
except IndexError:
warn("Got an empty list of file names")
return {}
# Get datalog for each filename
datalogs = {}
last_time_logged = time()
for file_count, fname in enumerate(file_names):
try:
datalog = platform_api.get_datalog(team, machine, fname)
except HTTPError:
# No matching datalog found. Do still append it to preserve the order.
datalog = {}
datalogs[fname] = datalog
if logger and time() - last_time_logged > 90:
last_time_logged = time()
logger.info(f".. progress: {file_count / len(file_names) * 100:.1f} %")
return datalogs
def _mask_intervals(datalogs: dict[str, Any], data: pd.DataFrame) -> pd.DataFrame:
"""
Masks the data with the intervals given by the datalogs.
Parameters
----------
datalogs : dict of {str : dict}
Mapping from file name to datalog; each datalog should contain the keys 'selected' and 'datetime_col'.
data : pd.DataFrame
Data for splitting.
Returns
-------
data_out : pd.DataFrame
Selected data.
Warnings
--------
UserWarning
When no valid match for the start or stop time of the data interval was found,
i.e. when the time difference is more than 1 second.
"""
for filename in data.index.get_level_values("log").unique():
# Get intervals and timestamp column from datalog
datalog = datalogs.get(filename, {})
intervals = datalog.get("selected", [])
ts_col = datalog.get("datetime_col", "")
# Validate
if not intervals or not ts_col:
continue
elif ts_col not in data:
warn(f"Cannot select intervals as the column '{ts_col}' is not present.")
continue
# Convert ts_col
if not pd.api.types.is_numeric_dtype(data[ts_col]):
data[ts_col] = (
pd.to_datetime(data[ts_col], errors="coerce").view(int) / 10**9
)
# Extract intervals
drop_mask = True
for interval in intervals:
ts_first, ts_last = interval
drop_mask = (
(data[ts_col] < ts_first) | (data[ts_col] > ts_last)
) & drop_mask
if isinstance(drop_mask, pd.Series) and not drop_mask.loc[filename].any():
continue
data.drop(data.loc[(filename, drop_mask), :].index, inplace=True)
return data
def merge_logs(
parent: str | Path,
target: str,
more_folders: list[str | Path] | None = None,
azure: tuple[str, str] | bool = False,
platform: tuple[str, str] | bool | None = None,
) -> tuple[pd.DataFrame, dict[str, dict[str, Any]]]:
"""
Combine log files of all subdirectories into a multi-indexed DataFrame.
The function can handle logs from a local directory as well as data coming from an
Azure blob storage. For the latter case it is furthermore capable to select
intervals using Amplo's datalogs.
Notes
-----
Make sure that each protocol is located in a subdirectory whose name represents the
respective label.
An exemplary directory structure of ``parent``:
``
parent_folder
├─ Label_1
│ ├─ Log_1.*
│ └─ Log_2.*
├─ Label_2
│ └─ Log_3.*
└─ ...
``
Parameters
----------
parent : str or Path
Directory that contains subdirectories with tabular data files.
target : str
The target folder.
more_folders : list of str or Path, optional
Additional folder names with tabular data files to append.
azure : (str, str) or bool, default: False
Use this parameter to indicate that data is in Azure blob storage.
If False, it is assumed that data origins from local directory.
If True, the AzureBlobDataAPI is initialized with default OS env variables.
Otherwise, it will use the tuple to initialize the api.
platform : (str, str) or bool or None, default: None
Use this parameter for selecting data according to Amplo's datalogs.
If None, its value is set to bool(azure).
If False, no datalogs information will be used.
If True, the AmploPlatformAPI is initialized with default OS env variables.
Otherwise, it will use the tuple to initialize the api.
Returns
-------
data : pd.DataFrame
All files of the folders merged into one multi-indexed DataFrame.
Multi-index names are 'log' and 'index'.
metadata : dict of {str : dict of {str : str or float}}
Metadata of merged data.
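Examples
--------
A local-directory sketch (path and label are illustrative):
>>> data, metadata = merge_logs("./logs", target="Failure")  # doctest: +SKIP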
"""
from amplo.utils import check_dtypes
logger = get_root_logger()
check_dtypes(
("parent_folder", parent, (str, Path)),
("target_col", target, str),
("more_folders", more_folders, (type(None), list)),
)
# Get azure blob client
blob_api, platform_api = _get_api_clients(azure, platform)
# Get child folders
folders = _get_folders(parent, blob_api, more_folders)
if target not in [f.name for f in folders]:
raise ValueError(f"Target {target} not present in folders.")
logger.info(f"Found {len(folders)} folders.")
# Pandas read files
fnames, data, metadata = _read_files_in_folders(folders, target, blob_api, logger)
logger.info(f"Found {len(fnames)} files.")
# Masking data
if platform_api:
logger.info("Reading datalogs from platform")
datalogs = _map_datalogs_to_file_names(fnames, platform_api, logger)
if datalogs:
logger.info("Masking intervals from datalogs")
data = _mask_intervals(datalogs, data)
return data, metadata
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/utils/io.py
|
io.py
|
from __future__ import annotations
from typing import Any, Callable
import lightgbm
import numpy as np
from lightgbm import LGBMClassifier as _LGBMClassifier
from sklearn.model_selection import train_test_split
from amplo.classification._base import BaseClassifier
from amplo.utils import check_dtypes
def _validate_lightgbm_callbacks(callbacks) -> list[Callable[..., Any]]:
if not callbacks:
return []
valid_callbacks = []
for cb in callbacks:
if not isinstance(cb, str):
raise ValueError(f"Expected a string but got '{cb}' of type '{type(cb)}'.")
if cb.startswith("early_stopping_rounds="):
n_rounds = int(cb.removeprefix("early_stopping_rounds="))
valid_callbacks.append(lightgbm.early_stopping(n_rounds, verbose=False))
else:
raise NotImplementedError(f"Unknown callback '{cb}'.")
return valid_callbacks
class LGBMClassifier(BaseClassifier):
"""
Amplo wrapper for lightgbm.LGBMClassifier.
Parameters
----------
callbacks : list of str, optional
The following callbacks are currently supported:
- early stopping, "early_stopping_rounds=100"
test_size : float, default: 0.1
Test size for train-test-split in fitting the model.
random_state : int, default: None
Random state for train-test-split in fitting the model.
verbose : {0, 1, 2}, default: 0
Verbose logging.
**model_params : Any
Model parameters for underlying lightgbm.LGBMClassifier.
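Examples
--------
A minimal fit sketch on synthetic data:
>>> import numpy as np
>>> x, y = np.random.rand(100, 4), np.random.randint(0, 2, 100)
>>> model = LGBMClassifier(test_size=0.2).fit(x, y)  # doctest: +SKIP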
"""
model: _LGBMClassifier # type hint
def __init__(
self,
callbacks: list[str] | None = None,
test_size: float = 0.1,
random_state: int | None = None,
verbose: int = 0,
**model_params,
):
# Verify input dtypes and integrity
check_dtypes(
("callbacks", callbacks, (type(None), list)),
("test_size", test_size, float),
("random_state", random_state, (type(None), int)),
("model_params", model_params, dict),
)
if not 0 <= test_size < 1:
raise ValueError(f"Invalid attribute for test_size: {test_size}")
# Set up callbacks
callbacks = callbacks or []
for cb_name, cb_default_value in [("early_stopping_rounds", 100)]:
# Skip if already present in callbacks
if any(callback.startswith(cb_name) for callback in callbacks):
continue
# Pop model parameters into callbacks
callbacks.append(f"{cb_name}={model_params.pop(cb_name, cb_default_value)}")
# Set up model parameters
default_model_params = {
"n_estimators": 1000, # number of boosting rounds
"force_col_wise": True, # reduce memory cost
"verbosity": verbose - 1, # don't use "verbose" due to self.reset()
}
for k, v in default_model_params.items():
if k not in model_params:
model_params[k] = v
model = _LGBMClassifier(**model_params)
# Set attributes
self.callbacks = callbacks
self.test_size = test_size
self.random_state = random_state
super().__init__(model=model, verbose=verbose)
def fit(self, x, y=None, **fit_params):
self.classes_ = np.unique(y)
# Set up fitting callbacks
callbacks = _validate_lightgbm_callbacks(self.callbacks)
# Split data and fit model
xt, xv, yt, yv = train_test_split(
x, y, stratify=y, test_size=self.test_size, random_state=self.random_state
)
self.model.fit(
xt, yt, eval_set=[(xv, yv)], callbacks=callbacks, eval_metric="logloss"
)
self.is_fitted_ = True
return self
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/classification/lgbm.py
|
lgbm.py
|
import numpy as np
from sklearn.ensemble import StackingClassifier as _StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from amplo.base.exceptions import NotFittedError
from amplo.classification._base import BaseClassifier
from amplo.utils import check_dtypes
def _get_default_estimators(n_samples=None):
defaults = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"GaussianNB": GaussianNB,
"KNeighborsClassifier": KNeighborsClassifier,
"LogisticRegression": LogisticRegression,
}
if n_samples is not None and n_samples < 5000:
defaults.update({"SVC": SVC})
return defaults
def _make_estimator_stack(estimators, add_defaults=True, n_samples=None):
"""
Make a stack of estimators for the stacking model.
Parameters
----------
estimators : list of str
List of estimators for the stack.
add_defaults : bool, default: True
Whether to add default estimators to the stack.
n_samples : int, optional
(Expected) number of samples to determine the default estimators.
Returns
-------
list of (str, estimator)
Stack of estimators.
"""
from amplo.automl.modelling import get_model
check_dtypes(
("estimators", estimators, list),
*[(f"estimators_item: `{est}`", est, str) for est in estimators],
)
# Initialize
stack = {}
# Add default models
if add_defaults:
for model_name, model in _get_default_estimators(n_samples).items():
stack[model_name] = model() # initialize model
# Add Amplo models
for model_name in estimators:
if model_name in stack:
# Skip default models
continue
stack[model_name] = get_model(model_name)
return [(key, value) for key, value in stack.items()]
def _get_final_estimator(n_samples=None, n_features=None):
check_dtypes(
("n_samples", n_samples, (type(None), int)),
("n_features", n_features, (type(None), int)),
)
many_samples = not n_samples or n_samples > 10_000
many_features = not n_features or n_features > 100
solver = "lbfgs" if many_samples or many_features else "sag"
return LogisticRegression(max_iter=2000, solver=solver)
class StackingClassifier(BaseClassifier):
"""
Stacking classifier.
Parameters
----------
add_to_stack : list of str, optional
List of estimators for the stack of estimators.
add_defaults_to_stack : bool, default: True
Whether to add default estimators to the stack. This option will be set to True
when the `add_to_stack` parameter is None.
n_samples : int, optional
(Expected) number of samples.
n_features : int, optional
(Expected) number of features.
verbose : {0, 1, 2}, default: 0
Verbose logging.
**model_params : Any
Model parameters for underlying models.
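Examples
--------
A sketch adding a model to the default stack (assumes the name is resolvable by
amplo.automl.modelling.get_model):
>>> model = StackingClassifier(add_to_stack=["LGBMClassifier"])  # doctest: +SKIP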
"""
model: _StackingClassifier # type hint
def __init__(
self,
add_to_stack=None,
add_defaults_to_stack=True,
n_samples=None,
n_features=None,
verbose=0,
**model_params,
):
check_dtypes(("add_defaults_to_stack", add_defaults_to_stack, bool))
# Set attributes
if add_to_stack is None:
add_to_stack = []
add_defaults_to_stack = True
model = _StackingClassifier(
_make_estimator_stack(add_to_stack, add_defaults_to_stack, n_samples),
_get_final_estimator(n_samples, n_features),
)
model.set_params(**model_params)
# Set attributes
self.add_to_stack = add_to_stack
self.add_defaults_to_stack = add_defaults_to_stack
self.n_samples = n_samples
self.n_features = n_features
super().__init__(model=model, verbose=verbose)
def fit(self, x, y=None, **fit_params):
# When `self.n_samples` or `self.n_features` is None or badly initialized, we
# reset the stacking estimator as its stack and final estimator depend on that.
if self.n_samples != x.shape[0] or self.n_features != x.shape[1]:
self.n_samples, self.n_features = x.shape
# Get previous model parameters
prev_model_params = self._get_model_params()
# Init new stacking classifier
self.model = _StackingClassifier(
_make_estimator_stack(
self.add_to_stack or [], self.add_defaults_to_stack, self.n_samples
),
_get_final_estimator(self.n_samples, self.n_features),
)
# Update model parameters from previous model
model_params = self._get_model_params()
for key in set(model_params).intersection(prev_model_params):
model_params[key] = prev_model_params[key]
self.model.set_params(**model_params)
# Normalize
mean = np.mean(x, axis=0)
std = np.std(x, axis=0)
std[std == 0] = 1
self._mean = np.asarray(mean).reshape(-1).tolist()
self._std = np.asarray(std).reshape(-1).tolist()
x -= mean
x /= std
# Fit model
self.model.fit(x, y)
self.is_fitted_ = True
return self
def predict(self, x, y=None, **kwargs):
mean = np.array(self._mean)
std = np.array(self._std)
return self.model.predict((x - mean) / std, **kwargs).reshape(-1)
def predict_proba(self, x, **kwargs):
if not self.is_fitted_:
raise NotFittedError
mean = np.array(self._mean)
std = np.array(self._std)
return self.model.predict_proba((x - mean) / std, **kwargs)
def _get_model_params(self, deep=True):
model_params = self.model.get_params(deep)
non_serializable = ["estimators", "final_estimator"]
if deep:
non_serializable.extend(name for name, _ in model_params["estimators"])
for key in non_serializable:
model_params.pop(key)
return model_params
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/classification/stacking.py
|
stacking.py
|
from __future__ import annotations
import numpy as np
import pandas as pd
from catboost import CatBoostClassifier as _CatBoostClassifier
from sklearn.model_selection import train_test_split
from amplo.classification._base import BaseClassifier
from amplo.utils import check_dtypes
class CatBoostClassifier(BaseClassifier):
"""
Amplo wrapper for catboost.CatBoostClassifier.
Parameters
----------
test_size : float, default: 0.1
Test size for train-test-split in fitting the model.
random_state : int, default: None
Random state for train-test-split in fitting the model.
verbose : {0, 1, 2}, default: 0
Verbose logging.
**model_params : Any
Model parameters for underlying catboost.CatBoostClassifier.
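Examples
--------
A minimal fit sketch on synthetic data:
>>> import numpy as np, pandas as pd
>>> x = pd.DataFrame(np.random.rand(100, 4))
>>> y = pd.Series(np.random.randint(0, 2, 100))
>>> model = CatBoostClassifier().fit(x, y)  # doctest: +SKIP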
"""
model: _CatBoostClassifier # type hint
def __init__(
self,
test_size: float = 0.1,
random_state: int | None = None,
verbose=0,
**model_params,
):
# Verify input dtypes and integrity
check_dtypes(
("test_size", test_size, float),
("random_state", random_state, (type(None), int)),
("model_params", model_params, dict),
)
if not 0 <= test_size < 1:
raise ValueError(f"Invalid attribute for test_size: {test_size}")
# Set up model
default_model_params = {
"n_estimators": 1000,
"auto_class_weights": "Balanced",
"allow_writing_files": False,
"early_stopping_rounds": 100,
"use_best_model": True,
"verbose": verbose,
}
for k, v in default_model_params.items():
if k not in model_params:
model_params[k] = v
model = _CatBoostClassifier(**model_params)
# Set attributes
self.test_size = test_size
self.random_state = random_state
super().__init__(model=model, verbose=verbose)
def fit(self, x: pd.DataFrame, y: pd.Series, **fit_params):
# Split data and fit model
xt, xv, yt, yv = train_test_split(
x, y, stratify=y, test_size=self.test_size, random_state=self.random_state
)
self.model.fit(
xt,
yt,
eval_set=[(xv, yv)],
early_stopping_rounds=self.model.get_params().get("early_stopping_rounds"),
use_best_model=self.model.get_params().get("use_best_model"),
)
self.is_fitted_ = True
self.classes_ = np.unique(y)
return self
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/classification/catboost.py
|
catboost.py
|
from __future__ import annotations
from copy import deepcopy
import pandas as pd
from amplo.base.exceptions import NotFittedError
from amplo.base.objects import BaseEstimator
from amplo.classification._base import BaseClassifier
class PartialBoostingClassifier(BaseClassifier):
"""
Amplo wrapper for classification boosting models.
The number of estimators being used in the prediction are limited.
Parameters
----------
model
Boosting model to wrap.
step : int
Number of iterations/estimators to limit the model on predictions.
verbose : {0, 1, 2}
Verbose logging.
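Examples
--------
Truncating a fitted booster to its first 50 estimators (sketch; `fitted_model`
and `x` are illustrative):
>>> partial = PartialBoostingClassifier(fitted_model, step=50)  # doctest: +SKIP
>>> y_pred = partial.predict(x)  # doctest: +SKIP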
"""
_SUPPORTED_MODELS = [
"AdaBoostClassifier",
"GradientBoostingClassifier",
"LGBMClassifier",
"XGBClassifier",
"CatBoostClassifier",
]
def __init__(self, model, step, verbose=0):
model = deepcopy(model)
super().__init__(model=model, verbose=verbose)
model_name = type(model).__name__
if model_name not in self._SUPPORTED_MODELS:
raise ValueError(f"Unsupported model {model_name}")
if model_name in ("AdaBoostClassifier", "GradientBoostingClassifier"):
model.estimators_ = model.estimators_[:step]
self.step = int(step)
self.classes_ = model.classes_
def _get_prediction_kwargs(self):
model_name = type(self.model).__name__
if model_name in ("AdaBoostClassifier", "GradientBoostingClassifier"):
return {}
elif model_name == "LGBMClassifier":
return {"num_iterations": self.step}
elif model_name == "XGBClassifier":
return {"iteration_range": (0, self.step)}
elif model_name == "CatBoostClassifier":
return {"ntree_end": self.step}
else:
raise AttributeError(f"Unsupported model {model_name}")
def fit(self, x: pd.DataFrame, y: pd.Series, *args, **kwargs):
self.classes_ = y.unique()
self.model.fit(x, y)
self.is_fitted_ = True
return self
def predict(self, x: pd.DataFrame, y: pd.Series | None = None, **kwargs):
return self.model.predict(x, **kwargs, **self._get_prediction_kwargs())
def predict_proba(self, x: pd.DataFrame, *args, **kwargs):
if not self.is_fitted_:
raise NotFittedError
return self.model.predict_proba(x, **kwargs, **self._get_prediction_kwargs())
@staticmethod
def n_estimators(model: BaseEstimator) -> int:
model_name = type(model).__name__
if model_name in ("AdaBoostClassifier", "GradientBoostingClassifier"):
return len(model.estimators_) # type: ignore
elif model_name in ("LGBMClassifier", "XGBClassifier"):
return model.model.n_estimators
elif model_name == "CatBoostClassifier":
return model.model.tree_count_
else:
raise ValueError(f"Unsupported model {model_name}")
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/classification/partial_boosting.py
|
partial_boosting.py
|
from __future__ import annotations
import numpy as np
import pandas as pd
import xgboost.callback
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier as _XGBClassifier
from amplo.classification._base import BaseClassifier
from amplo.utils import check_dtypes
def _validate_xgboost_callbacks(callbacks):
if not callbacks:
return []
valid_callbacks = []
for cb in callbacks:
if not isinstance(cb, str):
raise ValueError(f"Expected a string but got '{cb}' of type '{type(cb)}'.")
if cb.startswith("early_stopping_rounds="):
n_rounds = int(cb.removeprefix("early_stopping_rounds="))
valid_callbacks.append(xgboost.callback.EarlyStopping(n_rounds))
else:
raise NotImplementedError(f"Unknown callback '{cb}'.")
return valid_callbacks
class XGBClassifier(BaseClassifier):
"""
Amplo wrapper for xgboost.XGBClassifier.
Parameters
----------
callbacks : list of str, optional
The following callbacks are currently supported:
- early stopping, "early_stopping_rounds=100"
test_size : float, default: 0.1
Test size for train-test-split in fitting the model.
random_state : int, default: None
Random state for train-test-split in fitting the model.
verbose : {0, 1, 2}, default: 0
Verbose logging.
**model_params : Any
Model parameters for underlying xgboost.XGBClassifier.
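Examples
--------
A minimal fit sketch with an explicit early-stopping callback:
>>> import numpy as np
>>> x, y = np.random.rand(100, 4), np.random.randint(0, 2, 100)
>>> model = XGBClassifier(callbacks=["early_stopping_rounds=50"]).fit(x, y)  # doctest: +SKIP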
"""
model: _XGBClassifier # type hint
def __init__(
self,
callbacks: list[str] | None = None,
test_size: float = 0.1,
random_state: int | None = None,
verbose: int = 0,
**model_params,
):
# Verify input dtypes and integrity
check_dtypes(
("callbacks", callbacks, (type(None), list)),
("test_size", test_size, float),
("random_state", random_state, (type(None), int)),
("model_params", model_params, dict),
)
if not 0 <= test_size < 1:
raise ValueError(f"Invalid attribute for test_size: {test_size}")
# Set up callbacks
callbacks = callbacks or []
for cb_name, cb_default_value in [("early_stopping_rounds", 100)]:
# Skip if already present in callbacks
if any(callback.startswith(cb_name) for callback in callbacks):
continue
# Pop model parameters into callbacks
callbacks.append(f"{cb_name}={model_params.pop(cb_name, cb_default_value)}")
# Set up model
default_model_params = {
"n_estimators": 100, # number of boosting rounds
"random_state": random_state,
"verbosity": verbose,
}
for k, v in default_model_params.items():
if k not in model_params:
model_params[k] = v
model = _XGBClassifier(
**model_params, callbacks=_validate_xgboost_callbacks(callbacks)
)
# Set attributes
self.callbacks = callbacks
self.test_size = test_size
self.random_state = random_state
super().__init__(model=model, verbose=verbose)
def fit(self, x: pd.DataFrame, y: pd.Series, **fit_params):
self.reset()
self.classes_ = np.unique(y)
# Split data and fit model
xt, xv, yt, yv = train_test_split(
x, y, stratify=y, test_size=self.test_size, random_state=self.random_state
)
# Note that we have to set `verbose` in `fit`.
# Otherwise, it will still verbosely print the evaluation.
self.model.fit(xt, yt, eval_set=[(xv, yv)], verbose=bool(self.verbose))
self.is_fitted_ = True
return self
|
Amplo
|
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/classification/xgb.py
|
xgb.py
|
# Amsync
Created with the aim that anyone with basic knowledge of Python can create any bot without much difficulty.
<br>
<br>
# Installation
```
pip install Amsync
```
<br>
<br>
# Minimal example
```py
from amsync import Bot, Msg
bot = Bot('email', 'password', prefix='/')
@bot.on()
async def ready():
print('Ready')
@bot.add()
async def hi(m: Msg):
await bot.send(f'Hi {m.nickname}')
bot.run()
```
**[Incredible documentation to create beautiful bots](https://github.com/ellandor/Amsync/blob/main/docs/docs.md)**
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/README.md
|
README.md
|
import sys
from sys import argv
from re import search
from subprocess import run as _run
from colorama import Fore, init as colorama_init  # aliased: a local init() command is defined below
from platform import system
from pathlib import Path
colorama_init()
def clear():
a = 'cls' if system() == 'Windows' else 'clear'
_run(a, shell=True)
def exit(m):
print(m)
sys.exit(1)
def create(name):
if Path('.git').exists():
_run('rmdir .git /s /q', shell=True)
return {
'Starting repository': 'git init',
'Adding files': 'git add .',
'Committing repository': 'git commit -m "Add in heroku"',
f'Creating {Fore.CYAN}{name} {Fore.WHITE}on heroku': f'heroku create {name}',
'Adding project on heroku': 'git push heroku master',
'Starting bot': 'heroku ps:scale worker=1'
}
def update():
return {
'Adding files': 'git add .',
'Committing repository': 'git commit -m "Update"',
'Updating project on heroku': 'git push heroku master',
}
def apps():
return {
'': 'heroku apps'
}
def destroy(app):
return {
'': f'heroku destroy {app} -c {app}'
}
def start():
return {
'': 'heroku ps:scale worker=1'
}
def stop():
return {
'': 'heroku ps:stop worker'
}
def restart():
return {
'': 'heroku ps:restart'
}
def workers():
return {
'': 'heroku ps'
}
def init():
with open('Procfile', 'w') as f:
f.write('worker: python bot.py')
with open('runtime.txt', 'w') as f:
f.write('python-3.8.9')
with open('requirements.txt', 'w') as f:
f.write('amsync')
if not Path('.env').exists():
print(f'{Fore.CYAN}Email{Fore.WHITE}: ', end='')
email = input()
print(f'{Fore.CYAN}Password{Fore.WHITE}: ', end='')
password = input()
with open('.env', 'w') as f:
f.write(f'EMAIL={email}\nPASSWORD={password}')
if not Path('bot.py').exists():
with open('bot.py', 'w') as f:
f.write(
"""
from amsync import Bot, Msg
bot = Bot()
@bot.on()
async def ready():
print('Ready')
@bot.add()
async def hi(m: Msg):
await bot.send(f'Hi {m.nickname}')
bot.run()
""".strip())
def run(cmds):
for text, cmd in cmds.items():
if text:
print(text)
tmp = _run(cmd, capture_output=True, text=True, shell=True)
if tmp.returncode:
print(f'{Fore.RED}Error in: {Fore.RESET}{cmd}')
if tmp.stderr:
exit(tmp.stderr)
else:
exit(tmp.stdout)
return tmp.stdout or tmp.stderr
def main():
args = ' '.join(argv[1:])
if args == 'init':
init()
print('Done')
if args == 'start':
print(run(start()))
if args == 'stop':
print(run(stop()))
if args == 'restart':
print(run(restart()))
if args == 'workers':
print(run(workers()))
if args == 'update':
run(update())
print('Done')
if 'create' in args:
try:
app = args.split()[1]
search(r'^([a-z]|[0-9]|-){3,}$', app).group(0)
run(create(app))
except IndexError:
exit('No project name')
except AttributeError:
exit('Invalid name')
elif args == 'apps':
print(run(apps()))
elif 'destroy' in args:
try:
app = args.split()[1]
print(run(destroy(app)))
except IndexError:
app = run(apps()).split('\n')[1:-2]
if not app or not app[0]:
exit('No project found')
while True:
clear()
for i, e in enumerate(app):
print(f'{Fore.CYAN}{i}. {Fore.WHITE}{e}')
try:
n = int(input('\nNumber: '))
if n < 0 or n >= len(app):
continue
except ValueError:
continue
break
clear()
print(run(destroy(app[n])))
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/scripts/amsync.py
|
amsync.py
|
from __future__ import annotations
from uuid import uuid4
from typing import (
Any,
Callable,
Literal,
NoReturn,
Dict
)
from pathlib import Path
from asyncio import gather
from ujson import dumps, dump, load
from aiohttp import request
from filetype import guess_mime
from pybase64 import b64encode
from .enum import MediaType
from .utils import (
get_value,
words,
on_limit,
fix_ascii,
to_list,
one_or_list
)
from .dataclass import (
Res,
Reply,
Msg,
ChatMsg,
Embed,
DataUser,
DataChat
)
from .exceptions import (
EmptyCom,
SmallReasonForBan,
AminoSays
)
__all__ = [
'Req',
'User',
'File',
'Chat',
'Community',
'My'
]
ignore_codes = [
1628 # Sorry, you cannot pick this member.. | Chat.config
]
headers: dict[str, str] = {'NDCDEVICEID': '0184a516841ba77a5b4648de2cd0dfcb30ea46dbb4e911a6ed122578b14c7b662c59f9a2c83101f5a1'}
actual_com: str | None = None
actual_chat: str | None = None
bot_id: str | None = None
API = 'https://service.narvii.com/api/v1/'
class Req:
async def new(
method: str,
url: str,
**kwargs
) -> Res:
"""
Create a request
**kwargs are the extra arguments of aiohttp.request
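Example (inside a coroutine; the URL is illustrative):
    res = await Req.new('get', 'https://example.com')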
"""
async with request(
method = method,
url = url,
**kwargs
) as res:
return await Res._make(res)
async def _req(
method: str,
url: str,
data: dict[str, Any] | None = None,
need_dumps: bool = True
) -> Res:
"""
Create a request for the amino api
Headers are automatically inserted into the request
#### need_dumps
Whether to apply ujson.dumps to the data
"""
res = await Req.new(
method = method,
url = API + url,
data = dumps(data) if need_dumps else data,
headers = headers
)
api_status_code = res.json['api:statuscode']
if not res.ok and api_status_code and api_status_code not in ignore_codes:
raise AminoSays(f"{res.json['api:message']}. Code: {api_status_code}")
return res
async def upload_media(file: str) -> str:
"""
Send a file to be used when posting a blog
Returns the file link
"""
return (
await _req(
'post',
'/g/s/media/upload',
await File.get(file),
False,
)
).json['mediaValue']
async def upload_chat_bg(file: str) -> str:
"""
Send a file to be used as chat background
Returns the file link
"""
return (
await _req(
'post',
'g/s/media/upload/target/chat-background',
await File.get(file),
False,
)
).json['mediaValue']
async def upload_chat_icon(file: str) -> str:
"""
Send a file to be used as chat icon
Returns the file link
"""
return (
await _req(
'post',
'g/s/media/upload/target/chat-cover',
await File.get(file),
False,
)
).json['mediaValue']
class Message:
def from_ws(self, j: Dict[str, Any]) -> Msg:
"""
Returns a Msg containing the information from the websocket message
Updates actual_com and actual_chat with the community and chat of the received message
"""
global actual_chat, actual_com
actual_chat = get_value(j, 'chatMessage', 'threadId', convert=str)
actual_com = get_value(j, 'ndcId', convert=str)
return Msg._make(j)
def from_chat(self, j: Dict[str, Any]) -> ChatMsg:
"""
Returns a ChatMsg containing information from Chat.messages messages
"""
return ChatMsg._make(j)
class _CreateData:
"""
Stores methods for creating Message.send data
"""
async def msg(
type: int,
msgs: list[str],
reply: Reply
) -> list[Dict[str, Any]]:
"""
Creates the data for sending a message
"""
return [
{'type': type, 'content': i, 'replyMessageId': reply}
if reply
else {'type': type, 'content': i}
for i in msgs
]
async def file(files: list[str | bytes]) -> list[Dict[str, Any]]:
"""
Creates the data for sending a file
"""
return [await File.process(i) for i in files]
async def embed(embed: Embed) -> list[Dict[str, Any]]:
"""
Creates the data for sending an embed
"""
if embed.image:
embed.image = [[100, await upload_media(embed.image), None]]
return [
{
'content': embed.msg_text,
'attachedObject': {
'link': embed.link,
'title': embed.title,
'content': embed.text,
'mediaList': embed.image,
},
}
]
async def send(
self,
*msgs: list[str],
files: str | list[str] | None = None,
type_: int | None = 0,
embed: Embed | None = None,
reply: str | None = None,
com: str | None = None,
chat: str | None = None,
) -> Res | list[Res]:
"""
Send a message, file, embed or reply
#### reply
Message id to reply
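#### Example
A minimal sketch (normally reached through Bot.send, which delegates here),
assuming actual_com and actual_chat were set by a received message:
```
await bot.send('Hello', 'World')     # two plain messages
await bot.send(files='image.png')    # a file; the path is hypothetical
```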
"""
com = com or actual_com
chat = chat or actual_chat
files = to_list(files)
if msgs:
data = await self._CreateData.msg(type_, msgs, reply)
elif files:
data = await self._CreateData.file(files)
else:
data = await self._CreateData.embed(embed)
async def foo(i) -> Res:
return await _req(
'post',
f'x{com}/s/chat/thread/{chat}/message',
data=i
)
return one_or_list(await gather(*[foo(i) for i in data]))
class User:
async def search(
uids: str | list[str],
com: str | None = None
) -> DataUser | list[DataUser]:
"""
Get profile information for a community user
"""
com = com or actual_com
uids = to_list(uids)
async def foo(uid: str) -> DataUser:
return DataUser._make((await _req('get', f'x{com}/s/user-profile/{uid}')).json['userProfile'])
return one_or_list(await gather(*[foo(uid) for uid in uids]))
async def ban(
uid: str,
*,
reason: str,
com: str | None = None
) -> Res:
"""
Ban a user
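Example (the uid is hypothetical); the reason must have at least three words:
```
await User.ban('00000000-0000-0000-0000-000000000000', reason='spamming in chat')
```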
"""
if words(reason) < 3:
raise SmallReasonForBan('Put a reason with at least three words')
return await _req(
'post',
f'x{com or actual_com}/s/user-profile/{uid}/ban',
data={'reasonType': 200, 'note': {'content': reason}},
)
async def unban(
uid: str,
*,
reason: str = '',
com: str | None = None
) -> Res:
"""
Unban a user
"""
return await _req(
'post',
f'x{com or actual_com}/s/user-profile/{uid}/unban',
data={'note': {'content': reason}} if reason else None,
)
class File:
"""
Stores methods for handling a file
"""
def type(file: str | bytes) -> Literal[MediaType.LINK, MediaType.BYTES, MediaType.PATH]:
"""
Checks whether the file is a link, bytes or path
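e.g. File.type('https://x.y/z.png') -> MediaType.LINK (illustrative URL)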
"""
if isinstance(file, str) and file.startswith('http'):
return MediaType.LINK
if isinstance(file, bytes):
return MediaType.BYTES
return MediaType.PATH
async def get(file: str | bytes) -> bytes:
"""
Returns the bytes of a file
If the file is a link, download the file
"""
type = File.type(file)
if type == MediaType.LINK:
async with request('get', file) as res:
return await res.read()
if type == MediaType.BYTES:
return file
with open(file, 'rb') as f:
return f.read()
def b64(file_bytes: bytes) -> str:
"""
Convert bytes to base64
"""
return b64encode(file_bytes).decode()
async def process(file: str | bytes) -> dict[str, Any] | NoReturn:
"""
Returns the data to be used Message.send
"""
if (
File.type(file) not in (MediaType.LINK, MediaType.BYTES)
and not Path(file).exists()
):
raise FileNotFoundError(file)
b = await File.get(file)
b64 = File.b64(b)
type = (guess_mime(b) or 'audio/mp3').split('/')
if type[-1] == 'gif':
return {
'mediaType': 100,
'mediaUploadValue': b64,
'mediaUploadValueContentType': 'image/gif',
'mediaUhqEnabled': True,
}
if type[0] == 'image':
return {
'mediaType': 100,
'mediaUploadValue': b64,
'mediaUhqEnabled': True,
}
if type[-1] == 'mp3':
return {
'type': 2,
'mediaType': 110,
'mediaUploadValue': b64,
'mediaUhqEnabled': True,
}
class Chat:
async def search(
chat: str | None = None,
com: str | None = None
) -> DataChat:
"""
Search for chat information
"""
return DataChat._make((await _req('get', f'x{com or actual_com}/s/chat/thread/{chat or actual_chat}')).json)
async def messages(
check: Callable[[ChatMsg], bool] = lambda _: True,
start: int | None = None,
end: int | None = None,
com: str | None = None,
chat: str | None = None,
) -> list[ChatMsg]:
"""
Returns a list containing the most recent to the oldest messages in a chat
### check
```
def check(m: ChatMsg):
return m.level > 8
await Chat.messages(check=check)
```
Get all messages from people with a level > 8
### start, end
```
msgs = (await Chat.messages())[10: 100]
msgs = await Chat.messages(start=10, end=100)
```
Both are equivalent, but the first fetches all the messages and then slices out messages 10-100,
while the second fetches only messages 10-100 instead of all of them, which is faster
"""
com = com or actual_com
chat = chat or actual_chat
messages = []
res = await _req(
'get',
f'x{com}/s/chat/thread/{chat}/message?v=2&pagingType=t&size=100',
)
token = res.json['paging']['nextPageToken']
for msg_ in res.json['messageList']:
if check(msg := MESSAGE.from_chat(msg_)):
messages.append(msg)
while True:
res = await _req(
'get',
f'x{com}/s/chat/thread/{chat}/message?v=2&pagingType=t&pageToken={token}&size=100',
)
for msg in res.json['messageList']:
if check(msg := MESSAGE.from_chat(msg)):
messages.append(msg)
if on_limit(messages, end):
break
try:
token = res.json['paging']['nextPageToken']
except KeyError:
break
return messages[start:end]
async def clear(
msgs: str | list[str] | None = None,
check: Callable[[ChatMsg], bool] = lambda _: True,
com: str | None = None,
chat: str | None = None,
start: int | None = None,
end: int | None = None,
) -> Res | list[Res]:
"""
Delete chat messages
## If msgs is None, all chat messages will be deleted
### msgs
Message ids to be deleted
### check
```
def check(m: ChatMsg):
return m.level > 8
await Chat.messages(check=check)
```
Delete all messages from people with a level > 8
### start, end
Explained in Chat.messages
"""
com = com or actual_com
chat = chat or actual_chat
msgs = (
to_list(msgs)
if msgs
else [
msg.id for msg in await Chat.messages(
check=check, com=com, chat=chat, start=start, end=end
)
]
)
async def foo(msg):
return await _req(
'post',
f'x{com}/s/chat/thread/{chat}/message/{msg}/admin',
data={'adminOpName': 102},
)
return one_or_list(await gather(*[foo(msg) for msg in msgs]))
async def members(
check: Callable[[DataUser], bool] = lambda _: True,
com: str | None = None,
chat: str | None = None,
start: int | None = None,
end: int | None = None,
) -> list[DataUser]:
"""
Returns members of a chat
### check
```
def check(m: DataUser):
return 'L' in m.nickname
await Chat.members(check=check)
```
Get all members that have 'L' in the nickname
### start, end
```
msgs = (await Chat.members())[10: 100]
msgs = await Chat.members(start=10, end=100)
```
Both are equivalent, but the first fetches all the members and then slices out members 10-100,
while the second fetches only members 10-100 instead of all of them, which is faster
"""
com = com or actual_com
chat = chat or actual_chat
async def foo(i):
res = await _req(
'get',
f'x{com}/s/chat/thread/{chat}/member?start={i}&size=100&type=default&cv=1.2',
)
return [
i for i in [
DataUser._make(i) for i in res.json['memberList'] if res.json['memberList']
]
if check(i)
]
members_count = (await _req(
'get', f'x{com}/s/chat/thread/{chat}'
)).json['thread']['membersCount']
MAX_MEMBERS_COUNT_IN_CHAT = 1000
pages = await gather(
*[
foo(i) for i in range(0, MAX_MEMBERS_COUNT_IN_CHAT, 100)
if i <= members_count
]
)
# Flatten all pages; taking only pages[0] would drop members beyond the first 100
return [member for page in pages for member in page][start:end]
async def join(
chats: str | list[str],
com: str | None = None
) -> Res | list[Res]:
"""
Enter a chat
"""
async def foo(i):
return await _req(
'post', f'x{com or actual_com}/s/chat/thread/{i}/member/{bot_id}'
)
return one_or_list(await gather(*[foo(i) for i in to_list(chats)]))
async def leave(
chats: str | list[str],
com: str | None = None
) -> Res | list[Res]:
"""
Leave a chat
"""
async def foo(i):
return await _req(
'delete', f'x{com or actual_com}/s/chat/thread/{i}/member/{bot_id}'
)
return one_or_list(await gather(*[foo(i) for i in to_list(chats)]))
async def create(
name: str,
text: str | None = None,
bg: str | bytes | None = None,
icon: str | bytes | None = None,
only_fans: bool = False,
invite_members: list[str] = [],
com: str | None = None
) -> Res:
"""
Create a chat
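Example sketch; the name and invitee id are illustrative:
```
await Chat.create('my-chat', text='Welcome!', invite_members=['<uid>'])
```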
"""
img = [100, await upload_chat_bg(bg), None] if bg else bg
data = {
'backgroundMedia': img,
'extensions': {
'bm': img,
'fansOnly': only_fans
},
'title': name,
'content': text,
'icon': await upload_chat_icon(icon) if icon else icon,
'inviteeUids': invite_members,
# need this to work
'type': 2,
'eventSource': 'GlobalComposeMenu'
}
return await _req('post', f'x{com or actual_com}/s/chat/thread', data=data)
async def delete(
chat: str | None = None,
com: str | None = None
) -> Res:
"""
Delete a chat
"""
return await _req('delete', f'x{com or actual_com}/s/chat/thread/{chat or actual_chat}')
async def edit(
name: str | None = None,
text: str | None = None,
bg: str | bytes | None = None,
pin: bool | None = None,
announcement: str | None = None,
only_view: bool | None = None,
members_can_invite: bool | None = None,
can_send_coins: bool | None = None,
change_adm_to: str | None = None,
com: str | None = None,
chat: str | None = None
) -> None:
"""
Edit a chat
"""
com = com or actual_com
chat = chat or actual_chat
info = await Chat.search(chat=chat)
if name or text:
data = {
'extensions': {
'bm': [100, await upload_chat_bg(bg), None] if bg else bg,
'fansOnly': info.only_fans
},
'title': name or info.name,
'content': text or info.text,
'icon': await upload_chat_icon(info.icon) if info.icon else info.icon,
# need this to work
'type': 2,
'eventSource': 'GlobalComposeMenu'
}
await _req('post', f'x{com}/s/chat/thread/{chat}', data=data)
if bg:
await _req('post', f'x{com}/s/chat/thread/{chat}/member/{bot_id}/background', data=await File.process(bg))
elif bg is False:
await _req('delete', f'x{com}/s/chat/thread/{chat}/member/{bot_id}/background')
if pin:
await _req('post', f'x{com}/s/chat/thread/{chat}/pin')
elif pin is False:
await _req('post', f'x{com}/s/chat/thread/{chat}/unpin')
if announcement:
await _req('post', f'x{com}/s/chat/thread/{chat}', data={'announcement': announcement, 'pinAnnouncement': True})
elif announcement is False:
await _req('post', f'x{com}/s/chat/thread/{chat}', data={'pinAnnouncement': False})
if only_view:
await _req('post', f'x{com}/s/chat/thread/{chat}/view-only/enable')
elif only_view is False:
await _req('post', f'x{com}/s/chat/thread/{chat}/view-only/disable')
if members_can_invite:
await _req('post', f'x{com}/s/chat/thread/{chat}/members-can-invite/enable')
elif members_can_invite is False:
await _req('post', f'x{com}/s/chat/thread/{chat}/members-can-invite/disable')
if can_send_coins:
await _req('post', f'x{com}/s/chat/thread/{chat}/tipping-perm-status/enable')
elif can_send_coins is False:
await _req('post', f'x{com}/s/chat/thread/{chat}/tipping-perm-status/disable')
if change_adm_to:
await _req('post', f'x{com}/s/chat/thread/{chat}/transfer-organizer', data={'uidList': [change_adm_to]})
async def change_co_hosts(
add: list[str] | str | None = None,
remove: list[str] | str | None = None,
com: str | None = None,
chat: str | None = None
) -> Res | list[Res]:
"""
Add or remove co-hosts
"""
com = com or actual_com
chat = chat or actual_chat
add = to_list(add)
if add:
return await _req('post', f'x{com}/s/chat/thread/{chat}/co-host', data={'uidList': add})
elif remove:
async def foo(i):
return await _req('delete', f'x{com}/s/chat/thread/{chat}/co-host/{i}')
return one_or_list(await gather(*[foo(i) for i in remove]))
async def save(filename: str | None = None) -> None:
"""
Saves all information from a chat to a .json
"""
chat = await Chat.search()
info = {
'name': chat.name,
'text': chat.text,
'announcement': chat.announcement,
'bg': chat.bg,
'icon': chat.icon,
'adm': chat.adm,
'co_hosts': chat.co_hosts,
'members_can_invite': chat.members_can_invite,
'can_send_coins': chat.can_send_coins,
'is_pinned': chat.is_pinned,
'only_view': chat.only_view,
'only_fans': chat.only_fans,
'members': [i.id for i in await Chat.members()]
}
n = 0
while Path(f'{n}.json').exists():
n += 1
with open(filename or f'{n}.json', 'w') as f:
dump(info, f, indent=4, escape_forward_slashes=False)
async def load(filename: str) -> str:
"""
Creates a chat containing the .json information created by Chat.save
The chat is first created with a uuid4 as its name so the program can identify the newly created chat;
it is then renamed to the correct name
"""
with open(filename, 'r') as file:
f = load(file)
tmp_chat_name = str(uuid4())
await Chat.create(
name = tmp_chat_name,
text = f['text'],
bg = f['bg'],
icon = f['icon'],
only_fans = f['only_fans'],
invite_members = f['members']
)
chats = await Community.chats()
names = [i['name'] for i in chats]
ids = [i['id'] for i in chats]
chat = ids[names.index(tmp_chat_name)]
await Chat.edit(
name = f['name'],
pin = f['is_pinned'],
announcement = f['announcement'],
only_view = f['only_view'],
members_can_invite = f['members_can_invite'],
can_send_coins = f['can_send_coins'],
change_adm_to = f['adm'] if f['adm'] != bot_id else None,
chat = chat
)
await Chat.change_co_hosts(f['co_hosts'])
return chat
class Community:
async def chats(
need_print: bool = False,
ignore_ascii: bool = False,
com: str | None = None
) -> list[dict[str, str]]:
"""
Returns the chats you are in within a community
### need_print
Print the chats in a readable form
### ignore_ascii
Removes special characters, which can disrupt the need_print
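Example sketch, assuming actual_com was set by a received message:
```
chats = await Community.chats(need_print=True)
```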
"""
if not (com := to_list(com or actual_com)):
raise EmptyCom('Enter a com or send a message in a chat')
async def foo(i):
res = await _req(
'get', f'x{i}/s/chat/thread?type=public-all&start=0&size=100'
)
return {str(i): [{'name': i['title'], 'id': i['threadId']} for i in res.json['threadList']]}
a = await gather(*[foo(i) for i in com])
chats = {k: v for i in a for k, v in i.items()}
if need_print:
for i, e in chats.items():
max_name = len(
max(
[
i['name'] if not ignore_ascii else fix_ascii(i['name'])
for i in e
],
key=len,
)
)
print(i)
for n in e:
name = n['name'] if not ignore_ascii else fix_ascii(n['name'])
a = max_name - len(name)
print(f" {name} {' '*a}-> {n['id']}")
print()
return [j for i in chats.values() for j in i]
async def staff(com: str | None = None) -> Dict[str, list[Dict[str, str]]]:
"""
Returns a dictionary containing community leaders and curators
"""
if not (com := com or actual_com):
raise EmptyCom('Enter a com or send a message in a chat')
leaders = [{'nickname': i['nickname'], 'uid': i['uid']} for i in (await _req('get', f'x{com}/s/user-profile?type=leaders&start=0&size=100')).json['userProfileList']]
curators = [{'nickname': i['nickname'], 'uid': i['uid']} for i in (await _req('get', f'x{com}/s/user-profile?type=curators&start=0&size=100')).json['userProfileList']]
return {'leaders': leaders, 'curators': curators}
class My:
async def chats(
need_print: bool = False,
ignore_ascii: bool = False
) -> list[tuple[str, str]]:
"""
Returns the chats you are in across all communities
### need_print
Print the chats in a readable form
### ignore_ascii
Removes special characters, which can disrupt the need_print
"""
res = await _req('get', 'g/s/community/joined?v=1&start=0&size=50')
coms = {str(i['ndcId']): [i['name'], []] for i in res.json['communityList']}
async def foo(i):
return (await _req(
'get', f'x{i}/s/chat/thread?type=joined-me&start=0&size=100'
)).json
chats = await gather(*[foo(i) for i in coms])
for i in chats:
for j in i['threadList']:
com_id = str(j['ndcId'])
chat_id = j['threadId']
is_private_chat = j['membersQuota'] == 2
chat_name = (
j['membersSummary'][1]['nickname']
if is_private_chat
else j['title']
)
coms[com_id][1].append(
(
chat_name if not ignore_ascii else fix_ascii(chat_name),
chat_id,
)
)
if need_print:
for i, e in coms.items():
max_name = (
len(max([i[0] for i in e[1]], key=len)) if e[1] else 0
)
print(f'{coms[i][0]} - {i}')
for j in coms[i][1]:
a = (max_name - len(j[0])) + 1
print(f' {j[0]} {" "*a}-> {j[1]}')
print()
return [j for i in list(coms.values()) for j in i[1]]
async def communities(
need_print: bool = False,
ignore_ascii: bool = False
) -> dict[str, str]:
"""
Returns all the communities you are in
### need_print
Print the chats in a readable form
### ignore_ascii
Removes special characters, which can disrupt the need_print
"""
res = await _req('get', 'g/s/community/joined?v=1&start=0&size=100')
coms = {
i['name']
if not ignore_ascii
else fix_ascii(i['name']): str(i['ndcId'])
for i in res.json['communityList']
}
if need_print:
max_name = len(max(coms.keys(), key=len))
for i, e in coms.items():
a = max_name - len(i)
print(f'{i} {" "*a} -> {e}')
return coms
# # # # # # #
# Cache #
# # # # # # #
MESSAGE = Message()
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/amsync/obj.py
|
obj.py
|
from __future__ import annotations
from re import search
from typing import AsyncIterator, Literal
from asyncio import AbstractEventLoop, Future, sleep
from binascii import Error
from contextlib import suppress
from ujson import loads
from aiohttp import ClientSession, WSServerHandshakeError
from colorama import Fore
from pybase64 import urlsafe_b64decode
from . import obj
from .db import _DB
from .obj import Message, _req
from .enum import WsStatus
from .utils import Slots, clear
from .dataclass import Msg
class Ws(Slots):
__slots__ = [
'_events',
'_decoded'
]
def __init__(
self,
loop: AbstractEventLoop,
email: 'Bot.email', # type: ignore
password: 'Bot.password', # type: ignore
only_chats: 'Bot.only_chats' = {}, # type: ignore
ignore_chats: 'Bot.ignore_chats' = {} # type: ignore
):
self._deviceid: str = obj.headers['NDCDEVICEID']
self._loop: AbstractEventLoop = loop
self._db: _DB = _DB()
self.futures: list[Future] = []
self._msg = Message()
self._email = email
self._password = password
self._only_chats = only_chats
self._ignore_chats = ignore_chats
self._status = WsStatus.OPEN
async def _get_sid(self) -> str:
"""
Get the account sid
"""
return (await _req('post', 'g/s/auth/login', data={
'email': self._email,
'secret': f'0 {self._password}',
'deviceID': self._deviceid,
})).json['sid']
async def _connect(self) -> AsyncIterator[Msg | WsStatus]:
"""
Connect the websocket
"""
async with ClientSession() as session:
while True:
try:
ws = await session.ws_connect(
f'wss://ws1.narvii.com/?signbody={self._deviceid}',
headers=obj.headers,
)
break
except WSServerHandshakeError as e:
if str(e)[0] == '5':
clear()
for i in range(5):
print(f'Amino servers died, wait {Fore.CYAN}{5-i}{Fore.WHITE}s')
await sleep(1)
clear()
print(f'{Fore.GREEN}Reconnecting...{Fore.WHITE}')
else:
raise
clear()
self._call_events('ready')
while self._status == WsStatus.OPEN: # for tests
if ws.closed:
self._call_events('close')
yield WsStatus.CLOSED
# The socket sometimes receives a frame that is not a json,
# causing a TypeError; ignoring it does not cause any problems
with suppress(TypeError):
res = await ws.receive_json(loads=loads)
if res['t'] == 1000:
yield self._msg.from_ws(res['o'])
def _call_events(
self,
name: str,
*m: list[Msg]
) -> None:
"""
Calls all events with the specific name
"""
for i in self._events[name]:
self._loop.create_task(i(*m))
def _can_call(self, msg: Msg) -> Literal[True] | None:
if not self._only_chats and not self._ignore_chats:
return True
for community in self._only_chats:
if (
msg.chat in self._only_chats[community]
or not self._only_chats[community]
and msg.com == community
):
return True
for community in self._ignore_chats:
if (
msg.chat not in self._ignore_chats[community]
or not self._ignore_chats[community]
and msg.com != community
):
return True
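# Example (hedged): with only_chats={'123': []} every chat of community
# '123' passes, while only_chats={'123': ['abc']} lets only chat 'abc'
# through; ignore_chats works the other way around.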
async def run(
self,
call: 'Bot._call', # type: ignore
events: 'Bot.events', # type: ignore
bot: 'Bot' # type: ignore
) -> None:
"""
Start the bot
"""
if not self._db.get_account(self._email):
self._db.add_account(self._email, await self._get_sid())
sid = self._db.get_account(self._email)
obj.headers['NDCAUTH'] = f'sid={sid}'
while True:
try:
decoded = urlsafe_b64decode(sid).decode('cp437')
break
except Error:
sid = sid[:-1]
id_ = search(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}', decoded).group()
bot.sid = sid
bot.id = id_
obj.bot_id = id_
self._events = events
events = {
'0:0': 'message',
'100:0': 'message',
'101:0': 'join_chat',
'102:0': 'leave_chat',
'0:100': 'image',
}
async for m in self._connect():
if m == WsStatus.CLOSED:
# Reconnect
return await self.run(call, self._events, bot)
if self._can_call(m):
with suppress(KeyError):
self._call_events(events[f'{m.type}:{m.media_type}'], m)
if self.futures:
for future in self.futures:
future.set_result(m)
self.futures.clear()
self._loop.create_task(call(m))
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/amsync/ws.py
|
ws.py
|
from time import time
from contextlib import contextmanager
from peewee import (
SqliteDatabase,
CharField,
IntegerField,
Model,
OperationalError
)
db = SqliteDatabase('db.db')
_1_DAY = 86400
_3_DAYS = 259200
class Account(Model):
email = CharField()
sid = CharField()
change_in = IntegerField()
class Meta:
database = db
class Update(Model):
up_lib_in = IntegerField()
up_deps_in = IntegerField()
class Meta:
database = db
@contextmanager
def query():
try:
db.connect()
yield
# Database is already connected
except OperationalError:
yield
finally:
db.close()
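# Usage sketch (hedged): wrap any peewee query so the connection is opened
# if needed and always closed afterwards, e.g.
#     with query():
#         acc = Account.get_or_none(Account.email == 'a@b.c')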
class _DB:
def __init__(self):
Account.create_table()
Update.create_table()
def add_account(self, email, sid):
with query():
Account(email=email, sid=sid, change_in=int(time())).save()
def get_account(self, email):
with query():
try:
acc = Account.get(Account.email == email)
if int(time()) - acc.change_in >= _1_DAY:
acc.delete_instance()
return None
return acc.sid
except Account.DoesNotExist:
return None
def update_time_of(self, attr):
with query():
up = Update.get()
if attr == 'lib':
up.up_lib_in = int(time())
elif attr == 'deps':
up.up_deps_in = int(time())
up.save()
def create_update(self):
Update(up_lib_in=int(time()), up_deps_in=int(time())).save()
def lib_need_update(self):
with query():
try:
if int(time()) - Update.get().up_lib_in >= _1_DAY:
self.update_time_of('lib')
return True
except Update.DoesNotExist:
self.create_update()
return False
def deps_need_update(self):
with query():
try:
if int(time()) - Update.get().up_deps_in >= _3_DAYS:
self.update_time_of('deps')
return True
except Update.DoesNotExist:
self.create_update()
return False
class DB:
pass
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/amsync/db.py
|
db.py
|
from __future__ import annotations
from typing import Dict, Any
from dataclasses import dataclass
from ujson import loads
from .utils import Slots, get_value
__all__ = [
'Res',
'Reply',
'Msg',
'ChatMsg',
'Embed',
'DataUser',
'DataChat'
]
@dataclass
class Res(Slots):
"""
Represents a response from Req
"""
bytes: bytes
headers: Dict[str, str]
json: Dict[str, Any]
ok: bool
status: int
text: str
url: str
@classmethod
async def _make(cls, req) -> Res:
return cls(
bytes = await req.read(),
headers = req.headers,
json = await req.json(loads=loads),
ok = req.status < 400,
status = req.status,
text = await req.text(),
url = req.real_url
)
@dataclass
class Reply(Slots):
"""
Represents the message that was replied to
"""
icon: str | None
id: str | None
nickname: str | None
uid: str | None
@dataclass
class Msg(Slots):
"""
Represents a websocket message
"""
chat: str | None
com: str | None
extensions: dict[str, Any]
file_link: str | None
icon: str | None
id: str | None
level: int | None
media_type: str | None
mentioned_users: list[str]
nickname: str | None
ref_id: int | None
reply: Reply
text: str | None
type: str | None
uid: str | None
@classmethod
def _make(cls, j) -> Msg:
cm: dict[str, Any] = j['chatMessage']
ext: dict[str, Any] = get_value(cm, 'extensions') or {}
r = None
if 'replyMessage' in ext:
r = Reply(
icon = get_value(ext, 'replyMessage', 'author', 'icon'),
id = get_value(ext, 'replyMessageId'),
nickname = get_value(ext, 'replyMessage', 'author', 'nickname'),
uid = get_value(ext, 'replyMessage', 'author', 'uid')
)
return cls(
chat = get_value(cm, 'threadId', convert=str),
com = get_value(j, 'ndcId', convert=str),
extensions = ext,
file_link = get_value(cm, 'mediaValue'),
icon = get_value(cm, 'author', 'icon'),
id = get_value(cm, 'messageId'),
level = get_value(cm, 'author', 'level'),
media_type = get_value(cm, 'mediaType'),
mentioned_users = [u['uid'] for u in get_value(ext, 'mentionedArray') or []],
nickname = get_value(cm, 'author', 'nickname'),
ref_id = get_value(cm, 'clientRefId'),
reply = r,
text = get_value(cm, 'content'),
type = get_value(cm, 'type'),
uid = get_value(cm, 'uid')
)
@dataclass
class ChatMsg(Slots):
"""
Represents a message obtained from Chat.messages
"""
chat: str | None
com: str | None
extensions: dict[str, Any]
file_link: str | None
icon: str | None
id: str | None
level: int | None
media_type: int | None
mentioned_users: list[str]
nickname: str | None
ref_id: int | None
reply: Reply | None
text: str | None
type: str | None
uid: str | None
@classmethod
def _make(cls, j) -> ChatMsg:
ext: dict[str, Any] = get_value(j, 'extensions') or {}
r = None
if 'replyMessage' in ext:
r = Reply(
icon = get_value(ext, 'replyMessage', 'author', 'icon'),
id = get_value(ext, 'replyMessageId'),
nickname = get_value(ext, 'replyMessage', 'author', 'nickname'),
uid = get_value(ext, 'replyMessage', 'author', 'uid')
)
return cls(
chat = get_value(j, 'threadId', convert=str),
com = get_value(j, 'ndcId', convert=str),
extensions = ext,
file_link = get_value(j, 'mediaValue'),
icon = get_value(j, 'author', 'icon'),
id = get_value(j, 'messageId'),
level = get_value(j, 'author', 'level'),
media_type = get_value(j, 'mediaType'),
mentioned_users = [u['uid'] for u in get_value(ext, 'mentionedArray') or []],
nickname = get_value(j, 'author', 'nickname'),
ref_id = get_value(j, 'clientRefId'),
reply = r,
text = get_value(j, 'content'),
type = get_value(j, 'type'),
uid = get_value(j, 'uid')
)
class Embed(Slots):
"""
Represents the Embed that will be sent in Message.send
"""
def __init__(
self,
msg_text: str,
title: str,
text: str,
link: str,
image: str | bytes | None = None
):
self.msg_text = msg_text
self.title = title
self.text = text
self.link = link
self.image = image
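# Usage sketch (hedged; all field values are illustrative):
#     embed = Embed('look at this', 'Title', 'Body text', 'https://example.com')
#     await bot.send(embed=embed)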
@dataclass
class DataUser(Slots):
"""
Represents information from a user profile
"""
bio: str | None
blogs_count: int | None
com: str | None
comments_count: int | None
followers_count: int | None
following_count: int | None
im_following: bool
level: int | None
nickname: str | None
posts_count: int | None
id: str | None
reputation: int | None
role: str | None
visitors_count: int | None
@classmethod
def _make(cls, j) -> DataUser:
return cls(
bio = get_value(j, 'content'),
blogs_count = get_value(j, 'blogsCount'),
com = get_value(j, 'ndcId', convert=str),
comments_count = get_value(j, 'commentsCount'),
followers_count = get_value(j, 'membersCount'),
following_count = get_value(j, 'joinedCount'),
im_following = get_value(j, 'followingStatus') == 1,
level = get_value(j, 'level'),
nickname = get_value(j, 'nickname'),
posts_count = get_value(j, 'postsCount'),
id = get_value(j, 'uid'),
reputation = get_value(j, 'reputation'),
role = {0: 'member', 101: 'curator', 100: 'leader', 102: 'agent'}[j['role']],
visitors_count = get_value(j, 'visitoresCount')
)
@dataclass
class DataChat(Slots):
"""
Represents information from a chat
"""
adm: str | None
announcement: str | None
bg: str | None
can_send_coins: bool
co_hosts: list[str]
extensions: Dict[str, Any]
icon: str | None
id: str | None
is_pinned: bool
is_private: bool
members_can_invite: bool
name: str | None
only_fans: bool
only_view: bool
text: str | None
@classmethod
def _make(cls, j):
t: Dict[str, Any] = j['thread']
ext: Dict[str, Any] = get_value(t, 'extensions') or {}
return cls(
adm = get_value(t, 'author', 'uid'),
announcement = get_value(ext, 'announcement') or '',
bg = get_value(ext, 'bm')[1] if 'bm' in ext else None,
can_send_coins = get_value(t, 'tipInfo', 'tippable'),
co_hosts = get_value(ext, 'coHost') or [],
extensions = ext,
icon = get_value(t, 'icon'),
id = get_value(t, 'threadId'),
is_pinned = get_value(t, 'isPinned'),
is_private = get_value(t, 'membersQuota') == 2,
members_can_invite = get_value(t, 'membersCanInvite') is not False,  # 'or True' was always truthy; default to True only when the key is missing
name = get_value(t, 'title'),
only_fans = get_value(ext, 'fansOnly'),
only_view = get_value(ext, 'viewOnly') or False,
text = get_value(t, 'content') or ''
)
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/amsync/dataclass.py
|
dataclass.py
|
from __future__ import annotations
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont, ImageOps
from typing import Tuple
from pathlib import Path
from filetype import guess_mime
from .exceptions import FontNotFound
__all__ = [
'Color',
'MakeImage',
'ProgressBar'
]
Size = Tuple[int, int]
RGBA = Tuple[int, int, int, int]
class Color:
RED = (255, 0, 0, 255)
GREEN = (0, 255, 0, 255)
BLUE = (0, 0, 255, 255)
CYAN = (0, 255, 255, 255)
ORANGE = (255, 128, 0, 255)
YELLOW = (255, 255, 0, 255)
PURPLE = (128, 0, 255, 255)
PINK = (255, 0, 255, 255)
WHITE = (255, 255, 255, 255)
GRAY = (128, 128, 128, 255)
BLACK = (0, 0, 0, 255)
TRANSPARENT = (0, 0, 0, 0)
PRETTY_BLACK = (26, 26, 26, 255)
class MakeImage:
def __init__(self, img) -> None:
if isinstance(img, bytes):
img = Image.open(BytesIO(img))
self.img: Image.Image = img
@property
def size(self) -> Size:
return self.img.size
@property
def bytes(self) -> bytes:
arr = BytesIO()
self.save(arr)
return arr.getvalue()
@staticmethod
def type(b) -> str:
# Only first 261 bytes representing the max file header is required
#
# https://github.com/h2non/filetype.py#features
return guess_mime(b[:261]).split('/')[1]
@classmethod
def new(
cls,
size: Size,
color: RGBA | Color = Color.WHITE
) -> MakeImage:
return cls(Image.new('RGBA', size, color))
@classmethod
def from_path(cls, path) -> MakeImage:
return cls(Image.open(path))
@classmethod
def convert(cls, im: Image.Image) -> MakeImage:
return cls(im)
def get_text_pos(
self,
draw: ImageDraw.Draw,
text: str,
font: ImageFont.truetype
) -> Size:
w, h = draw.textsize(text, font=font)
W, H = self.size
return W-w, H-h
def get_image_pos(self, im: Image.Image) -> Size:
W, H = self.size
w, h = im.size
return W-w, H-h
def resize(
self,
size: Size,
preserve_aspect: bool = False
) -> None:
if preserve_aspect:
self.img.thumbnail(size, Image.BICUBIC)
else:
self.img = self.img.resize(size, Image.BICUBIC)
def crop(self, size: Size) -> None:
W, H = self.size
cw, ch = W//2, H//2
w, h = size
left = cw - w//2
top = ch - h//2
right = cw + w//2
bottom = ch + h//2
self.img = self.img.crop((left, top, right, bottom))
def save(
self,
path: str,
format: str = 'webp',
quality: int | None = None
) -> None:
format = format.lower()
if format in ('jpg', 'jpeg', 'png'):
self.img.save(path, format, quality=quality or 80)
elif format == 'webp':
self.img.save(path, format, quality=quality or 90)
else:
self.img.save(path, format, quality=quality)
@staticmethod
def calc(
size: Size,
position: str | None = None,
move: Size = (0, 0)
) -> Size:
w, h = size
if position == 'center':
return tuple(map(sum, zip((w//2, h//2), move)))
if position == 'top':
return tuple(map(sum, zip((w//2, 0), move)))
if position == 'right':
return tuple(map(sum, zip((w, h//2), move)))
if position == 'bottom':
return tuple(map(sum, zip((w//2, h), move)))
if position == 'left':
return tuple(map(sum, zip((0, h//2), move)))
return move
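# Example (hedged): calc((200, 100), 'center', (10, 0)) -> (110, 50),
# i.e. the centre of a 200x100 area shifted 10 px to the right.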
def text(
self,
text: str,
position: str | None = None,
move: Size = (0, 0),
font: Tuple[str, int] | None = None,
color: RGBA | Color = Color.WHITE,
stroke: int = 0,
stroke_color: RGBA | Color = Color.BLACK,
) -> None:
draw = ImageDraw.Draw(self.img)
if font:
if not font[0] or not font[1]:
raise Exception('No font-file or font-size')
if not Path(font[0]).exists():
raise FontNotFound(f"Font '{font[0]}' was not found in the current folder")
font = ImageFont.truetype(*font)
w, h = self.get_text_pos(draw, text, font)
draw.text(
self.calc((w, h), move=move, position=position),
text,
font = font,
fill = color,
stroke_width = stroke,
stroke_fill = stroke_color,
)
def paste(
self,
im: MakeImage | Image.Image,
position: str | None = None,
move: Size = (0, 0)
) -> None:
if isinstance(im, MakeImage):
im = im.img
try:
self.img.paste(
im, self.calc(self.get_image_pos(im), position, move), im.convert('RGBA')
)
except ValueError: # bad transparency mask
self.img.paste(
im, self.calc(self.get_image_pos(im), position, move)
)
def circular_thumbnail(self) -> None:
w, h = self.size
w, h = w*3, h*3
mask = Image.new('L', (w, h), 0)
# Place the entire mask in the center of the image,
# without the 5 px inset, part of the mask's edges would be slightly cut
ImageDraw.Draw(mask).ellipse((5, 5, w-5, h-5), fill=255)
mask = mask.resize(self.size, Image.BICUBIC)
self.img = ImageOps.fit(self.img, mask.size, Image.BICUBIC)
self.img.putalpha(mask)
def to_img(self, n_frame: int = 0) -> MakeImage:
self.img.seek(n_frame)
self.save('tmp.webp')
with open('tmp.webp', 'rb') as tmp:
tmp = MakeImage(tmp.read())
Path('tmp.webp').unlink(missing_ok=True)
return tmp
def add_border(
self,
size: int,  # border thickness in pixels; the arithmetic below needs an int
color: RGBA | Color = Color.WHITE
) -> None:
W, H = self.size
mask = self.img.copy().resize((W + size*2, H + size*2))
fill = Image.new('RGBA', (W + size*2, H + size*2), color)
bg = Image.new('RGBA', (W + size*2, H + size*2), Color.TRANSPARENT)
w, h = bg.size
bg.paste(fill, mask=mask)
bg.paste(self.img, ((w-W)//2, (h-H)//2), self.img)
self.img = bg
def show(self) -> None:
self.img.show()
class ProgressBar(MakeImage):
def __init__(
self,
size: Size,
radius: int = 30,
color: RGBA | Color = Color.WHITE,
bg_color: RGBA | Color = Color.PRETTY_BLACK
):
self.img = Image.new('RGBA', size, Color.TRANSPARENT)
self.radius = radius
self.color = color
self.bg = bg_color
ImageDraw.Draw(self.img).rounded_rectangle(
(0, 0, *size), radius, bg_color
)
def fill(self, px: int) -> None:
w, h = self.img.size
bg_fill = self.img.copy()
bg = self.img.copy()
pixdata = bg_fill.load()
for y in range(bg_fill.size[1]):
for x in range(bg_fill.size[0]):
if pixdata[x, y] == self.bg:
pixdata[x, y] = self.color
bg_fill = bg_fill.crop((0, 0, px*2, h))
bg.paste(bg_fill, mask=bg_fill)
self.img = bg.resize((w, h), Image.BICUBIC)
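# Usage sketch (hedged): fill() crops at px*2, so on a 300 px wide bar
# fill(75) paints 150 px, i.e. half of the bar:
#     bar = ProgressBar((300, 40))
#     bar.fill(75)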
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/amsync/image.py
|
image.py
|
from __future__ import annotations
import sys
from re import search
from os import environ, execl
from asyncio import (
gather,
wait_for,
new_event_loop,
iscoroutinefunction,
run_coroutine_threadsafe,
AbstractEventLoop,
TimeoutError,
Future
)
from dotenv import load_dotenv
from typing import (
Any,
Callable,
Awaitable,
Coroutine,
Dict,
Literal,
NoReturn
)
from pathlib import Path
from threading import Thread
from subprocess import run
from colorama import Fore, Style, init
from .ws import Ws
from .db import _DB
from .obj import Message, Req, Community, _req, My
from .utils import Slots, clear, one_or_list, to_list
from .dataclass import Msg, Embed, Res
from .exceptions import (
AccountNotFoundInDotenv,
EventIsntAsync,
InvalidDotenvKeys,
CommandIsntAsync,
InvalidChatChoice,
InvalidEvent,
InvalidRole
)
__all__ = ['Bot']
with open(f'{Path(__file__).parent}/__init__.py') as f:
version = search(r'p[0-9]+.[0-9]+.[0-9]+', f.read()).group()[1:]
Coro_return_ws_msg = Callable[[], Coroutine[Msg, None, None]]
Coro_return_Any = Callable[[], Coroutine[Any, None, None]]
Coro_return_None = Callable[[], Coroutine[None, None, None]]
class Bot(Slots):
"""
Represents the bot
"""
__slots__ = ['_ws']
def __init__(
self,
email: str | None = None,
password: str | None = None,
prefix: str = '/',
only_chats: dict[str, list[str]] = {},
ignore_chats: dict[str, list[str]] = {}
):
"""
#### only_chats
Dictionary of chats in which the bot will listen for commands
#### ignore_chats
Dictionary of chats in which the bot will ignore commands
#### Example:
```
chats = {
'1111111': ['00000000-0000-0000-0000-000000000000', '11111111-1111-1111-1111-111111111111'],
'2222222': [] # Empty list means all chats`
}
bot = Bot(only_chats=chats)
```
The bot will listen to the 00000.... and 11111.... chats from the 1111111 community,
and to all the chats in the community 2222222.
"""
init()
# load_dotenv does not find the .env in the project folder,
# so I use Path to get the absolute .env path
load_dotenv(Path('.env').absolute())
try:
self._email = email or environ['EMAIL']
self._password = password or environ['PASSWORD']
except KeyError:
raise InvalidDotenvKeys('Your .env must have the keys: EMAIL and PASSWORD')
if not self._email or not self._password:
raise AccountNotFoundInDotenv('Put your email and password in .env')
if only_chats and ignore_chats:
raise InvalidChatChoice('Enter chats only in "only_chats" or "ignore_chats"')
self.id: str = 'ws.run'
self.sid: str = 'ws.run'
self.staff: Dict[str, Dict[str, list[str]]] = {}
self._db: _DB = _DB()
self._msg: Message = Message()
self._loop: AbstractEventLoop = new_event_loop()
self.prefix = prefix
self.only_chats = only_chats
self.ignore_chats = ignore_chats
self.commands: dict[str, dict[str, Any]] = {}
self.events: dict[str, list[Coro_return_None]] = {
'ready': [],
'close': [],
'message': [],
'join_chat': [],
'leave_chat': [],
'image': []
}
def add(
self,
help: str = 'No help',
aliases: list[str] = [],
staff: Literal['any', 'curator', 'leader'] | None = None
) -> Callable[[Coro_return_ws_msg], None]:
"""
Adds a command to the bot
```
@bot.add()
async def hi(m: Msg):
await bot.send(f'Hi, {m.nickname}')
```
Created the `hi` command
"""
def foo(f: Coro_return_ws_msg) -> None:
if not iscoroutinefunction(f):
raise CommandIsntAsync('Command must be async: "async def ..."')
if staff not in ['any', 'curator', 'leader', None]:
raise InvalidRole(
f'{Fore.RED}{staff}{Fore.WHITE}. Choose between '
f'{Fore.CYAN}any{Fore.WHITE}, '
f'{Fore.CYAN}curator{Fore.WHITE}, '
f'{Fore.CYAN}leader{Fore.WHITE}'
)
self.commands[f.__name__] = {
'aliases': aliases,
'def': f,
'help': help,
'staff': staff
}
return foo
def on(self) -> Callable[[Coro_return_ws_msg], None]:
"""
Adds a event to the bot
```
@bot.on()
async def message(m: Msg):
print(m.text)
```
Created the `message` event
"""
def foo(f: Coro_return_ws_msg) -> None:
if f.__name__ not in self.events:
raise InvalidEvent(f.__name__)
if not iscoroutinefunction(f):
raise EventIsntAsync('Event must be async: "async def ..."')
self.events[f.__name__].append(f)
return foo
async def check_update(self) -> NoReturn | None:
"""
Checks whether lib or lib dependencies need updating
Checks each day if the lib needs to update
Checks every 3 days if the dependencies need to update
If the program runs on heroku, the program will not check for updates
"""
def try_update() -> NoReturn | None:
if self._db.deps_need_update():
cmd = run('pip install -U amsync', capture_output=True, text=True)
else:
cmd = run('pip install -U amsync --no-deps', capture_output=True, text=True)
if cmd.returncode:
clear()
print(f'Error updating from version {Style.BRIGHT}{version}{Style.NORMAL} to {Fore.CYAN}{new}{Fore.WHITE}\n\n')
print(cmd.stderr or cmd.stdout)
sys.exit(1)
if (
'DYNO' not in environ # not in heroku
and self._db.lib_need_update()
):
new = (await Req.new('get', 'https://pypi.org/pypi/Amsync/json')).json['info']['version']
if new != version:
print(f'There is a new version: {Fore.CYAN}{new}{Fore.WHITE}')
print(f'Actual version: {Style.BRIGHT}{version}{Style.NORMAL}\n')
print(f'Do you want to update it? (Y/n) ', end='')
if input().lower() == 'y':
clear()
print('Updating...')
try_update()
clear()
print('Restarting...\n')
Path('db.db').unlink(missing_ok=True)
execl(sys.executable, str(Path(__file__).absolute()), *sys.argv)  # execl arguments must be strings
clear()
def run(self) -> None:
"""
Start the bot
"""
self._loop.run_until_complete(self.check_update())
self._ws = Ws(
loop = self._loop,
email = self._email,
password = self._password,
only_chats = self.only_chats,
ignore_chats = self.ignore_chats
)
Thread(target=self._loop.run_forever).start()
fut = run_coroutine_threadsafe(
self._ws.run(
call = self._call,
events = self.events,
bot = self),
self._loop
)
# On error, "run_coroutine_threadsafe" stops the program as if an exception
# were raised, but does not print the exception on the screen.
# So we take the exception from the future and re-raise it to show it
try:
fut.result()
except BaseException:
raise fut.exception()
async def status(
self,
s: Literal['on', 'off'],
com: str | list[str] | None = None
) -> Res | list[Res]:
"""
Changes the status of the bot
'on' the bot goes online
'off' the bot goes offline
By default, the bot changes its status in every community it is in
However, you can pass specific communities to change the status only there
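Example sketch; the community id is illustrative:
```
await bot.status('on')                 # online everywhere
await bot.status('off', com='123456')  # offline in one community
```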
"""
assert s in ['on', 'off'], f"Choose 'on' or 'off', not {s}"
async def foo(i):
return await _req('post', f'x{i}/s/user-profile/{self.id}/online-status', data)
data = {'onlineStatus': 1} if s == 'on' else {'onlineStatus': 2, 'duration': 86400} # 1 day
com = to_list(com or [i for i in (await My.communities(False)).values()])
return one_or_list(await gather(*[foo(i) for i in com]))
async def send(
self,
*msgs: list[str],
files: str | list[str] | None = None,
type_: int | None = 0,
embed: Embed | None = None,
reply: str | None = None,
com: str | None = None,
chat: str | None = None
) -> Res | list[Res]:
"""
Send a message, file, embed or reply
#### reply
Message id to reply
"""
return await self._msg.send(
*msgs,
files = files,
type_ = type_,
embed = embed,
reply = reply,
com = com,
chat = chat
)
async def wait_for(
self,
check: Callable[[Msg], bool] = lambda _: True,
timeout: int | None = None
) -> Msg | None:
"""
Wait for a message until the check is met or timeout finish
If the condition is met, the message returns, if not returns None
```
def check(_m: Msg):
return _m.text == 'Hello'
await bot.wait_for(check=check, timeout=10)
```
Wait for a message to have the text "Hello" or pass 10 seconds
"""
future = self._loop.create_future()
self._ws.futures.append(future)
try:
if check(msg := await wait_for(future, timeout)):
return msg
# Calls wait_for until the condition is met
return await self.wait_for(check, timeout)
except TimeoutError:
# Delete the future canceled by asyncio.wait_for
self._ws.futures.remove(future)
def _is_alias(self, name):
for command_name, args in self.commands.items():
if name in args['aliases']:
return command_name
async def _is_staff(self, m: Msg, role: Literal['any', 'curator', 'leader']) -> bool:
if m.com not in self.staff:
self.staff[m.com] = await Community.staff(m.com)
leaders = [i['uid'] for i in self.staff[m.com]['leaders']]
curators = [i['uid'] for i in self.staff[m.com]['curators']]
if role == 'any':
return m.uid in leaders + curators
if role == 'leader':
return m.uid in leaders
if role == 'curator':
return m.uid in curators
async def _call(self, m: Msg) -> None:
if m.text and m.text.startswith(self.prefix):
splited = m.text.split()
name = splited[0][len(self.prefix):]
command_name = self._is_alias(name) or name
if command_name in self.commands:
cmd = self.commands[command_name]
staff = cmd['staff']
if not staff or await self._is_staff(m, staff):
if (
len(splited) > 1
and splited[1] in (f'{self.prefix}h', f'{self.prefix}help')
):
await self.send(cmd['help'])
else:
# Remove command name from text
m.text = ' '.join(splited[1:])
self._loop.create_task(cmd['def'](m))
|
Amsync
|
/Amsync-0.0.53.tar.gz/Amsync-0.0.53/amsync/bot.py
|
bot.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import json
import re
import pathlib
from ando.error import ExperimentError, \
SessionError, SubjectError, MetaDataError, DerivativeDataError, \
RawDataError
dir_rules = os.path.join(os.path.dirname(__file__), 'rules') + os.sep
def parse_all_path(nested_list_of_dir):
"""
Transforms this
[
['Landing', 'sub-anye', '180116_001_m_anye_land-001', 'source'],
['Landing', 'sub-enya', '180116_001_m_enya_land-001', 'source'],
['Landing', 'sub-enyo'],
['Landing', 'sub-enyo', '180116_001_m_enyo_land-001']
]
into
[
['Landing', 'sub-anye', '180116_001_m_anye_land-001', 'source'],
['Landing', 'sub-enya', '180116_001_m_enya_land-001', 'source'],
]
Checking for the longest chain with the same sub-chain
"""
def _test_is_included(my_list_of_lists, list_elem):
for my_list_elem in my_list_of_lists:
if all([val[0] == val[1] for val in zip(my_list_elem,
list_elem)]):
return True
return False
def _merge_duplicates(my_list_of_lists, max_length=3):
"""
Transform this
[
['Landing', 'sub-anye', '180116_001_m_anye_land-001', 'metadata']
['Landing', 'sub-anye', '180116_001_m_anye_land-001', 'rawdata']
['Landing', 'sub-anye', '180116_001_m_anye_land-001','derivatives']
]
to
[
[
'Landing', 'sub-anye',
'180116_001_m_anye_land-001',
'rawdata',
'metadata',
'derivatives',
]
]
Args:
my_list_of_lists ([list]): [list of path to process]
max_length (int, optional): [number of folder in session directory
coresponding to rawdata metadata derivatives and sources].
Defaults to 3.
Returns:
[list]: [list of concatenate sub folder at the end of the list ]
todo:
This might have to be re-implemented more efficiently.
At the moment it is the best solution we have for getting feedback on where the error happens.
If we used the BIDS implementation, we could only say whether the directory follows the AnDO specs.
"""
merged_list = []
for my_list_elem in my_list_of_lists:
simil_list = []
for my_list_elem2 in my_list_of_lists:
if all([val[0] == val[1] for i, val in
enumerate(zip(my_list_elem, my_list_elem2))
if i < max_length]):
simil_list.append(my_list_elem2)
if len(simil_list) > 1:
new_list = simil_list[0][:max_length]
for remain_list in simil_list:
new_list.append(remain_list[max_length])
merged_list.append(new_list)
else:
merged_list.append(simil_list[0])
return merged_list
new_list_of_lists = []
for list_elem in sorted(nested_list_of_dir,
key=lambda sublist: len(sublist), reverse=True):
if not _test_is_included(new_list_of_lists, list_elem):
new_list_of_lists.append(list_elem)
# Removing duplicates
new_list_of_lists = _merge_duplicates(new_list_of_lists)
unique_data = [list(x) for x in set(tuple(x)for x in new_list_of_lists)]
return unique_data
def create_nested_list_of_path(directory):
"""
Takes the path given as argument and builds a nested list of paths:
it keeps the last element of the path and walks through every sub
directory, turning e.g.
/home/garciaj/AnDOChecker/checker/tests/ds001/Data/Landing/
to
[['Landing', 'sub-enya', 'y180116-land-001', 'Sources']]
"""
list_of_dir = []
# take the last folder passed in arg: tests/ds007/data/Landing -> Landing
path = pathlib.PurePath(directory)
sub = directory.split(path.name)[0]
# take everything before last tests/ds007/data/Landing -> tests/ds007/data
for (root, dirs, _) in os.walk(directory):
for d in dirs:
list_of_dir.append(os.path.join(root, d).replace(sub, ''))
nested_list_of_dir = []
for each in list_of_dir:
nested_list_of_dir.append(each.split(os.sep))
nested_list_of_dir_parsed = parse_all_path(nested_list_of_dir)
return nested_list_of_dir_parsed
def is_AnDO(directory, verbose, webcall):
"""
Check if a file path adheres to AnDO.
Main method of the validator. Uses other functions to check
different aspects of the directory path.
Args:
directory ([str]): [names of the directory to check]
Returns:
[bool]: [does the directory adhere to the AnDO specification]
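Example (illustrative path):
is_AnDO('tests/ds001/Data/Landing', verbose=True, webcall=False)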
"""
if not webcall:
validate = []
names = create_nested_list_of_path(directory)
for item in names:
validate.append(check_Path(item, verbose))
return any(validate)
found_err = check_Path(directory, True)
if found_err == 0:
return 0, None
return found_err
def check_Path(names, verbose):
"""
Check if a file path adheres to AnDO.
Main method of the validator. Uses other functions to check
different aspects of the directory path.
Args:
names ([list]): [names to check]
Raises:
ExperimentError: raised if it does not respect the experiment rules
SessionError: raised if it does not respect the session rules
SubjectError: raised if it does not respect the subject rules
RawDataError: raised if it does not respect the rawdata rules
DerivativeDataError: raised if it does not respect the derivatives rules
MetaDataError: raised if it does not respect the metadata rules
Returns:
[bool]: true if error is found else false
[out]: feedback for the web page
"""
bool_error = 0
out = list()
# the only error that exits without checking the other folders
if not is_experiment(names[0]):
try:
raise ExperimentError(names)
except ExperimentError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
return bool_error, out
if not is_session(names):
try:
raise SessionError(names)
except SessionError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if not is_subject(names):
try:
raise SubjectError(names)
except SubjectError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if len(names) == 6:
if not is_rawdata(names):
try:
raise RawDataError(names)
except RawDataError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if not is_derivatives(names):
try:
raise DerivativeDataError(names)
except DerivativeDataError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if not is_metadata(names):
try:
raise MetaDataError(names)
except MetaDataError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
else:
if not is_metadata(names):
try:
raise MetaDataError(names)
except MetaDataError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if not is_rawdata(names):
try:
raise RawDataError(names)
except RawDataError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if not is_derivatives(names):
try:
raise DerivativeDataError(names)
except DerivativeDataError as e:
if verbose is True:
print(e.strerror)
out.append(e.strout)
bool_error = 1
if len(out) >= 1:
return bool_error, out
else:
return bool_error
def is_experiment(names):
"""[Check names follows experiment rules]
Args:
names ([str]): [names founds in the path]
Returns:
[type]: [True or false ]
"""
regexps = get_regular_expressions(dir_rules
+ 'experiment_rules.json')
conditions = []
if type(names) == str:
conditions.append([re.compile(x).search(names) is not None
for x in regexps])
elif type(names) == list:
for word in names:
conditions.append([re.compile(x).search(word) is not None
for x in regexps])
return any(flatten(conditions))
def is_rawdata(names):
"""[Check names follows rawdata rules]
Args:
names ([str]): [names founds in the path]
Returns:
[bool]: [true or false ]
"""
regexps = get_regular_expressions(dir_rules
+ 'rawdata_rules.json')
conditions = []
if type(names) == str:
conditions.append([re.compile(x).search(names) is not None
for x in regexps])
elif type(names) == list:
for word in names:
conditions.append([re.compile(x).search(word) is not None
for x in regexps])
return any(flatten(conditions))
def is_metadata(names):
"""[Check names follows metadata rules]
Args:
names ([str]): [names founds in the path]
Returns:
[bool]: [true or false ]
"""
regexps = get_regular_expressions(dir_rules
+ 'metadata_rules.json')
conditions = []
if type(names) == str:
conditions.append([re.compile(x).search(names) is not None
for x in regexps])
elif type(names) == list:
for word in names:
conditions.append([re.compile(x).search(word) is not None
for x in regexps])
return any(flatten(conditions))
def is_derivatives(names):
"""[Check names follows derivatives rules]
Args:
names ([str]): [names founds in the path]
Returns:
[bool]: [true or false ]
"""
regexps = get_regular_expressions(dir_rules
+ 'derivatives_rules.json')
conditions = []
if type(names) == str:
conditions.append([re.compile(x).search(names) is not None
for x in regexps])
elif type(names) == list:
for word in names:
conditions.append([re.compile(x).search(word) is not None
for x in regexps])
return any(flatten(conditions))
def is_session(names):
"""[Check names follows session rules]
Args:
names ([str]): [names founds in the path]
Returns:
[bool]: [true or false ]
"""
regexps = get_regular_expressions(dir_rules + 'session_rules.json')
conditions = []
if type(names) == str:
conditions.append([re.compile(x).search(names) is not None
for x in regexps])
elif type(names) == list:
for word in names:
conditions.append([re.compile(x).search(word) is not None
for x in regexps])
return any(flatten(conditions))
def is_subject(names):
"""[Check names follows subject rules]
Args:
names ([str]): [names founds in the path]
Returns:
[bool]: [true or false ]
"""
regexps = get_regular_expressions(dir_rules + 'subject_rules.json')
conditions = []
if type(names) == str:
conditions.append([re.compile(x).search(names) is not None
for x in regexps])
elif type(names) == list:
for word in names:
conditions.append([re.compile(x).search(word) is not None
for x in regexps])
return any(flatten(conditions))
def get_regular_expressions(fileName):
'''
https://github.com/bids-standard/bids-validator/tree/master/bids-validator/
Function used to read the regexps in the rule files (adapted from the BIDS validator above)
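Illustrative rule entry (not taken from an actual rule file):
{"session": {"regexp": "ses-DATE_[0-9]{3}", "tokens": {"DATE": ["[0-9]{8}"]}}}
Each token is replaced by the '|'-join of its alternatives before compiling.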
'''
regexps = []
with open(fileName, 'r') as f:
rules = json.load(f)
for key in list(rules.keys()):
rule = rules[key]
regexp = rule['regexp']
if 'tokens' in rule:
tokens = rule['tokens']
for token in list(tokens):
regexp = regexp.replace(token, '|'.join(tokens[token]))
regexps.append(regexp)
return regexps
def flatten(seq):
"""
Format list the proper way
example:
[[x],[y],[z]]--->[x,y,z]
:param seq: list to format
"""
flat_list = []
for elt in seq:
t = type(elt)
if t is tuple or t is list:
for elt2 in flatten(elt):
flat_list.append(elt2)
else:
flat_list.append(elt)
return flat_list
|
AnDOChecker
|
/AnDOChecker-0.3.2-py3-none-any.whl/ando/engine.py
|
engine.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""[Exception raised when the name does not follow the AnDO specification
self.strerror : corresponding to the output in CLI mod
self.strout : corresponding to the output of web mod by returning
html
]
"""
# flake8: noqa: E501
class ExperimentError(Exception):
"""
Exception raised when the name does not follow the AnDO specification
Args:
Exception ([Exception]): [raised in engine.py]
"""
def __init__(self, arg):
names = arg
self.strerror = 'Level 1 error [experiment folder] at : ' + names[0] + '\n' \
+ ' It should follow the exp-NAME format, where:\n' \
+ ' - NAME is a string designating the name of your experiment\n'
self.strout = '<div class="card"><div class="card-header bg-danger text-white">' \
+ '1 error found at experiment folder level. </div><div class="card-body"> ' \
+ '<h4 class="em-header clearfix"><strong class="em-header pull-left">Error 4 type ' \
+ "[Experiment folder error] at : "+names[0]+" </strong></h4><br><b><i>" \
+ '</b></i>It should follow the exp-NAME format, where: '\
+ ' <ul><li> NAME is a string designating the name of your experiment</li><ul></div></div>'
class SubjectError(Exception):
"""Exception raised when the name does not follow the AnDO specification of subject level
Args:
Exception ([Exception]): [raised in engine.py]
"""
def __init__(self, arg):
names = arg
self.strerror = 'Level 2 error [subject folder] at : ' + names[1] + '\n' \
+ ' It should follow the sub-ID format, where:\n' \
+ ' - ID is a string designating the IDentifier of the animal\n'
self.strout = '<div class="card"><div class="card-header bg-danger text-white">' \
+ '1 error found at subject folder level. </div><div class="card-body"> ' \
+ '<h4 class="em-header clearfix"><strong class="em-header pull-left">Error 4 type ' \
+ "[Subject folder error] at : "+names[0]+" </strong></h4><br><b><i>" \
+ '</b></i>It should follow the sub-ID format, where: '\
+ ' <ul><li> ID is a string designating the IDentifier of the animal</li><ul></div></div>'
class SessionError(Exception):
"""Exception raised when the name does not follow the AnDO specification of session level
Args:
Exception ([Exception]): [raised in engine.py]
"""
def __init__(self, arg):
names = arg
        self.strerror = 'Level 3 error [session folder] at : ' + names[2] + '\n' \
            + ' It should follow the ses-YYYYMMDD_XXX_BBBB format, where:\n' \
            + ' - ‘ses-’ is an imposed prefix\n' \
            + ' - ‘YYYYMMDD’ is the date of the session (8 digits, for instance 20180430 for April 30, 2018)\n' \
            + ' - XXX is the number of the session acquired on that date (3 digits, for instance 001 for the first session)\n' \
            + ' - BBBB is a string freely usable by the research group / user (for instance to add extra info on\n' \
            + ' the version of the experimental protocol, on the type of preparation, on the user-friendly name of the animal etc.);\n' \
            + ' this string cannot contain the underscore character.\n'
self.strout = '<div class="card"><div class="card-header bg-danger text-white">' \
+ '1 error found at Session folder level. </div><div class="card-body"> ' \
+ '<h4 class="em-header clearfix"><strong class="em-header pull-left">Error 4 type ' \
+ "[session folder error] at : "+names[2]+" </strong></h4><br><b><i>" \
+ '</b></i>It should follow the ses-YYYYMMDD_XXX_BBBB format format, where: '\
+ ' <ul><li>"ses-" is an imposed prefix</li>'\
+ '<li>"YYYYMMDD" is the date of the session (8 digits, for instance 20180430 for April 30, 2018)</li>'\
+ '<li>"BBBB" is a string freely usable by the research group / user , this string cannot contain the underscore character.</li>'\
+ '<ul></div></div>'
class RawDataError(Exception):
"""Exception raised when the name does not follow the AnDO specification of rawdata level
Args:
Exception ([Exception]): [raised in engine.py]
"""
def __init__(self, arg):
names = arg
self.strerror = 'Level 4 error [rawdata folder missing]\n' \
+ ' A folder called rawdata should be present in the session folder ' + names[2] + '\n'
self.strout = '<div class="card"><div class="card-header bg-danger text-white">' \
+ '1 error found at rawdata folder level. </div><div class="card-body"> ' \
+ '<h4 class="em-header clearfix"><strong class="em-header pull-left">Error 4 type ' \
+ "[rawdata folder error] at : "+names[2]+" </strong></h4><br><b><i>" \
+ '</b></i> A folder called <i><b>"rawdata" </i></b>'\
+ ' should be present in the session folder.</div></div>'
class MetaDataError(Exception):
"""Exception raised when the name does not follow the AnDO specification of rawdata level
Args:
Exception ([Exception]): [raised in engine.py]
"""
def __init__(self, arg):
names = arg
self.strerror = 'Level 4 error [metadata folder missing]\n' \
+ ' A folder called metadata should be present in the session folder ' + names[2] + '\n'
self.strout = '<div class="card"><div class="card-header bg-danger text-white">' \
+ '1 error found at metadata folder level. </div><div class="card-body"> ' \
+ '<h4 class="em-header clearfix"><strong class="em-header pull-left">Error 4 type ' \
+ "[metadata folder error] at : "+names[2]+" </strong></h4><br><b><i>" \
+ '</b></i> A folder called <i><b>"metadata" </i></b>'\
+ ' should be present in the session folder.</div></div>'
class DerivativeDataError(Exception):
"""Exception raised when the name does not follow the AnDO specification of derivatives level
Args:
Exception ([Exception]): [raised in engine.py]
"""
def __init__(self, arg):
names = arg
        self.strerror = 'Level 4 error [derivatives folder missing]\n' \
+ ' A folder called derivatives should be present in the session folder ' + names[2] + '\n'
self.strout = '<div class="card"><div class="card-header bg-danger text-white">' \
+ '1 error found at derivatives folder level. </div><div class="card-body"> ' \
+ '<h4 class="em-header clearfix"><strong class="em-header pull-left">Error 4 type ' \
+ "[derivatives folder error] at : "+names[2]+" </strong></h4><br><b><i>" \
+ '</b></i> A folder called <i><b>"derivatives" </i></b>'\
+ ' should be present in the session folder.</div></div>'
|
AnDOChecker
|
/AnDOChecker-0.3.2-py3-none-any.whl/ando/error.py
|
error.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import argparse
import os.path
import sys
from ando.engine import is_AnDO
dir_rules = os.path.join(os.path.dirname(__file__), 'rules/')
def main():
"""
    Main file of the validator. Uses other class methods for checking
    different aspects of the directory path.
usage: checker.py [-h] [-v] path
positional arguments:
path Path to your folder
optional arguments:
-h, --help show this help message and exit
-v, --verbose increase output verbosity
"""
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true',
help='increase output verbosity')
parser.add_argument('path', help='Path to your folder ')
args = parser.parse_args()
    directory = args.path
    if not os.path.isdir(directory):
        print('Directory does not exist:', directory)
        exit(1)
    error_found = is_AnDO(directory, args.verbose, False)
    if error_found:
        print("\n" +
              directory +
              ": Is Not validated by AnDOChecker")
    else:
        print("\n" +
              directory +
              ": Is validated by AnDOChecker")
if __name__ == '__main__':
main()
|
AnDOChecker
|
/AnDOChecker-0.3.2-py3-none-any.whl/ando/checker.py
|
checker.py
|
import pandas as pd
import argparse
import os
import sys
# columns required
COLUMNS = ["experiments_name", "subjects_names",
"years", "months", "days",
"sessions_numbers", "comments"
]
def create_Struct(csv_file, pathToDir):
    """
    Create the directory structure described by the csv file given in argument.
    The file must follow the format where the
    first row ==> experiments_name,subjects_names,years,months,days,sessions_numbers,comments
    Args:
        csv_file (csv file): csv file that contains the list of directories to create
        pathToDir (Path): path to the directory in which the directories will be created
    """
dirnames = []
df = pd.read_csv(csv_file)
    # Formatting months and days; years are not zero-padded here
df["months"] = df.months.map("{:02}".format)
df["days"] = df.days.map("{:02}".format)
header = df.columns.values.tolist()
    # Check if the header contains the expected column names
    if header != COLUMNS:
        print("Failed : csv does not have the expected columns, please "
              + "check the documentation at 'https://github.com/INT-NIT/AnDOChecker/tools/'")
        exit(1)
if df.isnull().values.any():
number_of_null_values = df.isnull().sum().sum()
print("There are " +str(number_of_null_values)+" null values in the cvs file")
exit(1)
list_of_information = list()
for index, row in df.iterrows():
my_list = [
row["experiments_name"], row["subjects_names"],
row["years"], row["months"], row["days"],
row["sessions_numbers"], row["comments"]
]
list_of_information.append(my_list)
    for row_index, information in enumerate(list_of_information):
        # Zero-pad the session number to three digits
        num_sessions = "_{:03}".format(information[5])
        # Check if the year format is correct
        if len(str(information[2])) < 4:
            print("Error date format not valid at row " + str(row_index))
            exit(1)
        session_dir = ("exp-" + str(information[0]) + "/" + "sub-" + str(information[1])
                       + "/" + 'ses-' + str(information[2]) + str(information[3])
                       + str(information[4]) + num_sessions + "_" + str(information[6]))
        for subfolder in ("derivatives", "metadata", "rawdata"):
            dirnames.append(session_dir + "/" + subfolder)
for directory in dirnames:
try:
            # Create the directories if they do not exist
os.makedirs(pathToDir+str(directory))
except OSError:
# Error handling when directory already exists
print("Creation of the directory %s failed, already exist" % directory)
else:
print("Successfully created the directory %s " % directory)
def main():
"""
usage: AnDO_Creator.py [-h] pathToCsv pathToDir
positional arguments:
        pathToCsv   Path to your csv file
        pathToDir   Path to the folder in which to create the structure
optional arguments:
-h, --help show this help message and exit
"""
parser = argparse.ArgumentParser()
    parser.add_argument('pathToCsv', help='Path to your csv file')
    parser.add_argument('pathToDir', help='Path to the folder in which to create the structure')
args = parser.parse_args()
# Check if directory exists
if not os.path.isdir(args.pathToDir):
print('Directory does not exist:', args.pathToDir)
exit(1)
create_Struct(args.pathToCsv, args.pathToDir)
if __name__ == '__main__':
main()
|
AnDOcreator
|
/AnDOcreator-0.1-py3-none-any.whl/script/AnDOcreator.py
|
AnDOcreator.py
|
import argparse
import os
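# Note: the helpers below shell out to the external `tree` command,
# which must be installed and available on PATH.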
def show_struct(directory):
"""
Show the structure of the directory given in argument
Args:
directory ([Path]): [Path of the directory to show]
"""
cmd = "tree -d "+directory
os.system(cmd)
def show_experiments(directory):
"""
Show the experiments in the directory given in argument
Args:
directory ([Path]): [Path of the directory to show]
"""
cmd = "tree -d -L 1 "+directory
os.system(cmd)
def show_subjects(directory):
"""
Show the subjects in the directory given in argument
Args:
directory ([Path]): [Path of the directory to show]
"""
cmd = "tree -d -L 2 "+directory
os.system(cmd)
def show_sessions(directory):
"""
Show the sessions in the directory given in argument
Args:
directory ([Path]): [Path of the directory to show]
"""
cmd = "tree -d -L 3 "+directory
os.system(cmd)
def main():
"""
usage: AnDO_Viewer.py [-h] [-S] [-Se] [-Su] [-Ss] pathToDir
positional arguments:
pathToDir Path to the folder to show
optional arguments:
-h, --help show this help message and exit
-S, --show show dir structure
-Se, --show_experiments
show experiments folder only
-Su, --show_subjects show subjects folder only
-Ss, --show_sessions show sessions folder only
"""
parser = argparse.ArgumentParser()
    parser.add_argument(
        '-S', '--show', help=' show dir structure ', action='store_true')
parser.add_argument('-Se', '--show_experiments',
help=' show experiments folder only', action='store_true')
parser.add_argument('-Su', '--show_subjects',
help=' show subjects folder only', action='store_true')
parser.add_argument('-Ss', '--show_sessions',
help=' show sessions folder only', action='store_true')
parser.add_argument('pathToDir', help='Path to the folder to show')
args = parser.parse_args()
    # Check if directory exists
if not os.path.isdir(args.pathToDir):
print('Directory does not exist:', args.pathToDir)
exit(1)
    # Default to showing the full structure when no specific view is requested
    if args.show or not (args.show_experiments or args.show_subjects or args.show_sessions):
        show_struct(args.pathToDir)
if args.show_experiments:
show_experiments(args.pathToDir)
if args.show_subjects:
show_subjects(args.pathToDir)
if args.show_sessions:
show_sessions(args.pathToDir)
if __name__ == '__main__':
main()
|
AnDOviewer
|
/AnDOviewer-0.1-py3-none-any.whl/script/AnDOviewer.py
|
AnDOviewer.py
|
# AnPaFinder
Version 1.2.2 released 04/2022
A simple package built to find anagrams and palindromes from a webpage.
Includes functionality to store and process strings and substrings,
and easily parse text from a webpage.
## Installation
`AnPaFinder` can be installed via `pip`:
```
$ python3 -m pip install AnPaFinder
```
- Find the anagrams and palindromes from a webpage in one of three possible ways:
- Option 1. In python environment:
```
import anpa_tools.anpatools as anp
anp.find_anpas("<webpage url>")
```
- Option 2. Run anpatools.py from command line:
```
$ python3 anpatools.py --url "<webpage url>"
```
- Option 3. Using the shell script in the **Home Page - anpaFinder GitHub repository** under `/bin`:
```
$ ./AP_finder.bash -w "<webpage url>"
```
- Play with classes and functions in the `anpa_tools` package.
- More info to come
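- For example (illustrative), checking a single word with the `Palyndrome` class:
```
import anpa_tools.anpatools as anp
print(anp.Palyndrome("level").find_palindromes())  # prints "level"
```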
|
AnPaFinder
|
/AnPaFinder-1.2.2.tar.gz/AnPaFinder-1.2.2/README.md
|
README.md
|
import itertools
from collections import Counter, defaultdict
from html.parser import HTMLParser
import urllib.request
import string as strimp
import argparse
class StringObject:
"""Various functions for stored strings"""
def __init__(self, string):
self.string = string
self.sub_string = None
self.loaded_string = None
self.save_file = None
def __repr__(self):
"""Return a string representation of string object"""
return self.string
def substring(self, substring):
"""set value of self.sub_string"""
self.sub_string = substring
return self.sub_string
    def append(self):
        """
        Appends sub_string to string, and returns the appended string.
        >>> test = StringObject('This is my string')
        >>> print(test.string)
        This is my string
        >>> test.sub_string = ', but only for now.'
        >>> print(test.append())
        This is my string, but only for now.
        """
        appended = str(self.string) + str(self.sub_string)
        return appended
    def remove(self):
        """
        Removes sub_string from string, and returns the truncated string.
        >>> test = StringObject('This is my string, but only for now.')
        >>> print(test.string)
        This is my string, but only for now.
        >>> test.sub_string = ', but only for now.'
        >>> print(test.remove())
        This is my string
        """
        truncated = self.string.replace(str(self.sub_string), '')
        return truncated
    def mirror_string(self):
        """
        Returns the mirrored string of string.
        >>> test = StringObject('This is my string')
        >>> print(test.string)
        This is my string
        >>> print(test.mirror_string())
        gnirts ym si sihT
        """
        mirror = str(self.string[::-1])
        return mirror
def load_string(self, load_file):
"""Loads a string from load_file and returns that string."""
with open(load_file) as load_string:
self.loaded_string = load_string.read()
return self.loaded_string
def save_string(self, save_file):
"""Saves the string to save_file."""
with open(save_file, 'w') as saved:
saved.write(self.string)
class Anagram(StringObject):
"""Inherits from StringObject class and can get anagrams of string."""
    def __init__(self):
        super().__init__(None)
        self.words = []
def find_anagrams(self, words):
"""Takes a list of words, and hashes the letter frequency of each word
as keys in a dictionary and creates a value list to which words having
the same letter frequency are appended to the list. Returns the values
if the length of the list is greater than 1.
"""
anagrams_dict = defaultdict(list)
self.words = words
for word in words:
if len(word) > 2:
anagrams_dict[frozenset(dict(Counter(word)).items())].append(word)
return [anagrams for key, anagrams in anagrams_dict.items() if len(anagrams) > 1]
def create_all_anagrams(self):
"""Takes string and returns a list of string's anagrams (permutations)."""
anagrams = [''.join(perm) for perm in itertools.permutations(self.string)]
return anagrams
class Palyndrome(StringObject):
"""Inherits from StringObject and can identify palindromes."""
    def __init__(self, string):
        super().__init__(string)
def find_palindromes(self):
"""Checks if mirrored string is equal to string.
If true, returns the mirrored string."""
mirror = StringObject.mirror_string(self)
if mirror == str(self.string) and len(mirror) > 2:
palindrome = mirror
return palindrome
class ParserHTML(HTMLParser):
    """Inherits from HTMLParser in the python standard library.
    Parses text from inside <p> tags and collects it in `data_list`."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.is_data = False
        # Instance attribute, so each parser starts with a fresh list
        self.data_list = []
    def handle_starttag(self, tag, attrs):
        # `attrs` is required by the HTMLParser API, even though unused here
        if tag == 'p':
            self.is_data = True
    def handle_endtag(self, tag):
        if tag == 'p':
            self.is_data = False
    def handle_data(self, data):
        if self.is_data:
            self.data_list.append(data)
def access_webpage(url):
"""
Opens and reads a webpage from URL and returns raw HTML from webpage.
"""
webpage = urllib.request.urlopen(url)
content = webpage.read().decode()
return content
def parse_page(content):
"""
Takes raw html as input and parses text from <p> tags.
Returns a list of parsed text
"""
pars = ParserHTML()
pars.feed(str(content))
parsed_data = pars.data_list
return parsed_data
def clean_data(data_list):
"""
Takes a list of strings and iterates through the list to make all letters
lowercase and remove punctuation. Appends the cleaned strings to a new
list. Returns the new list.
"""
string_list = []
for item in data_list:
item = item.lower()
item = item.translate(str.maketrans('', '', strimp.punctuation))
string_list.append(item)
return string_list
def split_sentence(string_list):
"""
Takes a list of sentences or multi-word strings and iterates through the
list to split each sentence into a list of words. Iterates through the
words, and appends each word once to a new list. Returns new list.
"""
word_list = []
for items in string_list:
words = items.split(' ')
for word in words:
word = word.replace('\n', '')
word = word.replace('\\', '')
if word != '' and word not in word_list:
word_list.append(word)
return word_list
def find_anpas(URLin):
"""Takes a web address (string) as input and returns a list of anagram
sets and palindromes from the web page."""
web_content = access_webpage(URLin)
parsed_list = parse_page(web_content)
data_strings = clean_data(parsed_list)
data_words = split_sentence(data_strings)
print("\n\nAnagrams:")
anagrams = Anagram()
anagram_groups = anagrams.find_anagrams(data_words)
print(anagram_groups)
print("\n\nPalindromes:")
palindromes = [Palyndrome(n) for n in data_words]
for palyndrome in palindromes:
pal = palyndrome.find_palindromes()
if pal is not None:
print(pal)
if __name__ == '__main__':
    # Add command line argument
    parser = argparse.ArgumentParser(description='Process web url.')
    parser.add_argument('--url', type=str, required=True,
                        help='Web address to the page to analyze.')
    # Assign command line arguments
    args = parser.parse_args()
    # find_anpas performs the same fetch/parse/print steps as before
    find_anpas(args.url)
|
AnPaFinder
|
/AnPaFinder-1.2.2.tar.gz/AnPaFinder-1.2.2/src/anpa_tools/anpatools.py
|
anpatools.py
|
from typing import ClassVar, Literal, Pattern
import re
class Color:
"""Color values.
This class offers methods for parsing and interpreting AnTeML #COLOR value
strings.
#COLOR value strings can be one of:
- The literals "BLACK", "RED", "GREEN", "YELLOW", "BLUE", "MAGENTA",
"CYAN", or "WHITE".
- Literal "D" followed by a decimal number between 0 and 255, e.g.
"D0", "D28", "D255".
- Literal "X" followed by hex triplet for RGB values as either RGB or
RRGGBB, e.g. "X333" (=0x333333) or "XA03F22" (=0xA03F22).
"""
names: ClassVar[dict[str, tuple[str, str]]] = {
# Name fg-dec bg-dec
"black": ("30", "40"),
"red": ("31", "41"),
"green": ("32", "42"),
"yellow": ("33", "43"),
"blue": ("34", "44"),
"magenta": ("35", "45"),
"cyan": ("36", "46"),
"white": ("37", "47"),
"default": ("39", "49"),
}
    decimal_pattern: ClassVar[Pattern[str]] = re.compile(
        r"\AD(?:0?\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])\Z",
        re.IGNORECASE
    )
hex_pattern: ClassVar[Pattern[str]] = re.compile(
r"\AX(?:[0-9A-F]{3}|[0-9A-F]{6})\Z",
re.IGNORECASE
)
cache: ClassVar[
dict[
str,
tuple[
Literal["name", "decimal", "hex"],
str,
str
] | None
]
] = {}
@classmethod
def clear_cache(cls):
"""Clear the color lookup cache."""
cls.cache = {}
@classmethod
def parse(cls, value: str) -> tuple[Literal["name", "decimal", "hex"], str, str] | None:
"""Parse and cache a color value string.
Checks if the value has already been cached first, and if so returns
the cached values. Otherwise parses the string, adds the value to the
cache and then returns it.
Returns: A tuple containing the triplet "type" ("name", "fg_value",
"bg_value"), or `None` if *value* is not a valid color value string.
"""
value = value.lower()
if value in cls.cache:
return cls.cache[value]
if value in cls.names:
cls.cache[value] = ("name", cls.names[value][0], cls.names[value][1])
elif match := cls.decimal_pattern.fullmatch(value):
v = match[0][1:].lstrip("0")
cls.cache[value] = ("decimal", v, v)
elif match := cls.hex_pattern.fullmatch(value):
v = match[0][1:].lower()
if len(v) == 3:
v = "".join([f"{c}{c}" for c in v])
cls.cache[value] = ("hex", v, v)
else:
cls.cache[value] = None
return cls.cache[value]
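    # Illustrative results (derived from the table and patterns above):
    #   Color.parse("RED")  -> ("name", "31", "41")
    #   Color.parse("D28")  -> ("decimal", "28", "28")
    #   Color.parse("X333") -> ("hex", "333333", "333333")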
@classmethod
def type(cls, value: str) -> Literal["name", "decimal", "hex"] | None:
"""Determine the color string type."""
if parsed := cls.parse(value):
return parsed[0]
return None
@classmethod
def fg_value(cls, value: str) -> str | None:
"""Determine the foreground value of a color string."""
if parsed := cls.parse(value):
return parsed[1]
return None
@classmethod
def bg_value(cls, value: str) -> str | None:
"""Determine the foreground value of a color string."""
if parsed := cls.parse(value):
return parsed[2]
return None
class FontWeight:
"""FontWeight values.
This class offers methods for parsing and interpreting AnTeML #FONTWEIGHT
value strings.
#FONTWEIGHT value strings can be one of:
- The literals "BOLD", "NORMAL", or "LIGHT".
"""
weights: ClassVar[dict[str, str]] = {
# Name code
"bold": "1",
"normal": "22",
"light": "2",
"default": "22",
}
@classmethod
def parse(cls, value: str) -> str | None:
"""Parse a font weight and return corresponding ANSI value."""
value = value.lower()
if value in cls.weights:
return cls.weights[value]
return None
class Boolean:
"""Boolean values.
Evaluates a truthy/falsy string to bool `True` or `False`.
Truthy values: "1", "yes", "on", "true".
Falsy values: "0", "no", "off", "false".
"""
truthy_values: ClassVar[set[str]] = {
"1",
"yes",
"on",
"true",
}
falsy_values: ClassVar[set[str]] = {
"0",
"no",
"off",
"false",
}
@classmethod
def parse(cls, value: str) -> bool | None:
"""Convert truthy/falsy string to boolean, or None if indeterminate."""
value = value.lower()
if value in cls.truthy_values:
return True
if value in cls.falsy_values:
return False
return None
class ScreenMode:
"""ScreenMode values.
This class offers methods for parsing and interpreting AnTeML #SCREENMODE
value strings.
#SCREENMODE value strings can have one of the following values:
* "40x24m": 40 x 25 monochrome text
* "40x25c": 40 x 25 color text
* "80x25m": 80 x 25 monochrome text
* "80x25c": 80 x 25 color text
* "320x200c4": 320 x 200 4-color graphics
* "320x200m": 320 x 200 monochrome graphics
* "640X200m": 640 x 200 monochrome graphics
* "320x200c": 320 x 200 256-color graphics
* "640x200c": 640 x 200 16-color graphics
* "640x350m": 640 x 350 monochrome 2-color graphics
* "640x350c": 640 x 350 16-color graphics
* "640x480m": 640 x 480 monochrome 2-color graphics
* "640x480c": 640 x 480 16-color graphics
"""
modes: ClassVar[dict[str, tuple[str, str]]] = {
# Name Code Description
"40x24m": ("0", "40 x 25 monochrome text"),
"40x25c": ("1", "40 x 25 color text"),
"80x25m": ("2", "80 x 25 monochrome text"),
"80x25c": ("3", "80 x 25 color text"),
"320x200c4": ("4", "320 x 200 4-color graphics"),
"320x200m": ("5", "320 x 200 monochrome graphics"),
"640X200m": ("6", "640 x 200 monochrome graphics"),
"320x200c": ("13", "320 x 200 color graphics"),
"640x200c": ("14", "640 x 200 16-color graphics"),
"640x350m": ("15", "640 x 350 monochrome 2-color graphics"),
"640x350c": ("16", "640 x 350 16-color graphics"),
"640x480m": ("17", "640 x 480 monochrome 2-color graphics"),
"640x480c": ("18", "640 x 480 16-color graphics"),
"320x200c": ("19", "320 x 200 256-color graphics"),
}
@classmethod
def parse(cls, value: str) -> str | None:
"""Parse a screen mode string and return corresponding ANSI value."""
value = value.lower()
if value in cls.modes:
return cls.modes[value][0]
return None
# @classmethod
# def make_mode_list(cls) -> str:
# buffer: list[str] = []
# for mode_name, mode_values in cls.modes.items():
# buffer.append(f'* "{mode_name}": {mode_values[1]}')
# return "\n".join(buffer)
|
AnTeML
|
/AnTeML-0.1.4-py3-none-any.whl/anteml/attributes.py
|
attributes.py
|
import logging
from abc import ABC, abstractmethod
from typing import ClassVar, Final, Type
from textwrap import wrap
from .errors import AntemlAttributeError
from .attributes import Color, FontWeight, ScreenMode, Boolean
__all__ = ["Element", "ElementAlias", "AntemlElement", "FgElement", "elements"]
logger = logging.getLogger(__name__)
class Element(ABC):
"""Abstract base class for AnTeML Elements.
    All AnTeML Element handlers are derived from this class. Note that unlike in
a common DOM model, individual pairs of SGML element tags are not
represented by an individual instance of the related Element class, rather
the related Element class represents a stack keeping track of the state of
all instances of use of this tag in the SGML document.
On the one hand, this aligns more easily with how the underlying parser
    (`HTMLParser` from the `html.parser` package) works, on the other hand it
facilitates stream-based parsing and presentation of AnTeML content, where
a tag might be opened long before it is known when (or whether) it will be
closed to revert to the previous state.
By default, the behaviour of an Element handler is as follows:
* The handler's class attributes `tag` and `default` provide static
information about the the Element (associated tag, default-value ANSI
code). This should normally be treated as `typing.Final` in derived
classes.
* The handler is instantiated by the parser either on parser-initialisation
or when the associated tag is encountered for the first time, and a
reference is held by the parser for the parsers' lifetime.
* When the associated opening tag is encountered, the element handler's
`start()` method is called by the parser.
For most element types, the `start()` method does the following:
- On the first call, this will push the element's `default` value onto
the element handler's `stack` and set the element handler's `state`
to the value associated with the state encoded in the tag (and its
tuple of attrs, if any).
- On subsequent calls, the current `state` is pushed onto the `stack`
before setting the active `state` to the value associated with the
state encoded in the tag (and its tuple of attrs, if any).
- Finally, the ANSI code needed to present the current state is
returned.
* When the associated closing tag is encountered, the element handler's
`end()` method is called by the parser.
For most element types, the `end()` method does the following:
- Pop the last state from the element handler's `stack` and set the
current `state` to its value.
- Return the ANSI code needed to present the current state.
"""
# Class attributes
tag: ClassVar[str] = ""
default: ClassVar[str] = ""
# Instance attributes
stack: list[str]
state: str | None
def __init__(self):
"""Initialise a handler for instances of the element type."""
self.stack = []
self.state = None
def push_state(self, state: str) -> str:
"""Push the current `state` onto the stack and set current `state` to *state*."""
if self.state is not None:
self.stack.append(self.state)
else:
self.stack.append(self.default)
self.state = state
return state
def pop_state(self) -> str:
"""Pop *state* from the `stack`, set current `state` to *state* and return *state*."""
try:
state = self.stack.pop()
except IndexError:
state = self.default
self.state = state
return state
@abstractmethod
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an element: called when the associated tag is opened."""
...
@abstractmethod
def end(self) -> str:
"""Destruct an element: called when the associated tag is closed."""
...
class AntemlElement(Element):
"""Handler for the optional ANTEML element.
AnTeML may optionally be enclosed by `<ANTEML>...</ANTEML>` tags. In current
implementations these have no effect whatever, and purely serve as
facilitators for the inclusion or identification of AnTeML content inside
other document content on the user's side.
"""
# Class attributes
tag: Final[str] = "anteml"
default: Final[str] = ""
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an AntemlElement instance when the associated tag is opened.
This does nothing at all and always returns an empty string, because
`<ANTEML>...</ANTEML>` have no associated presentation or parsing behaviour.
"""
return ""
def end(self) -> str:
"""Destruct an AntemlElement instance when the associated tag is closed.
This does nothing at all and always returns an empty string, because
`<ANTEML>...</ANTEML>` have no associated presentation or parsing behaviour.
"""
return ""
class BrElement(Element):
"""Handler for the BR (break row) element.
The break row element inserts a new line in the output, similar to HTML
    `<br>`. The BR element accepts no attributes, and the closing tag is optional
    (typical usage will be as a sole opening tag `<br>` or an XML-style self-closed
    tag `<br />`).
"""
# Class attributes
tag: Final[str] = "br"
default: Final[str] = "\n"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
if len(attrs) > 0:
raise AntemlAttributeError(f"<BR> tag does not accept attributes (0 expected, {len(attrs)} given)")
return self.default
def end(self) -> str:
return ""
class FwElement(Element):
"""Handler for the Fw (font weight) element.
    The font weight element sets the terminal text to bold/bright, normal, or
light/dim/faint. The exact realisation depends on the implementation of the
terminal.
The FW element expects exactly 1 attribute, a FontWeight, which may be one
of the literals "BOLD", "NORMAL", or "LIGHT".
Example:
`Hello <FW BOLD>World</FW>! What is your <FW BOLD>name</FW>?`
This will produce the text "Hello **World**! What is your **name**?" with
the words "World" and "name" in boldface or brighter highlight (depending
on the terminal used).
"""
# Class attributes
tag: Final[str] = "fw"
default: Final[str] = "\x1b[22m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise a FwElement instance when the associated tag is opened."""
if len(attrs) != 1:
raise AntemlAttributeError(f"<FW> tag has the incorrect number of attributes (exactly 1 expected, {len(attrs)} given)")
if fw := FontWeight.parse(attrs[0][0]):
code = self.default + f"\x1b[{fw}m"
return self.push_state(code)
else:
raise AntemlAttributeError(f"Invalid #FONTWEIGHT attribute '{attrs[0][0]}' on <FW> tag")
def end(self) -> str:
"""Destruct a FwElement instance when the associated tag is closed. """
return self.pop_state()
class BgElement(Element):
"""Handler for the BG (background) element.
    The background element sets the background color of the terminal text. The
    BG element accepts one bare attribute value of type `#COLOR`.
Example:
`Hello <BG YELLOW>World</BG>! What is your <BG GREEN>name</BG>?`
This will set the background color of "World" to yellow (ANSI background
color `43`), then reset it to the default (ANSI background color
`49`), then set the background color of "name" to green (ANSI background
color `42`) before resetting it back to the default (ANSI background
color `49`) preceding the question mark.
Example:
`<BG X333>Do you like reading <BG X4169E1>Royal Blue</BG> text?</BG>`
This will initially set the background color to dark grey (hex
color #333333). The background will then be set to royal blue (hex color
#4169E1) before "Royal", and revert to dark grey (hex color #333333)
following "Blue". At the end of the string, the color will be reset to
the default (ANSI background color `49`).
"""
# Class attributes
tag: Final[str] = "bg"
default: Final[str] = "\x1b[49m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise a BgElement instance when the associated tag is opened."""
if len(attrs) != 1:
raise AntemlAttributeError(f"<BG> tag has the incorrect number of attributes (exactly 1 expected, {len(attrs)} given)")
if color_attr := Color.parse(attrs[0][0]):
color_t, color_bg = color_attr[0], color_attr[2]
code: str = ""
if color_t == "name":
code = f"\x1b[{color_bg}m"
elif color_t == "decimal":
code = f"\x1b[48;5;{color_bg}m"
elif color_t == "hex":
r, g, b = [int(c, 16) for c in wrap(color_bg, 2)]
code = f"\x1b[48;2;{r};{g};{b}m"
return self.push_state(code)
else:
raise AntemlAttributeError(f"Invalid #COLOR attribute '{attrs[0][0]}' on <BG> tag")
def end(self) -> str:
"""Destruct a BgElement instance when the associated tag is closed. """
return self.pop_state()
class FgElement(Element):
"""Handler for the FG (foreground) element.
    The foreground element sets the foreground color of the terminal text. The
    FG element accepts one bare attribute value of type `#COLOR`.
Example:
`Hello <FG YELLOW>World</FG>! What is your <FG GREEN>name</FG>?`
This will set the color to yellow (ANSI foreground color `33`) before
the text "World", then reset it to the default (ANSI foreground color
`39`) before the exclamation mark, before setting the foreground color
to green (ANSI foreground color `32`) before the text "name" and
resetting it back to the default (ANSI foreground color `39`) preceding
the question mark.
Example:
`<FG X333>Do you like reading <FG X4169E1>Royal Blue</FG> text?</FG>`
This will initially set the text foreground color to dark grey (hex
color #333333). The color will then be set to royal blue (hex color
#4169E1) before "Royal", and revert to dark grey (hex color #333333)
following "Blue". At the end of the string, the color will be reset to
the default (ANSI foreground color `39`).
"""
# Class attributes
tag: Final[str] = "fg"
default: Final[str] = "\x1b[39m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an FgElement instance when the associated tag is opened."""
if len(attrs) != 1:
raise AntemlAttributeError(f"<FG> tag has the incorrect number of attributes (exactly 1 expected, {len(attrs)} given)")
if color_attr := Color.parse(attrs[0][0]):
color_t, color_fg = color_attr[0:2]
code: str = ""
if color_t == "name":
code = f"\x1b[{color_fg}m"
elif color_t == "decimal":
code = f"\x1b[38;5;{color_fg}m"
elif color_t == "hex":
                r, g, b = [int(c, 16) for c in wrap(color_fg, 2)]
code = f"\x1b[38;2;{r};{g};{b}m"
return self.push_state(code)
else:
raise AntemlAttributeError(f"Invalid #COLOR attribute '{attrs[0][0]}' on <FG> tag")
def end(self) -> str:
"""Destruct an FgElement instance when the associated tag is closed."""
return self.pop_state()
class IElement(Element):
"""Handler for the I (italic) element.
    The italic element sets the terminal text to italic. The I element accepts
no attributes.
Example:
`Hello <I>World</I>!`
This will produce the text "Hello *World*!" with
the words "World" set in italic.
"""
# Class attributes
tag: Final[str] = "i"
default: Final[str] = "\x1b[23m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an IElement instance when the associated tag is opened."""
if len(attrs) > 0:
raise AntemlAttributeError(f"<I> tag does not accept attributes (0 expected, {len(attrs)} given)")
code = f"\x1b[3m"
return self.push_state(code)
def end(self) -> str:
"""Destruct a IElement instance when the associated tag is closed."""
return self.pop_state()
class UElement(Element):
"""Handler for the U (underline) element.
    The underline element sets the terminal text to underlined. The U element
accepts no attributes.
Example:
`Hello <U>World</U>!`
This will produce the text "Hello __World__!" with
the word "World" set underlined.
"""
# Class attributes
tag: Final[str] = "u"
default: Final[str] = "\x1b[24m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an UElement instance when the associated tag is opened."""
if len(attrs) > 0:
raise AntemlAttributeError(f"<U> tag does not accept attributes (0 expected, {len(attrs)} given)")
code = f"\x1b[4m"
return self.push_state(code)
def end(self) -> str:
"""Destruct a UElement instance when the associated tag is closed."""
return self.pop_state()
class SElement(Element):
"""Handler for the S (strikethrough) element.
    The strikethrough element sets the terminal text to strikethrough. The S
element accepts no attributes.
Example:
`Hello <S>World</S>!`
This will produce the text "Hello <s>World</s>!" with
the word "World" struck through.
"""
# Class attributes
tag: Final[str] = "s"
default: Final[str] = "\x1b[29m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an SElement instance when the associated tag is opened."""
if len(attrs) > 0:
raise AntemlAttributeError(f"<S> tag does not accept attributes (0 expected, {len(attrs)} given)")
code = f"\x1b[9m"
return self.push_state(code)
def end(self) -> str:
"""Destruct a SElement instance when the associated tag is closed."""
return self.pop_state()
class BlinkElement(Element):
"""Handler for the BLINK element.
    The BLINK element sets the terminal text to blinking mode. The BLINK
element accepts no attributes.
Example:
`Hello <BLINK>World</BLINK>!`
This will produce the text "Hello <blink>World</blink>!" with
the word "World" blinking.
"""
# Class attributes
tag: Final[str] = "blink"
default: Final[str] = "\x1b[25m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an BlinkElement instance when the associated tag is opened."""
if len(attrs) > 0:
raise AntemlAttributeError(f"<BLINK> tag does not accept attributes (0 expected, {len(attrs)} given)")
code = f"\x1b[5m"
return self.push_state(code)
def end(self) -> str:
"""Destruct a BlinkElement instance when the associated tag is closed."""
return self.pop_state()
class InvertElement(Element):
"""Handler for the INVERT element.
    The INVERT element sets the terminal text to inverted/inverse display mode
(foreground and background color swapped). The INVERT element accepts no
attributes.
Example:
`Hello <INVERT>World</INVERT>!`
This will produce the text "Hello World!" with the word "World" using
the inverted display mode to the rest of the terminal text.
"""
# Class attributes
tag: Final[str] = "invert"
default: Final[str] = "\x1b[27m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise an InvertElement instance when the associated tag is opened."""
if len(attrs) > 0:
raise AntemlAttributeError(f"<INVERT> tag does not accept attributes (0 expected, {len(attrs)} given)")
code = f"\x1b[7m"
return self.push_state(code)
def end(self) -> str:
"""Destruct a InvertElement instance when the associated tag is closed."""
return self.pop_state()
class HideElement(Element):
"""Handler for the HIDE element.
    The HIDE element sets the terminal text to hidden display mode, often
realised as barely visible text depending on the terminal. The HIDE element
accepts no attributes.
Example:
`Hello <HIDE>World</HIDE>!`
This will produce the text "Hello World!" with the word "World" using
the hidden display mode, and either not or barely visible but still
using up the space it normally would.
"""
# Class attributes
tag: Final[str] = "hide"
default: Final[str] = "\x1b[28m"
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise a HideElement instance when the associated tag is opened."""
if len(attrs) > 0:
raise AntemlAttributeError(f"<HIDE> tag does not accept attributes (0 expected, {len(attrs)} given)")
code = f"\x1b[8m"
return self.push_state(code)
def end(self) -> str:
"""Destruct a HideElement instance when the associated tag is closed."""
return self.pop_state()
class ScreenElement(Element):
"""Handler for the SCREEN element.
The SCREEN element sets the terminal emulation mode and/or text-wrapping
mode.
SCREEN accepts two optional attributes:
* MODE=#SCREENMODE - See `attributes.ScreenMode` for possible values.
* WRAP=#BOOL - Whether linewrapping should be used or not, boolean value
may be indicated by a truthy value like "1", "on", "yes", "true" or a
falsy value like "0", "off", "no", "false".
"""
# Class attributes
tag: Final[str] = "screen"
default: Final[str] = "\x1b7h"
def _determine_screen_mode(self, value: str | None) -> str:
if value is None:
raise AntemlAttributeError(f"Attribute 'MODE' specified without value on <SCREEN> tag")
v = ScreenMode.parse(value)
if v is None:
raise AntemlAttributeError(f"Invalide value {value!r} for 'MODE' attribute on <SCREEN> tag")
return v
def _determine_line_wrap(self, value: str | None) -> bool:
if value is None:
raise AntemlAttributeError(f"Attribute 'WRAP' specified without value on <SCREEN> tag")
v = Boolean.parse(value)
if v is None:
raise AntemlAttributeError(f"Invalide value {value!r} for 'WRAP' attribute on <SCREEN> tag")
return v
def start(self, attrs: list[tuple[str, str | None]]) -> str:
"""Initialise a ScreenElement instance when the associated tag is opened."""
if len(attrs) > 2:
raise AntemlAttributeError(f"<SCREEN> tag has too many attributes (max. 2 expected, {len(attrs)} given)")
screen_mode: str | None = None
line_wrap: bool | None = None
for attr_name, attr_value in attrs:
attr_name = attr_name.lower()
if attr_name == "mode":
if screen_mode is not None:
raise AntemlAttributeError(f"Attribute 'MODE' specified more than once on <SCREEN> tag")
screen_mode = self._determine_screen_mode(attr_value)
if attr_name == "wrap":
if line_wrap is not None:
raise AntemlAttributeError(f"Attribute 'WRAP' specified more than once on <SCREEN> tag")
line_wrap = self._determine_line_wrap(attr_value)
start_code: str = ""
end_code: str = ""
if screen_mode is not None:
start_code += f"\x1b[={screen_mode}h"
if line_wrap is True:
start_code += "\x1b[=7h"
end_code += "\x1b[=7h" # Assume line wrapping is on by default
if line_wrap is False:
start_code += "\x1b[=7l"
end_code += "\x1b[=7h"
if screen_mode is not None:
end_code += f"\x1b[={screen_mode}l"
self.push_state(end_code)
return start_code
def end(self) -> str:
"""Destruct a HideElement instance when the associated tag is closed."""
return self.pop_state()
class ElementAlias(ABC):
"""Abstract base class for Alias Elements.
An `ElementAlias` looks and behaves somewhat similarly to a regular `Element`,
however, they are really aliases for another `Element`. The crucial
    difference is that the `start()` and `end()` methods of an `ElementAlias`
don't return the ANSI presentation code associated with an `Element`/tag,
but rather they return a different AnTeML tag with which the original
    occurrence of the `ElementAlias`'s tags is to be overwritten.
"""
# Class attributes
tag: ClassVar[str] = ""
resolves_to: ClassVar[Type[Element]] = Element
@abstractmethod
def start(self, attrs: list[tuple[str, str | None]]) -> tuple[Type[Element], list[tuple[str, str | None]]]:
"""Returns AnTeML markup to rewrite the `ElementAlias` start tag."""
return (self.resolves_to, attrs)
@abstractmethod
def end(self) -> Type[Element]:
"""Returns AnTeML markup to rewrite the `ElementAlias` end tag."""
return self.resolves_to
class BElementAlias(ElementAlias):
"""An element alias for bold/bright text.
The B element is an alias for the `FwElement` with the `FontWeight`
attribute set to `bold`. That is, `<B>Hello!</B>` is equivalent to
`<FW BOLD>Hello!</FW>` and will get rewritten as such.
"""
# Class attributes
tag: Final[str] = "b"
resolves_to: Final[Type[Element]] = FwElement
def start(self, attrs: list[tuple[str, str | None]]) -> tuple[Type[Element], list[tuple[str, str | None]]]:
if len(attrs) > 0:
raise AntemlAttributeError(f"<B> tag does not accept attributes (0 expected, {len(attrs)} given)")
return (self.resolves_to, [("bold", None)])
def end(self) -> Type[Element]:
return self.resolves_to
class LElementAlias(ElementAlias):
"""An element alias for light/dim/faint text.
The L element is an alias for the `FwElement` with the `FontWeight`
attribute set to `light`. That is, `<L>Hello!</L>` is equivalent to
`<FW LIGHT>Hello!</FW>` and will get rewritten as such.
"""
# Class attributes
tag: Final[str] = "l"
resolves_to: Final[Type[Element]] = FwElement
def start(self, attrs: list[tuple[str, str | None]]) -> tuple[Type[Element], list[tuple[str, str | None]]]:
if len(attrs) > 0:
raise AntemlAttributeError(f"<L> tag does not accept attributes (0 expected, {len(attrs)} given)")
return (self.resolves_to, [("light", None)])
def end(self) -> Type[Element]:
return self.resolves_to
elements: dict[str, Type[Element] | Type[ElementAlias]] = {
# Core elements
"anteml": AntemlElement,
"br": BrElement,
"bg": BgElement,
"fg": FgElement,
"fw": FwElement,
"i": IElement,
"u": UElement,
"s": SElement,
"blink": BlinkElement,
"hide": HideElement,
"invert": InvertElement,
"screen": ScreenElement,
# Alias elements
"b": BElementAlias,
"l": LElementAlias,
}
|
AnTeML
|
/AnTeML-0.1.4-py3-none-any.whl/anteml/elements.py
|
elements.py
|
import re
import logging
from typing import Callable, Any, Type, ClassVar, Pattern
from html.parser import HTMLParser
from .entities import entities as default_entities
from .elements import Element, ElementAlias, elements as default_elements
__all__ = ["AntemlParser"]
logger = logging.getLogger(__name__)
class AntemlParser(HTMLParser):
"""Standard parser for AnTeML."""
# Class attributes
strip_whitespace_patterns: ClassVar[list[tuple[Pattern[str], str]]] = [
(re.compile(r"\s\s+", re.U), r" "),
(re.compile(r"\s(?=\<[^\<]*\>\s)"), r""),
(re.compile(r"\A((?:\<[^\<]*\>)*)\s"), r"\1"),
(re.compile(r"\s((?:\<[^\<]*\>\s?)*)\Z"), r"\1"),
] # @TODO: Find a better whitespace stripping pattern/algorithm
doctype_pattern: ClassVar[Pattern[str]] = re.compile(
r"\ADOCTYPE\s+AnTeML\s?.*\Z",
re.IGNORECASE
)
# Instance attributes
element_handlers: dict[str, Element | ElementAlias]
entities: dict[str, tuple[str, str]]
options: dict[str, bool]
receiver: Callable[[str], Any]
def __init__(
self,
receiver: Callable[[str], Any],
*,
strip_whitespace: bool = True,
strip_unknown: bool = False,
strip_comments: bool = False,
element_map: dict[str, Type[Element] | Type[ElementAlias]] | None = None,
entity_map: dict[str, tuple[str, str]] | None = None
) -> None:
"""Initialise and reset a new instance of the AnTeML parser."""
super().__init__(convert_charrefs=False)
self.receiver = receiver
self.options = {
"strip_whitespace": strip_whitespace,
"strip_unknown": strip_unknown,
"strip_comments": strip_comments
}
self.element_handlers = {}
if element_map is None:
element_map = default_elements
for name, handler in element_map.items():
self.element_handlers[name] = handler()
self.entities = {}
if entity_map is None:
self.entities = default_entities
else:
self.entities = entity_map
def feed(self, data: str, strip_whitespace: bool | None = None) -> None:
if strip_whitespace is None:
strip_whitespace = self.options["strip_whitespace"]
if strip_whitespace:
return super().feed(self.strip_whitespace(data))
return super().feed(data)
def handle_charref(self, name: str) -> None:
if name.startswith("x"):
char = chr(int(name[1:], 16))
else:
char = chr(int(name))
logger.debug(f"CHARREF: {char}")
self.output(char)
def handle_comment(self, data: str) -> None:
logger.debug(f"COMMENT: {data}")
if not self.options["strip_comments"]:
self.output(f"<!-- {data} -->")
def handle_data(self, data: str) -> None:
logger.debug("DATA: {data}")
self.output(data)
def handle_decl(self, decl: str) -> None:
logger.debug(f"DECLARATION: {decl}")
if not self.doctype_pattern.match(decl) and not self.options["strip_unknown"]:
self.output(f"<!{decl}>")
def resolve_endtag_alias(
self,
tag: str,
) -> str:
if tag in self.element_handlers:
handler = self.element_handlers[tag]
if isinstance(handler, ElementAlias):
element_type = handler.end()
logger.debug(f"REWRITING ENDTAG ALIAS: {tag} -> {element_type.tag}")
tag = element_type.tag
return tag
def resolve_starttag_alias(
self,
tag: str,
attrs: list[tuple[str, str | None]]
) -> tuple[str, list[tuple[str, str | None]]]:
if tag in self.element_handlers:
handler = self.element_handlers[tag]
if isinstance(handler, ElementAlias):
element_type, element_attrs = handler.start(attrs)
logger.debug(f"REWRITING ENDTAG ALIAS: {tag} (ATTRS: {attrs}) -> {element_type.tag} (ATTRS: {element_attrs})")
tag = element_type.tag
attrs = element_attrs
return (tag, attrs)
def handle_endtag(self, tag: str) -> None:
orig_tag = tag
tag = self.resolve_endtag_alias(tag.lower())
if tag in self.element_handlers:
handler = self.element_handlers[tag]
if not isinstance(handler, Element):
logger.critical(f"Encountered invalid element handler {handler!r} for tag {tag!r}")
return
logger.debug(f"ENDTAG: {tag}")
v = handler.end()
logger.debug(f"ENDTAG VALUE: {v!r}")
self.output(v)
else:
logger.warning(f"UNKNOWN ENDTAG: {tag}")
if not self.options["strip_unknown"]:
self.output(f"</{orig_tag}>") # type: ignore
def handle_entityref(self, name: str) -> None:
if name in self.entities:
            value = self.entities[name][0]
logger.debug(f"ENTITYREF: {name!r} -> {value!r}")
elif not self.options["strip_unknown"]:
value = f"&{name};"
logger.debug(f"UNKNOWN ENTITYREF: {name!r} -> {value!r}")
else:
logger.warning(f"UNRESOLVABLE ENTITYREF: {name!r}")
value = ""
self.output(value)
def handle_pi(self, data: str) -> None:
# @ TODO: IMPLEMENT
logger.debug(f"PROCESSING INSTRUCTION: {data}")
logger.error("PROCESSING INSTRUCTIONS NOT IMPLEMENTED YET")
def handle_startendtag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
tag, attrs = self.resolve_starttag_alias(tag.lower(), attrs)
if tag not in self.element_handlers:
logger.warning(f"UNKNOWN STARTENDTAG: {tag} (ATTRS: {attrs})")
if not self.options["strip_unknown"]:
self.output(self.get_starttag_text()) # type: ignore
return
handler = self.element_handlers[tag]
if not isinstance(handler, Element):
logger.critical(f"Encountered invalid element handler {handler!r} for tag {tag!r}")
return
logger.debug(f"STARTENDTAG: {tag} (ATTRS: {attrs})")
v = handler.start(attrs) + handler.end()
logger.debug(f"STARTENDTAG VALUE: {v!r}")
self.output(v)
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
tag, attrs = self.resolve_starttag_alias(tag.lower(), attrs)
if tag not in self.element_handlers:
logger.warning(f"UNKNOWN STARTTAG: {tag} (ATTRS: {attrs})")
if not self.options["strip_unknown"]:
self.output(self.get_starttag_text()) # type: ignore
return
handler = self.element_handlers[tag]
if not isinstance(handler, Element):
logger.critical(f"Encountered invalid element handler {handler!r} for tag {tag!r}")
return
logger.debug(f"STARTTAG: {tag} (ATTRS: {attrs})")
v = handler.start(attrs)
logger.debug(f"STARTTAG VALUE: {v!r}")
self.output(v)
def output(self, data: str) -> None:
"""Pass output to the parser's receiver."""
self.receiver(data)
def set_receiver(self, receiver: Callable[[str], Any]) -> None:
"""Set the receiver for data processed by the parser."""
self.receiver = receiver
def strip_whitespace(self, data: str) -> str:
"""Remove redundant whitespace from AnTeML code."""
for pattern, repl in self.strip_whitespace_patterns:
data = pattern.sub(repl, data)
return data
def unknown_decl(self, data: str) -> None:
"""Handle unknown declarations."""
logger.warning(f"UNKNOWN_DECLARATION: {data}")
super().unknown_decl(data)
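# Minimal usage sketch (hedged; assumes the module layout above):
#
#     import sys
#     from anteml.parser import AntemlParser
#
#     parser = AntemlParser(sys.stdout.write)
#     parser.feed("Hello <FW BOLD>World</FW>!")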
|
AnTeML
|
/AnTeML-0.1.4-py3-none-any.whl/anteml/parser.py
|
parser.py
|
# Anaton - A very fast anagram solver!
This is a very fast anagram solver, meant either to find a lot of anagrams from smaller sentences or to identify
longer anagrams from slightly longer sentences.
## Installation
Pre-requisites: Python 3.7
```
pip install anaton
```
## Usage
When finding anagrams we need 2 things, a seed sentence and a word list to match up against. This package does come
pre-shipped with a dictionary of 10,000 words, but you can use your own wordlist if you want to. Please note that this
library only supports the English characters a-z.
### Solve
It's pretty easy to get going!
```python
from anaton import solve
# Solve anagrams!
for chunk in solve('string to find anagrams from'):
# The anagram solver will return several anagrams at a time, thus we have to iterate over the chunk returned
for anagram in chunk:
print(anagram)
```
Due to the nature of how the anagrams are found, the generator will return a list of very similar anagrams at a time.
If you want you can also restrict it to only find words with a specified minimum length using the `min_word_length`
parameter
```python
from anaton import solve
# Solve anagrams!
for chunk in solve('string to find anagrams from', min_word_length=3):
# The anagram solver will return several anagrams at a time, thus we have to iterate over the chunk returned
for anagram in chunk:
print(anagram)
```
### Dictionary
If you want to use a custom wordlist, you will need to instantiate your own dictionary object. The Dictionary is
expecting your wordlist to already be sorted, for performance reasons. If this is not the case, you can easily sort it
in place on unix systems with this
```
sort wordlist.txt -o wordlist.txt
```
To instantiate the Dictionary object just do this:
```python
from anaton import Dictionary
dictionary = Dictionary('path-to-file')
```
If you want to find anagrams for somewhat long string, you may want to filter the search space when loading in the
dictionary, this can be done with the "filter_sentence" parameter
```python
from anaton import Dictionary
dictionary = Dictionary('path-to-file', filter_sentence='string to search within')
```
When you have instantiated the dictionary you can just pass it to the solver!
```python
from anaton import Dictionary, solve
dictionary = Dictionary('path-to-file')
anagram_generator = solve('string to search within', dictionary=dictionary)
```
|
AnaTon
|
/AnaTon-1.1.3.tar.gz/AnaTon-1.1.3/README.md
|
README.md
|
from collections import deque
from itertools import permutations, product
from typing import Iterator, List, Optional
import os
from .utils import word_to_number, strip
from .dictionary import Dictionary
def solve(letters: str, dictionary: Optional[Dictionary] = None, min_word_length: int = 2) -> Iterator[List[str]]:
"""
A BFS anagram solver that checks every candidate up against a lookup table of depth 1.
The anagram solver represents all letter sets as a number. Every character in a word is represented by a prime
number, then a set of letters is represented by the product of all the character-primes.
The algorithm then searches for different factorisations of the input letter set based on the factor set from the
dictionary.
When a match has been found it then converts the factors back into all the words represented by that factor and
yields all possible permutations of those words separated by spaces.
:param letters:
:param dictionary:
:param min_word_length:
:return:
"""
letters = strip(letters.lower())
letters_numerical = word_to_number(letters)
if dictionary is None:
default_path = os.path.dirname(__file__) + '/wordlist10000'
dictionary = Dictionary(default_path, letters)
queue = deque()
queue.append((letters_numerical, len(letters), len(letters), []))
if str(letters_numerical) in dictionary.words_by_group:
yield dictionary.words_by_group[str(letters_numerical)]
while queue:
letters, length, prev_length, path = queue.popleft()
for i in range(min(length - min_word_length, prev_length), min_word_length - 1, -1):
for word_letters in dictionary.words_by_length[i]:
if path and word_letters > path[-1]:
break
if not(letters % word_letters):
remaining_letters = letters // word_letters
if (word_letters > remaining_letters and i == length - i) or i > length - i:
if str(remaining_letters) in dictionary.words_by_group:
anagrams = cartesian_solutions(path + [word_letters] + [remaining_letters], dictionary)
yield anagrams
continue
queue.append((letters // word_letters, length - i, i, path + [word_letters]))
def cartesian_solutions(path: List[int], dictionary: Dictionary) -> List[str]:
"""
Finds all words in the dictionary matching the factor and returns all permutations of the cartesian product of
said words.
:param path:
:param dictionary:
:return:
"""
words = [dictionary.words_by_group[str(idx)] for idx in path]
solutions = []
for permutation in permutations(words):
solutions.extend([' '.join(c) for c in product(*permutation)])
return solutions
|
AnaTon
|
/AnaTon-1.1.3.tar.gz/AnaTon-1.1.3/anaton/solver.py
|
solver.py
|
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Docutils 0.4.1: http://docutils.sourceforge.net/" />
<title>Anagrammer</title>
<meta name="author" content="Joel Burton" />
<style type="text/css">
/*
:Author: David Goodger
:Contact: [email protected]
:Date: $Date: 2005-12-18 01:56:14 +0100 (Sun, 18 Dec 2005) $
:Revision: $Revision: 4224 $
:Copyright: This stylesheet has been placed in the public domain.
Default cascading style sheet for the HTML output of Docutils.
See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to
customize this style sheet.
*/
/* used to remove borders from tables and images */
.borderless, table.borderless td, table.borderless th {
border: 0 }
table.borderless td, table.borderless th {
/* Override padding for "table.docutils td" with "! important".
The right padding separates the table cells. */
padding: 0 0.5em 0 0 ! important }
.first {
/* Override more specific margin styles with "! important". */
margin-top: 0 ! important }
.last, .with-subtitle {
margin-bottom: 0 ! important }
.hidden {
display: none }
a.toc-backref {
text-decoration: none ;
color: black }
blockquote.epigraph {
margin: 2em 5em ; }
dl.docutils dd {
margin-bottom: 0.5em }
/* Uncomment (and remove this text!) to get bold-faced definition list terms
dl.docutils dt {
font-weight: bold }
*/
div.abstract {
margin: 2em 5em }
div.abstract p.topic-title {
font-weight: bold ;
text-align: center }
div.admonition, div.attention, div.caution, div.danger, div.error,
div.hint, div.important, div.note, div.tip, div.warning {
margin: 2em ;
border: medium outset ;
padding: 1em }
div.admonition p.admonition-title, div.hint p.admonition-title,
div.important p.admonition-title, div.note p.admonition-title,
div.tip p.admonition-title {
font-weight: bold ;
font-family: sans-serif }
div.attention p.admonition-title, div.caution p.admonition-title,
div.danger p.admonition-title, div.error p.admonition-title,
div.warning p.admonition-title {
color: red ;
font-weight: bold ;
font-family: sans-serif }
/* Uncomment (and remove this text!) to get reduced vertical space in
compound paragraphs.
div.compound .compound-first, div.compound .compound-middle {
margin-bottom: 0.5em }
div.compound .compound-last, div.compound .compound-middle {
margin-top: 0.5em }
*/
div.dedication {
margin: 2em 5em ;
text-align: center ;
font-style: italic }
div.dedication p.topic-title {
font-weight: bold ;
font-style: normal }
div.figure {
margin-left: 2em ;
margin-right: 2em }
div.footer, div.header {
clear: both;
font-size: smaller }
div.line-block {
display: block ;
margin-top: 1em ;
margin-bottom: 1em }
div.line-block div.line-block {
margin-top: 0 ;
margin-bottom: 0 ;
margin-left: 1.5em }
div.sidebar {
margin-left: 1em ;
border: medium outset ;
padding: 1em ;
background-color: #ffffee ;
width: 40% ;
float: right ;
clear: right }
div.sidebar p.rubric {
font-family: sans-serif ;
font-size: medium }
div.system-messages {
margin: 5em }
div.system-messages h1 {
color: red }
div.system-message {
border: medium outset ;
padding: 1em }
div.system-message p.system-message-title {
color: red ;
font-weight: bold }
div.topic {
margin: 2em }
h1.section-subtitle, h2.section-subtitle, h3.section-subtitle,
h4.section-subtitle, h5.section-subtitle, h6.section-subtitle {
margin-top: 0.4em }
h1.title {
text-align: center }
h2.subtitle {
text-align: center }
hr.docutils {
width: 75% }
img.align-left {
clear: left }
img.align-right {
clear: right }
ol.simple, ul.simple {
margin-bottom: 1em }
ol.arabic {
list-style: decimal }
ol.loweralpha {
list-style: lower-alpha }
ol.upperalpha {
list-style: upper-alpha }
ol.lowerroman {
list-style: lower-roman }
ol.upperroman {
list-style: upper-roman }
p.attribution {
text-align: right ;
margin-left: 50% }
p.caption {
font-style: italic }
p.credits {
font-style: italic ;
font-size: smaller }
p.label {
white-space: nowrap }
p.rubric {
font-weight: bold ;
font-size: larger ;
color: maroon ;
text-align: center }
p.sidebar-title {
font-family: sans-serif ;
font-weight: bold ;
font-size: larger }
p.sidebar-subtitle {
font-family: sans-serif ;
font-weight: bold }
p.topic-title {
font-weight: bold }
pre.address {
margin-bottom: 0 ;
margin-top: 0 ;
font-family: serif ;
font-size: 100% }
pre.literal-block, pre.doctest-block {
margin-left: 2em ;
margin-right: 2em ;
background-color: #eeeeee }
span.classifier {
font-family: sans-serif ;
font-style: oblique }
span.classifier-delimiter {
font-family: sans-serif ;
font-weight: bold }
span.interpreted {
font-family: sans-serif }
span.option {
white-space: nowrap }
span.pre {
white-space: pre }
span.problematic {
color: red }
span.section-subtitle {
/* font-size relative to parent (h1..h6 element) */
font-size: 80% }
table.citation {
border-left: solid 1px gray;
margin-left: 1px }
table.docinfo {
margin: 2em 4em }
table.docutils {
margin-top: 0.5em ;
margin-bottom: 0.5em }
table.footnote {
border-left: solid 1px black;
margin-left: 1px }
table.docutils td, table.docutils th,
table.docinfo td, table.docinfo th {
padding-left: 0.5em ;
padding-right: 0.5em ;
vertical-align: top }
table.docutils th.field-name, table.docinfo th.docinfo-name {
font-weight: bold ;
text-align: left ;
white-space: nowrap ;
padding-left: 0 }
h1 tt.docutils, h2 tt.docutils, h3 tt.docutils,
h4 tt.docutils, h5 tt.docutils, h6 tt.docutils {
font-size: 100% }
tt.docutils {
background-color: #eeeeee }
ul.auto-toc {
list-style-type: none }
</style>
</head>
<body>
<div class="document" id="anagrammer">
<h1 class="title">Anagrammer</h1>
<table class="docinfo" frame="void" rules="none">
<col class="docinfo-name" />
<col class="docinfo-content" />
<tbody valign="top">
<tr><th class="docinfo-name">Author:</th>
<td>Joel Burton</td></tr>
<tr><th class="docinfo-name">Version:</th>
<td>1.0</td></tr>
<tr class="field"><th class="docinfo-name">License:</th><td class="field-body">GPL 2.0 or newer</td>
</tr>
<tr class="field"><th class="docinfo-name">Summary:</th><td class="field-body">Anagram-finder using dictionaries</td>
</tr>
</tbody>
</table>
<p>This program will help you generate anagrams for playing
word games.</p>
<p>For example, if you run it like:</p>
<pre class="literal-block">
$ anagrammer mushroom
</pre>
<p>it returns:</p>
<pre class="literal-block">
mushroom
8 = 1
mushroom
6 = 3
hommos humors mohurs
5 = 6
homos hours humor mohur moors rooms
</pre>
<p>These are all of the anagrams of mushroom in the dictionary
that are at least 5 characters long.</p>
<p>You can specify command-line options for the minimum number of
letters, or to use alternate dictionaries. In addition, you can
capitalize letters, and these letters will be required in the
resulting words. For example, if we had typed "MusHroom", above,
our 5-letter words would be: "homos humor mohur", since these
use both M and H.</p>
<p>There is also a command-line switch (-c) to produce
"single-column" mode, rather than nicely printing results,
as shown above. For our same query, this would return:</p>
<pre class="literal-block">
mushroom
humors
mohurs
hommos
humor
mohur
homos
moors
rooms
hours
</pre>
<p>This output is useful for feeding to other programs/filters.</p>
<div class="section">
<h1><a id="dictionaries" name="dictionaries">Dictionaries</a></h1>
<p>Anagrammer does not come with any real dictionaries; instead, you
must create these.</p>
<p>In order to find anagrams efficiently, it must precompute an
anagram dictionary from a list of words. You can do this as:</p>
<pre class="literal-block">
$ anagrammer --convert /path/to/list/of/words/foo
</pre>
<p>Which creates <cite>foo.anagram</cite>, stored in
/usr/local/share/games/anagrammer. You can change the dictionary
path by editing the config.py.</p>
<p>A suitable dictionary to use would be the one that ships with
most Linux/Unix systems, usually found at <cite>/usr/share/dict/words</cite>.
An excellent set of much more comprehensive dictionaries are at
<a class="reference" href="http://personal.riverusers.com/~thegrendel/software.html">http://personal.riverusers.com/~thegrendel/software.html</a></p>
<p>If you cannot find a dictionary, or just want to play with
anagrammer, a play dictionary is included, "mini.lst.gz". This
contains words starting with a, b, or c, that are 3-7 characters
long. You can convert it with:</p>
<pre class="literal-block">
$ anagrammer --convert mini.gz
</pre>
<p>Which will create a dictionary called "mini" in the above
directory.</p>
</div>
</div>
</body>
</html>
|
Anagrammer
|
/Anagrammer-1.1.zip/Anagrammer-1.1/README.html
|
README.html
|
Anagrammer
==========
:Author: Joel Burton
:Version: 1.0
:License: GPL 2.0 or newer
:Summary: Anagram-finder using dictionaries
This program will help you generate anagrams for playing
word games.
For example, if you run it like::
$ anagrammer mushroom
it returns::
mushroom
8 = 1
mushroom
6 = 3
hommos humors mohurs
5 = 6
homos hours humor mohur moors rooms
These are all of the anagrams of mushroom in the dictionary
that are at least 5 characters long.
You can specify command-line options for the minimum number of
letters, or to use alternate dictionaries. In addition, you can
capitalize letters, and these letters will be required in the
resulting words. For example, if we had typed "MusHroom", above,
our 5-letter words would be: "homos humor mohur", since these
use both M and H.
There is also a command-line switch (-c) to produce
"single-column" mode, rather than nicely printing results,
as shown above. For our same query, this would return::
mushroom
humors
mohurs
hommos
humor
mohur
homos
moors
rooms
hours
This output is useful for feeding to other programs/filters.
Dictionaries
------------
Anagrammer does not come with any real dictionaries; instead, you
must create these.
In order to find anagrams efficiently, it must precompute an
anagram dictionary from a list of words. You can do this as::
$ anagrammer --convert /path/to/list/of/words/foo
Which creates `foo.anagram`, stored in
/usr/local/share/games/anagrammer. You can change the dictionary
path by editing the config.py.
A suitable dictionary to use would be the one that ships with
most Linux/Unix systems, usually found at `/usr/share/dict/words`.
An excellent set of much more comprehensive dictionaries are at
http://personal.riverusers.com/~thegrendel/software.html
If you cannot find a dictionary, or just want to play with
anagrammer, a play dictionary is included, "mini.lst.gz". This
contains words starting with a, b, or c, that are 3-7 characters
long. You can convert it with::
$ anagrammer --convert mini.gz
Which will create a dictionary called "mini" in the above
directory.
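The trick that makes lookup fast is that every word is stored under its
letters in sorted order, so all anagrams of a rack share one key (this is
what `alphaword` in convert.py computes). A minimal sketch of the key::
    >>> ''.join(sorted("mushroom"))
    'hmmoorsu'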
|
Anagrammer
|
/Anagrammer-1.1.zip/Anagrammer-1.1/README
|
README
|
__author__ = "Joel Burton <[email protected]>"
__version__ = "1.0"
import sys
import os.path
import glob
import optparse
import shelve
import textwrap
import convert
import lookup
from config import DICTIONARIES_DIR, MINLENGTH_DEFAULT, DEFAULT_DICT
def anagrammer(words, dictfile, minlength, column=False):
"""Anagram words using dictfile and print results.
@param words: words to anagram
@type words: iterable
@param dictfile: path to shelve dictionary file
@type dictfile: string
@param minlength: minimum # of letters for words
@type minlength: int
@param column: print in single column?
@type column: boolean
@returns: Nothing, just prints
Tests for this function are in tests/anagrammer.txt.
"""
worddict = shelve.open(dictfile, "r")
if not column: print # create top space
for word in words:
found = lookup.lookup(word, minlength, worddict)
if column:
if found:
print "\n".join(found)
else:
print word
# pretty print, grouped by num letters
if not found:
print "\nNone found.\n"
return
print
for wlen in range( len(found[0]), 0, -1 ):
words = [ w for w in found if len(w) == wlen ]
if words:
print "%s = %s" % (wlen, len(words))
print textwrap.fill(
" ".join(sorted(words)),
initial_indent=" ",
subsequent_indent=" ")
print
def start():
"""Interactive start of program."""
dicts = glob.glob("%s/*.anagram" % DICTIONARIES_DIR)
dicts = [ os.path.splitext( os.path.basename(d) )[0] for d in dicts ]
if DEFAULT_DICT in dicts:
default_dict = DEFAULT_DICT
else:
default_dict = dicts[0]
if dicts:
parser = optparse.OptionParser(
usage="%prog [options] rack ...",
version="%prog " + __version__,
description=__doc__
)
else:
parser = optparse.OptionParser(
usage="%prog [options]",
version="%prog " + __version__,
description="""Before using this program to find anagrams, you must
create an anagram dictionary with --convert. Once you have done this,
you can then re-run the program to find anagrams."""
)
parser.add_option(
"--convert",
dest="dictpath",
help="Convert wordlist (plaintext or gzipped) at DICTPATH into anagram file.",
type="string",
)
if dicts:
if len(dicts) > 1:
parser.add_option(
"-d", "--dict",
dest="dict",
help="Dictionary to use. Choice of (%s). Defaults to %s." % ( "|".join(dicts), default_dict ),
default=default_dict,
type="choice",
choices=dicts,
)
parser.add_option(
"-c", "--column",
dest="column",
help="Display in simple column (for other programs to read).",
action="store_true",
)
parser.add_option(
"-l","--min-length",
dest="minlength",
help="Minimum number of letters for found words (default: %s)." % MINLENGTH_DEFAULT,
type="int",
default=MINLENGTH_DEFAULT,
)
(options, args) = parser.parse_args()
if options.dictpath:
convert.convert(options.dictpath, DICTIONARIES_DIR)
sys.exit()
if len(args)<1: # no word given
parser.print_help()
sys.exit(1)
word = args[0:]
dictfile = "%s/%s.anagram" % (DICTIONARIES_DIR, getattr(options, "dict", default_dict))
anagrammer(word, dictfile, options.minlength, options.column)
if __name__ == "__main__": start()
|
Anagrammer
|
/Anagrammer-1.1.zip/Anagrammer-1.1/src/anagrammer/anagrammer.py
|
anagrammer.py
|
__author__ = "Joel Burton <[email protected]>"
__version__ = "1.0"
import os.path
import gzip
import shelve
def alphaword(w):
"""foobar -> abfoor
@param w: word to convert
@returns: word with letters in alpha order
For example:
>>> alphaword("foobar")
'abfoor'
>>> alphaword("")
''
"""
return ''.join(sorted(list(w)))
def convert(wordlist, path):
"""Convert a word list into an anagram dictionary.
@param wordlist: path to word list (plaintext or gzipped)
@param path: directory to store resulting file in
@returns: None, but creates resulting file as side-effect.
To demonstrate, let's set up a fake word list:
>>> PATH="/tmp"
>>> f = open("%s/doctest.lst" % PATH, 'w')
>>> print >>f, "cat"
>>> print >>f, "act"
>>> print >>f, "dog"
>>> f.close()
Now convert it and test it:
>>> convert("%s/doctest.lst" % PATH, PATH)
>>> d = shelve.open("%s/doctest.anagram" % PATH)
>>> d['act']
['cat', 'act']
>>> d['dgo']
['dog']
>>> d['pig']
Traceback (most recent call last):
...
KeyError: 'pig'
And clean up:
>>> os.unlink("%s/doctest.lst" % PATH)
>>> os.unlink("%s/doctest.anagram" % PATH)
"""
if not os.path.isdir(path):
if os.path.exists(path):
raise "FileExists", "A file exists at %s, which should be a path for our dictionaries." % path
os.mkdir(path)
name = os.path.splitext(os.path.basename(wordlist))[0]
d = shelve.open("%s/%s.anagram" % (path, name), "n")
if wordlist.endswith('.gz'):
opener = gzip.GzipFile
else:
opener = open
for word in opener(wordlist):
word = word.strip().lower()
letters = alphaword(word)
# Shelve doesn't support "x in list" or "list.setdefault" and doesn't
# notice values mutating, so we have do this very verbosely:
if letters in d:
d[letters] = d[letters] + [word]
else:
d[letters] = [word]
d.close()
|
Anagrammer
|
/Anagrammer-1.1.zip/Anagrammer-1.1/src/anagrammer/convert.py
|
convert.py
|
Analogy is an experimental open source project for Natural Language Processing. It aims to perform two newly introduced NLP tasks: word comparison and sentence comparison.
Analogy provides the semantic similarity and differences between two pieces of text. Text can be in the form of a word or a sentence.
A pretrained model is released to get you started. You can also retrain an existing model.
Getting Started:
Prerequisites:
Python 3.0 or higher
Stanford Core NLP (3.9.2)
Installing:
pip install analogy
Read the instructions on how to install and run the Stanford CoreNLP server.
Analogy functions:
1. findComparison(model, word1, word2)
2. findSentenceComparison(model, sentence1, sentence2)
3. trainModel(sentences) #Input is list of sentences
4. retrainModel(model, sentences)
5. saveModel(name, model) #Be sure to add '.npz' at the end
6. loadModel(name)
Example:
findComparison(model, "apple", "orange")
Output:
Word1 = apple
Word2 = orange
Similarity = fruit
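A minimal end-to-end sketch, assuming the functions above are importable from the top-level `analogy` package (the README only lists the function names, so the import path is an assumption):
```python
from analogy import trainModel, saveModel, loadModel, findComparison  # assumed import path

sentences = ["the apple is a fruit", "the orange is a fruit"]  # your own training sentences
model = trainModel(sentences)       # train a new model
saveModel("my_model.npz", model)    # note the '.npz' suffix
model = loadModel("my_model.npz")
print(findComparison(model, "apple", "orange"))
```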
|
Analogy
|
/Analogy-0.1.tar.gz/Analogy-0.1/README.md
|
README.md
|
# AnalysisDataLink
This repository facilitates easy access to the materialized (SQL) database tables. Have a look at https://github.com/seung-lab/AnnotationPipelineOverview to get a better overview of the system.
The database can be accessed directly as described [below](https://github.com/seung-lab/AnalysisDataLink#accessing-the-sql-databases-directly). However, it is recommended to use this repository, as it not only helps with querying the database but also sets datatypes and converts the geometry coordinates, which are stored in a postgis string format.
## Access through the DataLink
The DataLink has three hierarchy levels:
- low level: `datalink_base.py`
- query level: `datalink.py`
- abstract level: `datalink_ext.py`
We anticipate that most users will operate on the highest level, where queries to the different table schemas are predefined for convenient access. However, these functions might be too limited in some cases and require more low-level access. We hope that users contribute to this repo by formulating their currently unsupported queries with the means of the lower-level modules and adding them to `datalink_ext.py`.
### Example
Accessing synapses from all pyramidal cells onto all other cells:
```
from analysisdatalink import datalink_ext as de
adle = de.AnalysisDataLinkExt("pinky100", 50, sqlalchemy_database_uri)
# Read all pyramidal cell ids
pyc_ids = adle.query_cell_types("soma_valence", cell_type_include_filter=["e"], return_only_ids=True, exclude_zero_root_ids=True)
# Read synapses restricted to pyramidal cells (takes ~11s and returns 17571 synapses)
synapse_df = adle.query_synapses("pni_synapses_i3", pre_ids=pyc_ids)
```
See below for how to build the `sqlalchemy_database_uri`. For convenience, one can define `DATABASE_URI` as global system variable which will be read if `sqlalchemy_database_uri` is undefined.
## Accessing the SQL databases directly
The SQL database can be accessed in many ways, sqlAlchemy and pandas are a good place to start. Adminer is a good tool to view the database content.
### Table naming
All tables are called following a convention:
```
{dataset_name}_{table_name}_v{materialization_version}
```
For instance, a synapse table might be called: `pinky100_pni_synapses_i3_v38`.
### Pandas examples
Getting all the cell segment ids (also called root ids):
```
import pandas as pd
sql_query = "SELECT * FROM pinky100_cellsegment_v38"
df = pd.read_sql(sql_query, database_uri, index_col="id")
```
where `database_uri` is built as follows:
```
database_uri = "postgresql://{user_name}:{password}@{database_ip}/postgres"
```
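Since the result is a regular pandas DataFrame, downstream analysis is plain pandas. For example (a hypothetical follow-up to the synapse query above), counting synapses per presynaptic root id:
```
counts = synapse_df.groupby("pre_pt_root_id").size()
print(counts.sort_values(ascending=False).head())
```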
|
AnalysisDataLink
|
/AnalysisDataLink-0.4.1.tar.gz/AnalysisDataLink-0.4.1/README.md
|
README.md
|
import numpy as np
from analysisdatalink import datalink
from collections import defaultdict
class AnalysisDataLinkExt(datalink.AnalysisDataLink):
def __init__(self, dataset_name, materialization_version=None,
sqlalchemy_database_uri=None, verbose=True,
annotation_endpoint=None):
super().__init__(dataset_name, materialization_version,
sqlalchemy_database_uri, verbose=verbose,
annotation_endpoint=annotation_endpoint)
def query_synapses(self, synapse_table, pre_ids=None, post_ids=None,
compartment_include_filter=None,
include_autapses=False,
compartment_table=None):
""" Query synapses
:param synapse_table: str
table name without dataset prefix or version suffix
:param pre_ids: None, list or np.ndarray
:param post_ids: None, list or np.ndarray
:param compartment_table: None, str
defines compartment table -- has to be 'postsynapsecompartment'.
DO NOT USE at the moment since there are no good compartment
labels yet
:param compartment_include_filter: list of str
:param include_autapses: bool
:return: pandas DataFrame of synapses matching the filters
"""
filter_in_dict = defaultdict(dict)
filter_equal_dict = defaultdict(dict)
if pre_ids is not None:
filter_in_dict[synapse_table]["pre_pt_root_id"] = [int(pid) for pid in pre_ids]
if post_ids is not None:
filter_in_dict[synapse_table]["post_pt_root_id"] = [int(pid) for pid in post_ids]
if not include_autapses:
filter_equal_dict[synapse_table]["valid"] = True
if compartment_table is not None:
tables = [[synapse_table, "id"],
[compartment_table, "synapse_id"]]
if compartment_include_filter is not None:
filter_in_dict[compartment_table]['label'] = compartment_include_filter
else:
tables = [synapse_table]
df = self.specific_query(tables,
filter_in_dict=filter_in_dict,
filter_equal_dict=filter_equal_dict)
return df
def query_cell_types(self, cell_type_table, cell_type_include_filter=None,
cell_type_exclude_filter=None, return_only_ids=False,
exclude_zero_root_ids=False):
""" Query cell type tables
:param cell_type_table: str
table name without dataset prefix or version suffix
:param cell_type_include_filter: list of str
:param cell_type_exclude_filter: list of str
:param return_only_ids: bool
:param exclude_zero_root_ids: bool
:return: pandas DataFrame or numpy array
"""
filter_in_dict = defaultdict(dict)
if cell_type_include_filter is not None:
filter_in_dict[cell_type_table]["cell_type"] = cell_type_include_filter
filter_notin_dict = defaultdict(dict)
if exclude_zero_root_ids:
filter_notin_dict[cell_type_table]["pt_root_id"] = [0]
if cell_type_exclude_filter is not None:
filter_notin_dict[cell_type_table]['cell_type'] = cell_type_exclude_filter
if return_only_ids:
select_columns = ["pt_root_id"]
else:
select_columns = None
df = self.specific_query(tables=[cell_type_table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
select_columns=select_columns)
if return_only_ids:
return np.array(df, dtype=np.uint64).squeeze()
else:
return df
def query_cell_ids(self, cell_id_table, cell_id_filter=None,
cell_id_exclude_filter=None, return_only_ids=False,
exclude_zero_root_ids=False):
""" Query cell ids
:param cell_id_table: str
table name without dataset prefix or version suffix
:param cell_id_filter: list of uint64s
:param cell_id_exclude_filter: list of uint64s
:param return_only_ids: bool
:param exclude_zero_root_ids: bool
:return: pandas DataFrame or numpy array
"""
filter_in_dict = defaultdict(dict)
if cell_id_filter is not None:
filter_in_dict[cell_id_table]['func_id'] = [int(pid) for pid in cell_id_filter]
filter_notin_dict = defaultdict(dict)
if cell_id_exclude_filter is not None:
filter_notin_dict[cell_id_table]['func_id'] = [int(pid) for pid in cell_id_exclude_filter]
if exclude_zero_root_ids:
filter_notin_dict[cell_id_table]['pt_root_id'] = [0]
if return_only_ids:
select_columns = ['pt_root_id']
else:
select_columns = None
df = self.specific_query(tables=[cell_id_table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
select_columns=select_columns)
if return_only_ids:
return np.array(df, dtype=np.uint64).squeeze()
else:
return df
def query_coreg(self, coreg_table, cell_id_filter=None,
cell_id_exclude_filter=None, return_only_mapping=False,
exclude_zero_root_ids=False):
""" Queries coregistration
:param coreg_table: str
table name without dataset prefix or version suffix
:param cell_id_filter: list of uint64s
:param cell_id_exclude_filter: list of uint64s
:param return_only_mapping: bool
returns an array of [[root_id, f_id], ...]
:param exclude_zero_root_ids: bool
exclude zero root ids
:return: pandas DataFrame or numpy array
"""
filter_in_dict = defaultdict(dict)
if cell_id_filter is not None:
filter_in_dict[coreg_table]['func_id'] = [int(pid) for pid in cell_id_filter]
filter_notin_dict = defaultdict(dict)
if cell_id_exclude_filter is not None:
filter_notin_dict[coreg_table]['func_id'] = [int(pid) for pid in cell_id_exclude_filter]
if exclude_zero_root_ids:
filter_notin_dict[coreg_table]['pt_root_id'] = [0]
if return_only_mapping:
select_columns = ['pt_root_id', 'func_id']
else:
select_columns = None
df = self.specific_query(tables=[coreg_table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
select_columns=select_columns)
if return_only_mapping:
return np.array(df, dtype=np.uint64).squeeze()
else:
return df
|
AnalysisDataLink
|
/AnalysisDataLink-0.4.1.tar.gz/AnalysisDataLink-0.4.1/analysisdatalink/datalink_ext.py
|
datalink_ext.py
|
from .datalink import AnalysisDataLink
mapping_suffix = '_v{}'
def annotation_version_mapping(table,
version_from,
version_to,
dataset_name,
sql_database_uri_base,
mapping_column='pt_root_id',
merge_column='id',
filter_in_dict={},
filter_notin_dict={},
filter_equal_dict={}):
df_merge = multiversion_merged_query(table,
version_from,
version_to,
dataset_name,
sql_database_uri_base,
merge_column='id',
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
filter_equal_dict=filter_equal_dict)
mapping_column_from = mapping_column + mapping_suffix.format(version_from)
mapping_column_to = mapping_column + mapping_suffix.format(version_to)
if mapping_column_from not in df_merge.columns:
raise ValueError("Mapping column '{}' not in annotation table".format(mapping_column))
return df_merge[[mapping_column_from, mapping_column_to]]
def multiversion_merged_query(table,
version_A,
version_B,
dataset_name,
sql_database_uri_base,
merge_column='id',
filter_in_dict={},
filter_notin_dict={},
filter_equal_dict={}):
"""
Returns a merged dataframe of two materialization version queries.
Columns other than annotation id get the suffix of the data version.
Query filtering follows the structure of AnalysisDataLink.specific_query
"""
df_A = _specific_version_query(dataset_name, sql_database_uri_base, version_A,
table, filter_in_dict, filter_notin_dict, filter_equal_dict)
df_B = _specific_version_query(dataset_name, sql_database_uri_base, version_B,
table, filter_in_dict, filter_notin_dict, filter_equal_dict)
return df_A.merge(df_B, on=merge_column, how='outer',
suffixes=(mapping_suffix.format(version_A), mapping_suffix.format(version_B)))
def _specific_version_query(dataset_name, sql_database_uri_base, data_version,
table, filter_in_dict={}, filter_notin_dict={}, filter_equal_dict={}):
dl = AnalysisDataLink(dataset_name=dataset_name,
sqlalchemy_database_uri=sql_database_uri_base,
materialization_version=data_version,
verbose=False)
df = dl.specific_query([table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
filter_equal_dict=filter_equal_dict)
return df
|
AnalysisDataLink
|
/AnalysisDataLink-0.4.1.tar.gz/AnalysisDataLink-0.4.1/analysisdatalink/version_map.py
|
version_map.py
|
from emannotationschemas import models as em_models, \
mesh_models as em_mesh_models
from geoalchemy2.shape import to_shape, from_shape
from geoalchemy2.elements import WKBElement
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import time
import numpy as np
import pandas as pd
import os
import re
import json
import requests
from decimal import Decimal
import analysisdatalink
def build_database_uri(base_uri, dataset_name, materialization_version):
"""Builds database name out of parameters"""
qry_pg = re.search('/postgres$', base_uri)
# Hack to convert old ids. Should be dropped when the new system is rolled out.
if qry_pg is not None:
base_uri = base_uri[0:qry_pg.start()]
database_suffix = em_models.format_database_name(dataset_name, materialization_version)
return base_uri + '/' + database_suffix
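# Example (illustrative only; the exact suffix comes from
# em_models.format_database_name): build_database_uri(
# "postgresql://u:p@host/postgres", "pinky100", 38) strips the trailing
# "/postgres" and returns base_uri + "/" + format_database_name("pinky100", 38).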
def wkb_to_numpy(wkb):
""" Fixes single geometry column """
shp = to_shape(wkb)
return np.array([shp.xy[0][0], shp.xy[1][0], shp.z], dtype=int)
def fix_wkb_columns(df):
""" Fixes geometry columns """
if len(df) > 0:
for colname in df.columns:
if isinstance(df.at[0,colname], WKBElement):
df[colname] = df[colname].apply(wkb_to_numpy)
return df
def fix_decimal_columns(df):
if len(df) > 0:
is_decimal = np.vectorize(lambda x: isinstance(x, Decimal))
is_integer_col = np.vectorize(lambda x: float(x).is_integer())
for col in df.columns:
if np.all(is_decimal(df[col])):
if np.all(is_integer_col(df[col])):
df[col] = df[col].apply(np.uint64)
else:
df[col] = df[col].apply(float)
return df
def get_materialization_versions(dataset_name, materialization_endpoint=None):
""" Gets materialization versions with timestamps """
if materialization_endpoint is None:
materialization_endpoint = analysisdatalink.materialization_endpoint
url = '{}/api/dataset/{}'.format(materialization_endpoint, dataset_name)
r = requests.get(url)
assert r.status_code == 200
versions = {d['version']:d['time_stamp'] for d in r.json() if d['valid']}
return versions
def get_annotation_info(dataset_name, table_name, annotation_endpoint=None):
""" Reads annotation info from annotation engine endpoint """
if table_name == "postsynapsecompartment":
return {"schema_name": "PostSynapseCompartment"}
if annotation_endpoint is None:
annotation_endpoint = analysisdatalink.annotation_endpoint
url = "%s/dataset/%s/%s" % (annotation_endpoint, dataset_name, table_name)
r = requests.get(url)
assert r.status_code == 200
return json.loads(r.content)
class AnalysisDataLinkBase(object):
def __init__(self, dataset_name, materialization_version=None,
sqlalchemy_database_uri=None, verbose=True,
annotation_endpoint=None, ):
if sqlalchemy_database_uri is None:
sqlalchemy_database_uri = os.getenv('DATABASE_URI')
assert sqlalchemy_database_uri is not None
self._base_engine = create_engine(sqlalchemy_database_uri, echo=verbose)
self._base_sqlalchemy_session = sessionmaker(bind=self._base_engine)
self._this_sqlalchemy_base_session = None
if materialization_version is None:
version_query=self.this_sqlalchemy_base_session.query(em_models.AnalysisVersion)
version_query=version_query.filter(em_models.AnalysisVersion.dataset == dataset_name)
versions=version_query.filter(em_models.AnalysisVersion.valid == True).all()
version_d = {v.version:v.time_stamp for v in versions}
#version_d = get_materialization_versions(dataset_name=dataset_name)
versions = np.array([v for v in version_d.keys()], dtype=np.uint32)
materialization_version = int(np.max(versions))
sqlalchemy_database_uri = build_database_uri(sqlalchemy_database_uri, dataset_name, materialization_version)
if verbose:
print('Using URI: {}'.format(sqlalchemy_database_uri))
self._dataset_name = dataset_name
self._materialization_version = materialization_version
self._annotation_endpoint = annotation_endpoint
self._sqlalchemy_database_uri = sqlalchemy_database_uri
self._models = {}
self._models["cellsegment"] = em_models.make_cell_segment_model(
dataset_name, version=self.materialization_version)
self._sqlalchemy_engine = create_engine(sqlalchemy_database_uri,
echo=verbose)
em_models.Base.metadata.create_all(self.sqlalchemy_engine)
self._sqlalchemy_session = sessionmaker(bind=self.sqlalchemy_engine)
self._this_sqlalchemy_session = None
@property
def dataset_name(self):
return self._dataset_name
@property
def materialization_version(self):
return self._materialization_version
@property
def sqlalchemy_database_uri(self):
return self._sqlalchemy_database_uri
@property
def sqlalchemy_engine(self):
return self._sqlalchemy_engine
@property
def sqlalchemy_session(self):
return self._sqlalchemy_session
@property
def this_sqlalchemy_base_session(self):
if self._this_sqlalchemy_base_session is None:
self._this_sqlalchemy_base_session = self._base_sqlalchemy_session()
return self._this_sqlalchemy_base_session
@property
def this_sqlalchemy_session(self):
if self._this_sqlalchemy_session is None:
self._this_sqlalchemy_session = self.sqlalchemy_session()
return self._this_sqlalchemy_session
def model(self, table_name, is_synapse_compartment=False):
""" Returns annotation model for table
:param table_name: str
:return: em annotation model
"""
if not is_synapse_compartment and self._add_annotation_model(table_name):
return self._models[table_name]
# elif is_synapse_compartment and self._add_synapse_compartment_model():
# return self.
else:
raise Exception("Could not make annotation model")
def _add_synapse_compartment_model(self, synapse_table_name,
table_name="postsynapsecompartment"):
if table_name in self._models:
print('Model name \'{}\' already exists'.format(table_name))
return True
try:
self._models[table_name] = em_mesh_models.make_post_synaptic_compartment_model(
dataset=self.dataset_name, synapse_table=synapse_table_name,
version=self.materialization_version)
return True
except Exception as e:
print(e)
return False
def _add_annotation_model(self, table_name):
""" Loads database model for an annotation schema
Args:
table_name: Table name for the database
"""
if table_name in self._models:
return True
av = self.this_sqlalchemy_base_session.query(em_models.AnalysisVersion)\
.filter(em_models.AnalysisVersion.version == self._materialization_version).first()
base_query=self.this_sqlalchemy_base_session.query(em_models.AnalysisTable)
base_query=base_query.filter(em_models.AnalysisTable.analysisversion == av)
base_query=base_query.filter(em_models.AnalysisTable.tablename == table_name)
schema = base_query.first()
schema_name = schema.schema
if schema_name is None:
schema_name = get_annotation_info(self.dataset_name, table_name,
self._annotation_endpoint)["schema_name"]
try:
self._models[table_name] = em_models.make_annotation_model(
dataset=self.dataset_name, annotation_type=schema_name,
table_name=table_name, version=self.materialization_version)
if schema_name == 'synapse':
self._add_synapse_compartment_model(synapse_table_name=table_name)
return True
except Exception as e:
print(e)
return False
def _make_query(self, query_args, join_args=None, filter_args=None,
select_columns=None):
"""Constructs a query object with selects, joins, and filters
Args:
query_args: Iterable of objects to query
join_args: Iterable of objects to set as a join (optional)
filter_args: Iterable of iterables
select_columns: None or Iterable of str
Returns:
SQLAchemy query object
"""
query = self.this_sqlalchemy_session.query(*query_args)
if join_args is not None:
query = query.join(*join_args, full=True)
if filter_args is not None:
for f in filter_args:
query = query.filter(*f)
if select_columns is not None:
query = query.with_entities(*select_columns)
return query
def _execute_query(self, query, fix_wkb=True, fix_decimal=True, index_col=None):
""" Query the database and make a dataframe out of the results
Args:
query: SQLAlchemy query object
fix_wkb: Boolean to turn wkb objects into numpy arrays (optional, default is True)
index_col: None or str
Returns:
Dataframe with query results
"""
df = pd.read_sql(query.statement, self.sqlalchemy_engine,
coerce_float=False, index_col=index_col)
if fix_wkb:
df = fix_wkb_columns(df)
if fix_decimal:
df = fix_decimal_columns(df)
return df
def _query(self, query_args, join_args=None, filter_args=None,
select_columns=None, fix_wkb=True, index_col=None):
""" Wraps make_query and execute_query in one function
:param query_args:
:param join_args:
:param filter_args:
:param select_columns:
:param fix_wkb:
:param index_col:
:return:
"""
query = self._make_query(query_args=query_args,
join_args=join_args,
filter_args=filter_args,
select_columns=select_columns)
df = self._execute_query(query=query, fix_wkb=fix_wkb,
index_col=index_col)
return df
|
AnalysisDataLink
|
/AnalysisDataLink-0.4.1.tar.gz/AnalysisDataLink-0.4.1/analysisdatalink/datalink_base.py
|
datalink_base.py
|
import numpy as np
import sqlalchemy
from analysisdatalink import datalink_base
class AnalysisDataLink(datalink_base.AnalysisDataLinkBase):
def __init__(self, dataset_name, materialization_version=None,
sqlalchemy_database_uri=None, verbose=True,
annotation_endpoint=None):
super().__init__(dataset_name, materialization_version,
sqlalchemy_database_uri, verbose=verbose,
annotation_endpoint=annotation_endpoint)
def specific_query(self, tables, filter_in_dict={}, filter_notin_dict={},
filter_equal_dict = {},
select_columns=None):
""" Allows a more narrow query without requiring knowledge about the
underlying data structures
:param tables: list of lists
standard: list of one entry: table_name of table that one wants to
query
join: list of two lists: first entries are table names, second
entries are the columns used for the join
:param filter_in_dict: dict of dicts
outer layer: keys are table names
inner layer: keys are column names, values are entries to filter by
:param filter_notin_dict: dict of dicts
inverse to filter_in_dict
:param filter_equal_dict: dict of dicts
outer layer: keys are table names
inner layer: keys are column names, values are single values to match exactly
:param select_columns: list of str
:return: pandas DataFrame with the query result
"""
tables = [[table] if not isinstance(table, list) else table
for table in tables]
query_args = [self.model(table[0]) for table in tables]
if len(tables) == 2:
join_args = (self.model(tables[1][0]),
self.model(tables[1][0]).__dict__[tables[1][1]] ==
self.model(tables[0][0]).__dict__[tables[0][1]])
elif len(tables) > 2:
raise Exception("Currently, only single joins are supported")
else:
join_args = None
filter_args = []
for filter_table, filter_table_dict in filter_in_dict.items():
for column_name in filter_table_dict.keys():
filter_values = filter_table_dict[column_name]
filter_values = np.array(filter_values, dtype="O")
filter_args.append((self.model(filter_table).__dict__[column_name].
in_(filter_values), ))
for filter_table, filter_table_dict in filter_notin_dict.items():
for column_name in filter_table_dict.keys():
filter_values = filter_table_dict[column_name]
filter_values = np.array(filter_values, dtype="O")
filter_args.append((sqlalchemy.not_(self.model(filter_table).__dict__[column_name].
in_(filter_values)), ))
for filter_table, filter_table_dict in filter_equal_dict.items():
for column_name in filter_table_dict.keys():
filter_value = filter_table_dict[column_name]
filter_args.append((self.model(filter_table).__dict__[column_name]==filter_value, ))
return self._query(query_args=query_args, filter_args=filter_args,
join_args=join_args, select_columns=select_columns)
|
AnalysisDataLink
|
/AnalysisDataLink-0.4.1.tar.gz/AnalysisDataLink-0.4.1/analysisdatalink/datalink.py
|
datalink.py
|
import torch
from torch.optim.lr_scheduler import ExponentialLR, CyclicLR
from AnalysisG.Settings import Settings
class OptimizerWrapper(Settings):
def __init__(self):
self.Caller = "OPTIMIZER"
Settings.__init__(self)
self.train = False
self._op = None
self._sc = None
self._mod = None
def SetOptimizer(self):
self._pth = self.OutputDirectory + "/" + self.RunName
if len(self.OptimizerParams) == 0:
return False
if self.Optimizer == "ADAM":
self._op = torch.optim.Adam(self._mod.parameters(), **self.OptimizerParams)
elif self.Optimizer == "SDG":
self._op = torch.optim.SGD(self._mod.parameters(), **self.OptimizerParams)
else:
return False
return True
def dump(self):
dct = {}
dct["epoch"] = self.Epoch
dct["optim"] = self._op.state_dict()
if self._sc is not None:
dct["sched"] = self._sc.state_dict()
torch.save(dct, self._pth + "/" + str(self.Epoch) + "/TrainingState.pth")
def load(self):
v = torch.load(self._pth + "/" + str(self.Epoch) + "/TrainingState.pth")
self.Epoch = v["epoch"]
self._op.load_state_dict(v["optim"])
if self._sc is not None:
self._sc.load_state_dict(v["sched"])
return self._pth + " @ " + str(self.Epoch)
def step(self):
if not self.train:
return
self._op.step()
def zero(self):
if not self.train:
return
self._op.zero_grad()
def SetScheduler(self):
# bail out early if no scheduler was configured
if len(self.SchedulerParams) == 0:
return False
self.SchedulerParams["optimizer"] = self._op
if self.Scheduler == "ExponentialLR":
self._sc = ExponentialLR(**self.SchedulerParams)
if self.Scheduler == "CyclicLR":
self._sc = CyclicLR(**self.SchedulerParams)
if self._sc is None:
return False
return True
def stepsc(self):
if self._sc is None:
return
self._sc.step()
|
AnalysisG
|
/Model/Optimizers.py
|
Optimizers.py
|
from AnalysisG.Notification import _ModelWrapper
from .LossFunctions import LossFunctions
from torch_geometric.data import Data
import torch
try:
import pyc.Transform as PT
import pyc.Physics.Cartesian as CT
except ImportError:
pass
class ModelWrapper(_ModelWrapper):
def __init__(self, model=None):
self.Caller = "MODEL"
self.Verbose = 3
self.OutputDirectory = None
self.RunName = None
self.Epoch = None
# Mass reconstruction part
self.TruthMode = model is None
self.Keys = {"pt": "N_pT", "eta": "N_eta", "phi": "N_phi", "e": "N_energy"}
self._Model = model
self._inputs = {}
self._outputs = {}
# Mappings
self.o_mapping = {}
self.i_mapping = {}
self._truth = True
self._train = True
self._GetModelInputs()
self._build()
def __call__(self, data):
self._Model(**{k: getattr(data, k) for k in self.i_mapping})
dc = self._Model.__dict__
pred = {"batch": data.batch, "edge_index": data.edge_index}
if self._truth:
pred.update({self.o_mapping[k]: dc[k] for k in self.o_mapping})
else:
pred.update({k: dc[k] for k in self._outputs})
if not self._truth:
return Data().from_dict(pred), None
loss = {
o[2:]: self._loss[o[2:]](pred[t], getattr(data, t))
for o, t in self.o_mapping.items()
}
self._l = loss
self.Data = data
self.Pred = Data().from_dict(pred)
return self.Pred, loss
def _scan(self, inpt, key):
return {k: inpt[k] for k in inpt if k.startswith(key)}
def _mapping(self, inpt, key):
return {
"O" + k[3:]: k
for k in inpt
if k.startswith(key) and "O" + k[3:] in self._outputs
}
def _inject_tools(self):
self._Model.MassEdgeFeature = self.MassEdgeFeature
self._Model.MassNodeFeature = self.MassNodeFeature
def _GetModelInputs(self):
if self.TruthMode: return
code = self._Model.forward.__code__
self._inputs = {
key: None for key in code.co_varnames[: code.co_argcount] if key != "self"
}
return self._inputs
def _build(self):
if self.TruthMode: return
self._outputs = self._scan(self._Model.__dict__, "O_")
mod = self._Model.__dict__["_modules"]
mod = {i: mod[i] for i in mod}
mod.update(self._Model.__dict__)
c = self._scan(self._Model.__dict__, "C_")
loss = self._scan(mod, "L_")
self._loss = {
l[2:]: LossFunctions(
loss[l], c["C_" + l[2:]] if "C_" + l[2:] in c else False
)
for l in loss
}
def SampleCompatibility(self, smpl):
self._pth = self.OutputDirectory + "/" + self.RunName
smpl = list(smpl.to_dict())
self.i_mapping = {k: smpl[smpl.index(k)] for k in self._inputs if k in smpl}
self.o_mapping = self._mapping(smpl, "E_T_")
self.o_mapping.update(self._mapping(smpl, "N_T_"))
self.o_mapping.update(self._mapping(smpl, "G_T_"))
try: self._inject_tools()
except Exception: pass
if not self._iscompatible(): return False
return True
@property
def train(self):
return self._train
@train.setter
def train(self, val):
self._train = val
if self._train: self._Model.train()
else: self._Model.eval()
def dump(self):
out = {"epoch": self.Epoch, "model": self._Model.state_dict()}
torch.save(out, self._pth + "/" + str(self.Epoch) + "/TorchSave.pth")
def load(self):
lib = torch.load(self._pth + "/" + str(self.Epoch) + "/TorchSave.pth")
self._Model.load_state_dict(lib["model"])
self._Model.eval()
return self._pth + " @ " + str(self.Epoch)
def backward(self):
loss = sum([self._l[x]["loss"] for x in self._l])
if self._train: loss.backward()
return loss
@property
def device(self):
return self._Model.device
@device.setter
def device(self, val):
self._Model = self._Model.to(val)
def _switch(self, sample, pred):
shape = pred.size()
if shape[1] > 1: pred = pred.max(1)[1]
pred = pred.view(-1)
if shape[0] == sample.edge_index.size()[1]:
return self.MassEdgeFeature(sample, pred).tolist()
elif shape[0] == sample.num_nodes:
return self.MassNodeFeature(sample, pred).tolist()
else:
return []
def _debatch(self, inpt, sample):
btch = inpt.batch.unique()
smples = [sample.subgraph(sample.batch == b) for b in btch]
inpt = [inpt.subgraph(inpt.batch == b) for b in btch]
return smples, inpt
@property
def mass(self):
data = self.Data if self.TruthMode else self.Pred
sample, pred = self._debatch(data, self.Data)
return [
{o[2:]: self._switch(j, i[self.o_mapping[o]]) for o in self.o_mapping}
for i, j in zip(pred, sample)
]
def __SummingNodes(self):
try:
Pmu = {i: self._data[self.Keys[i]] for i in self.Keys}
except TypeError:
Pmu = {i: getattr(self._data, self.Keys[i]) for i in self.Keys}
Pmu = torch.cat(list(Pmu.values()), dim=-1)
Pmu = PT.PxPyPzE(Pmu)
# Get the prediction of the sample and extract from the topology the number of unique classes
edge_index = self._data.edge_index
edge_index_r = edge_index[0][self._mask == True]
edge_index_s = edge_index[1][self._mask == True]
# Weird floating point inaccuracy. When using Unique, the values seem to change slightly
Pmu = Pmu.to(dtype=torch.long)
Pmu_n = torch.zeros(Pmu.shape, device=Pmu.device, dtype=torch.long)
Pmu_n.index_add_(0, edge_index_r, Pmu[edge_index_s])
# Make sure to find self loops - Avoid double counting
excluded_self = edge_index[1] == edge_index[0]
excluded_self[excluded_self] = False
excluded_self[self._mask == True] = False
Pmu_n[edge_index[0][excluded_self]] += Pmu[edge_index[1][excluded_self]]
Pmu_n = (Pmu_n / 1000).to(dtype=torch.long)
Pmu_n = torch.unique(Pmu_n, dim=0)
Pmu_n = CT.M(Pmu_n).view(-1)
return Pmu_n[Pmu_n > 0]
def MassNodeFeature(self, Sample, pred, excl_zero=True):
self._data = Sample
if excl_zero:
self._mask = (
pred[self._data.edge_index[0]] * pred[self._data.edge_index[1]] > 0
)
else:
self._mask = (
pred[self._data.edge_index[0]] == pred[self._data.edge_index[1]]
)
return self.__SummingNodes()
def MassEdgeFeature(self, Sample, pred):
self._data = Sample
self._mask = pred == 1
return self.__SummingNodes()
def ClosestParticle(self, tru, pred):
res = []
if len(tru) == 0: return res
if len(pred) == 0: return pred
p = pred.pop(0)
max_tru, min_tru = max(tru), min(tru)
col = min_tru <= p <= max_tru
if not col:
if len(pred) == 0: return res
return self.ClosestParticle(tru, pred)
diff = [abs(p - t) for t in tru]
tru.pop(diff.index(min(diff)))
res += self.ClosestParticle(tru, pred)
res.append(p)
return res
def ParticleEfficiency(self):
tmp = self.TruthMode
self.TruthMode = True
t = self.mass
self.TruthMode = False
p = self.mass
output = []
for b in range(len(t)):
out = {}
for f in t[b]:
pred, truth = p[b][f], t[b][f]
pred = self.ClosestParticle(truth, pred)
p_l, t_l = len(pred), len(truth)
out[f] = {
"%": float(p_l / (t_l if t_l != 0 else 1)) * 100,
"nrec": p_l,
"ntru": t_l,
}
output.append(out)
self.TruthMode = tmp
return output
|
AnalysisG
|
/Model/Model.py
|
Model.py
|
import torch
from torchmetrics.classification import MulticlassAccuracy
class LossFunctions:
def __init__(self, _loss, _class=False):
self._loss = _loss
self._class = _class
if _loss == "CEL":
self.CrossEntropyLoss()
elif _loss == "MSEL":
self.MeanSquareErrorLoss()
elif _loss == "HEL":
self.HingeEmbeddingLoss()
elif _loss == "KLD":
self.KLDivergenceLoss()
else:
self.NoDefault()
if self._class:
self._class = self.ToDigit
@property
def loss(self):
t, p = self._func(self.truth, self.pred)
return self._loss(p, t)
@property
def accuracy(self):
truth, pred = self.truth.clone().to("cpu"), self.pred.clone().detach().to("cpu")
return self._acc(truth, pred)
def ToDigit(self, inpt):
return torch.round(inpt)
def CrossEntropyLoss(self):
def accuracyfunction(truth, pred):
acc = MulticlassAccuracy(num_classes=pred.size()[1])
return 100 * acc(pred.max(1)[1].view(-1), truth.view(-1))
def funct(truth, pred):
return truth.view(-1).to(dtype=torch.long), pred
self._loss = torch.nn.CrossEntropyLoss()
self._func = funct
self._acc = accuracyfunction
self._class = False
def MeanSquareErrorLoss(self):
def accuracyfunction(truth, pred):
return truth.view(-1) - pred.view(-1)
def funct(truth, pred):
return truth.view(-1).to(dtype=torch.float), pred.view(-1)
self._loss = torch.nn.MSELoss()
self._func = funct
self._acc = accuracyfunction
def HingeEmbeddingLoss(self):
def funct(truth, pred):
return truth, pred
def accuracyfunction(truth, pred):
return self.loss
self._loss = torch.nn.HingeEmbeddingLoss()
self._func = funct
self._acc = accuracyfunction
def KLDivergenceLoss(self):
def funct(truth, pred):
return truth, pred
def accuracyfunction(truth, pred):
return self.loss
self._loss = torch.nn.KLDivLoss()
self._func = funct
self._acc = accuracyfunction
def NoDefault(self):
def funct(truth, pred):
return truth, pred
def accuracyfunction(truth, pred):
return self.loss
self._func = funct
self._acc = accuracyfunction
def __call__(self, pred, truth):
self.pred, self.truth = pred, truth
if self._class:
self.pred = self._class(self.pred)
loss = self.loss
return {"loss": loss, "acc": self.accuracy}
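# Minimal usage sketch (hypothetical shapes): wrap a cross-entropy loss and
# evaluate 3-class predictions against integer truth labels.
#   lf = LossFunctions("CEL")
#   out = lf(torch.randn(8, 3), torch.randint(0, 3, (8, 1)))
#   out is {"loss": <scalar tensor>, "acc": <accuracy in percent>}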
|
AnalysisG
|
/Model/LossFunctions.py
|
LossFunctions.py
|
'use strict'
$(document).ready(function () {
var load_chart = function () {
$.getJSON("output/module_dependencies_repr.json", function (module_dependencies_repr) {
console.log(module_dependencies_repr);
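// Expected JSON shape (produced by modules.py to_simple_dict):
// [{"name": "module_a", "path": "/path/a", "dependencies": ["module_b"]}, ...]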
var nodes = [];
var edges = [];
for (var i = 0; i < module_dependencies_repr.length; i++) {
var module_dependency_item = module_dependencies_repr[i];
var module_name = module_dependency_item.name;
var module_path = module_dependency_item.path;
var module_dependencies = module_dependency_item.dependencies;
nodes.push(
{data: {id: module_name, name: module_name}}
);
for (var j = 0; j < module_dependencies.length; j++) {
edges.push({
data: {
source: module_name,
target: module_dependencies[j]
}
})
}
}
draw_chart(nodes, edges);
});
function draw_chart(nodes, edges) {
var cy = cytoscape({
container: $('#cy'),
boxSelectionEnabled: false,
autounselectify: false,
selectionType: "additive",
style: cytoscape.stylesheet()
.selector('node')
.css({
'content': 'data(name)',
'text-valign': 'center',
'color': 'white',
'background-opacity': 0.7,
'background-color': '#0099CC',
'text-outline-color': '#0099CC'
})
.selector('edge')
.css({
'curve-style': 'bezier',
'target-arrow-shape': 'triangle',
'target-arrow-color': '#CCCCCC',
'line-color': '#CCCCCC',
'width': 0.5
})
.selector(':selected')
.css({
'background-color': '#CCFF66',
'line-color': '#CCFF66',
'target-arrow-color': '#CCFF66',
'source-arrow-color': '#CCFF66'
})
.selector('.faded')
.css({
'opacity': 1.0,
'text-opacity': 0
}),
elements: {
nodes: nodes,
edges: edges
},
layout: {
name: 'cose',
padding: 10
}
});
function clearSelection() {
var eles = cy.elements();
for (var j = 0; j < eles.length; j++) {
eles[j].unselect();
}
}
cy.on('tap', 'node', function (e) {
clearSelection();
var node = e.target;
var neighborhood_dependencies = node.neighborhood("edge[source = \"" + node.data("id") + "\"]");
console.log(neighborhood_dependencies);
for (var i = 0; i < neighborhood_dependencies.length; i++) {
var neighborhood_node = cy.$("#" + neighborhood_dependencies[i].data("target"));
neighborhood_dependencies[i].select();
neighborhood_node.select();
}
});
cy.on("tap", "edge", function (e) {
clearSelection();
})
}
};
load_chart();
});
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/gui/dependency_shower.js
|
dependency_shower.js
|
import ConfigParser
import json
from analysis_project_dependencies.analysis_module import analysis_javas
class MFile():
def __init__(self, local_path):
self.local_path = local_path
def get_local_path(self):
return self.local_path
class JavaMFile(MFile):
def __init__(self, path, package, clz_name, imports):
MFile.__init__(self, path)
self.package = package
self.clz_name = clz_name
self.imports = imports
def get_clz_name(self):
return self.clz_name
def get_pkg(self):
return self.package
def get_imports(self):
return self.imports
def __repr__(self):
return "%s : %s (%s)" % (self.package, self.clz_name, self.imports)
class Module():
def __init__(self, name, local_path):
self.name = name
self.local_path = local_path
def get_name(self):
return self.name
def get_local_path(self):
return self.local_path
class Dependency():
def __init__(self):
self.dependencies = []
def append_dependencies(self, java_module):
self.dependencies.append(java_module)
def set_dependencies(self, java_modules):
self.dependencies = java_modules
class JavaRootModule(Module):
def __init__(self, name, local_path):
Module.__init__(self, name, local_path)
class JavaModule(Module, Dependency):
def __init__(self, name, local_path):
Module.__init__(self, name, local_path)
Dependency.__init__(self)
self.java_mfiles = []
def analysis_self(self):
self.java_mfiles = []
print "\n[%s] analysis_self begin >>>>>>\n" % str(self.name)
for path, package, class_name, imports in analysis_javas(self.local_path):
print "file path:%s" % path
print " class:%s.%s" % (package, class_name)
print " imports:%s" % imports
self.java_mfiles.append(JavaMFile(path, package, class_name, imports))
print "\n<<<<<< [%s] analysis_self end\n" % str(self.name)
def get_java_mfiles(self):
return self.java_mfiles
def analysis_dependency(self, java_modules):
print "\n[%s] analysis_dependency begin >>>>>>\n" % str(self.name)
self.set_dependencies([])
for java_module in java_modules:
for mfile in java_module.get_java_mfiles():
result, current_mfile = JavaModule.mfile_include_pkg(self.java_mfiles, mfile.get_pkg(),
mfile.get_clz_name())
if result:
self.append_dependencies(java_module)
print "File [%s.%s] is dependent on file [%s.%s] of module [%s]" % (
current_mfile.get_pkg(), current_mfile.get_clz_name(), mfile.get_pkg(), mfile.get_clz_name(),
java_module.get_name())
break
print "\n<<<<<< [%s] analysis_dependency end\n" % str(self.name)
@staticmethod
def mfile_include_pkg(java_mfiles, pkg, java_clz):
for mfile in java_mfiles:
if (pkg + "." + java_clz) in mfile.get_imports():
return True, mfile
return False, None
def __repr__(self):
return json.dumps(self.to_dict())
def to_dict(self):
dependency_repr = []
for java_module in self.dependencies:
dependency_repr.append(java_module.get_name())
mfiles_repr = []
for mfile in self.java_mfiles:
mfiles_repr.append(mfile.get_pkg() + "." + mfile.get_clz_name())
return {
"name": self.name,
"path": self.local_path,
"includes": mfiles_repr,
"dependencies": dependency_repr
}
def to_simple_dict(self):
dependency_repr = []
for java_module in self.dependencies:
dependency_repr.append(java_module.get_name())
return {
"name": self.name,
"path": self.local_path,
"dependencies": dependency_repr
}
def java_analysis():
cp = ConfigParser.SafeConfigParser()
cp.read('analysis_dependencies.conf')
java_module_pairs = cp.items("java_modules")
java_modules = []
for modules_config_pair in java_module_pairs:
java_modules.append(JavaModule(modules_config_pair[0], modules_config_pair[1]))
for java in java_modules:
java.analysis_self()
for index, java in enumerate(java_modules):
m = java_modules[:index]
m.extend(java_modules[index + 1:])
java.analysis_dependency(m)
print "\n\nAnalysis Result >>>>>>"
for java in java_modules:
print java
return java_modules
def java_output(java_modules):
output = []
for j_m in java_modules:
output.append(j_m.to_simple_dict())
with open('output/module_dependencies_repr.json', 'w') as json_file:
json_file.write(json.dumps(output))
if __name__ == "__main__":
java_output(java_analysis())
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/analysis_project_dependencies/modules.py
|
modules.py
|
**Issue type**
<!--
Are you submitting a bug report or a feature request?
When submitting a bug report, check the following:
- The report has a descriptive title.
- The bug still exists in most recent version of the library.
A request for help or a request for a how-to should be directed to Stack Overflow:
http://stackoverflow.com/questions/tagged/cytoscape.js
-->
<!-- Delete one option -->
Bug report
Feature request
<!-- BUG REPORT : Delete if requesting a feature -->
**Environment info**
- Cytoscape.js version :
- Browser/Node.js & version :
**Current (buggy) behaviour**
<!-- What does the bug do? -->
**Desired behaviour**
<!-- What do you expect Cytoscape.js to do instead? -->
**Minimum steps to reproduce**
<!--
Write out an overview of what you need to do to reproduce the issue.
Fork/clone this JSBin demo and reproduce your issue so that your issue can be addressed quickly:
http://jsbin.com/teworah
If your code to reproduce is only two or three lines, you can write it in the issue instead. Format your code in backtick code blocks like this:
```js
my.code();
```
-->
<!-- END BUG REPORT -->
<!-- FEATURE REQUEST : Delete if reporting a bug -->
**Description of new feature**
<!-- What should the new feature do? For visual features, include an image/mockup of the expected output. -->
**Motivation for new feature**
<!-- Describe your use case for this new feature. -->
<!-- END FEATURE REQUEST -->
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/cytoscape/ISSUE_TEMPLATE.md
|
ISSUE_TEMPLATE.md
|
# Cytoscape.js
[](https://raw.githubusercontent.com/cytoscape/cytoscape.js/master/LICENSE)
[](https://www.npmjs.com/package/cytoscape)
[](https://www.npmjs.com/package/cytoscape)
[](https://travis-ci.org/cytoscape/cytoscape.js)
[](https://travis-ci.org/cytoscape/cytoscape.js)
Graph theory (a.k.a. network) library for analysis and visualisation : [http://js.cytoscape.org](http://js.cytoscape.org)
## Description
Cytoscape.js is a fully featured [graph theory](https://en.wikipedia.org/wiki/Graph_theory) library. Do you need to model and/or visualise relational data, like biological data or social networks? If so, Cytoscape.js is just what you need.
Cytoscape.js contains a graph theory model and an optional renderer to display interactive graphs. This library was designed to make it as easy as possible for programmers and scientists to use graph theory in their apps, whether it's for server-side analysis in a Node.js app or for a rich user interface.
You can get started with Cytoscape.js with one line:
```js
var cy = cytoscape({ elements: myElements, container: myDiv });
```
Learn more about the features of Cytoscape.js by reading [its documentation](http://js.cytoscape.org).
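Since the renderer is optional, the same API can be used headlessly, e.g. for server-side analysis in Node.js. A minimal sketch, assuming only the core `cytoscape` package (the element ids are made up):
```js
var cytoscape = require('cytoscape');

// No `container`, so nothing is rendered: just the graph model.
var cy = cytoscape({
  elements: [
    { data: { id: 'a' } },
    { data: { id: 'b' } },
    { data: { id: 'ab', source: 'a', target: 'b' } }
  ]
});

console.log( cy.nodes().length ); // 2
```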
## Documentation
You can find the documentation and downloads on the [project website](http://js.cytoscape.org).
## Roadmap
Future versions of Cytoscape.js are planned in the [milestones of the GitHub issue tracker](https://github.com/cytoscape/cytoscape.js/milestones). You can use the milestones to see what's currently planned for future releases.
## Contributing to Cytoscape.js
Please refer to [CONTRIBUTING.md](CONTRIBUTING.md).
## Citation
To cite Cytoscape.js in a paper, please cite the Oxford Bioinformatics issue:
*Cytoscape.js: a graph theory library for visualisation and analysis*
Franz M, Lopes CT, Huck G, Dong Y, Sumer O, Bader GD
[Bioinformatics (2016) 32 (2): 309-311 first published online September 28, 2015 doi:10.1093/bioinformatics/btv557 (PDF)](http://bioinformatics.oxfordjournals.org/content/32/2/309)
[PubMed Abstract](http://www.ncbi.nlm.nih.gov/pubmed/26415722)
## Build dependencies
Install `node`, `npm` and `gulp`. Of course, `npm install` before using `gulp`.
## Build instructions
Run `gulp <target>` in the console. The main targets are:
**Building:**
* `build` (default) : build the library
* `clean` : clean the `build` directory
* `watch` : automatically build lib and tests for debugging
* `zip` : build the release ZIP
* `dist` : update the distribution JS for npm, bower, etc.
**Testing:**
* `test` : run the Mocha unit tests
* `lint` : lint the JS sources via eslint
* `benchmark` : run benchmark regression tests
* `benchmark-single` : run benchmarks only for the suite specified in `benchmark/single`
* `sniper` : runs a BioJS sniper server that hosts demos
**Documentation:**
* `docs` : build the documentation template
* `docsmin` : build the documentation template with all resources minified
* `docspub` : build the documentation for publishing (ZIPs, JS refs, etc.)
* `docspush` : push the built documentation to [js.cytoscape.org](http://js.cytoscape.org)
* `unstabledocspush` : push the built documentation to [js.cytoscape.org/unstable](http://js.cytoscape.org/unstable)
## Release instructions
1. Do each backport patch release before the corresponding current release. This ensures that npm lists the current version as the latest one.
1. Make sure the docs are updated with the list of releases in `documentation/md/intro.md`
1. Update the `VERSION` environment variable, e.g. `export VERSION=1.2.3`
1. Confirm JS files pass linting: `gulp lint`
1. Confirm all tests passing: `gulp test`
1. Test the docs and demos with the latest code: `gulp docspub`
1. Build and publish the release: `gulp publish`
## Tests
Mocha tests are found in the [test directory](https://github.com/cytoscape/cytoscape.js/tree/master/test). The tests can be run in the browser or they can be run via Node.js (`gulp test` or `mocha`).
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/cytoscape/README.md
|
README.md
|
yourDiv.style.left = 0;
yourDiv.style.top = 0;
yourDiv.style.width = "100%";
yourDiv.style.height = "100%";
yourDiv.style.position = "absolute";

var cytoscape = require("cytoscape");

var cy = cytoscape({
  container: yourDiv,

  style: cytoscape.stylesheet()
    .selector('node')
      .style({
        'height': 80,
        'width': 80,
        'background-fit': 'cover',
        'border-color': '#000',
        'border-width': 3,
        'border-opacity': 0.5
      })
    .selector('.eating')
      .style({
        'border-color': 'red'
      })
    .selector('.eater')
      .style({
        'border-width': 9
      })
    .selector('edge')
      .style({
        'width': 6,
        'target-arrow-shape': 'triangle',
        'line-color': '#ffaaaa',
        'target-arrow-color': '#ffaaaa'
      })
    .selector('#bird')
      .style({
        'background-image': 'https://farm8.staticflickr.com/7272/7633179468_3e19e45a0c_b.jpg'
      })
    .selector('#cat')
      .style({
        'background-image': 'https://farm2.staticflickr.com/1261/1413379559_412a540d29_b.jpg'
      })
    .selector('#ladybug')
      .style({
        'background-image': 'https://farm4.staticflickr.com/3063/2751740612_af11fb090b_b.jpg'
      })
    .selector('#aphid')
      .style({
        'background-image': 'https://farm9.staticflickr.com/8316/8003798443_32d01257c8_b.jpg'
      })
    .selector('#rose')
      .style({
        'background-image': 'https://farm6.staticflickr.com/5109/5817854163_eaccd688f5_b.jpg'
      })
    .selector('#grasshopper')
      .style({
        'background-image': 'https://farm7.staticflickr.com/6098/6224655456_f4c3c98589_b.jpg'
      })
    .selector('#plant')
      .style({
        'background-image': 'https://farm1.staticflickr.com/231/524893064_f49a4d1d10_z.jpg'
      })
    .selector('#wheat')
      .style({
        'background-image': 'https://farm3.staticflickr.com/2660/3715569167_7e978e8319_b.jpg'
      }),

  elements: {
    nodes: [
      { data: { id: 'cat' } },
      { data: { id: 'bird' } },
      { data: { id: 'ladybug' } },
      { data: { id: 'aphid' } },
      { data: { id: 'rose' } },
      { data: { id: 'grasshopper' } },
      { data: { id: 'plant' } },
      { data: { id: 'wheat' } }
    ],
    edges: [
      { data: { source: 'cat', target: 'bird' } },
      { data: { source: 'bird', target: 'ladybug' } },
      { data: { source: 'bird', target: 'grasshopper' } },
      { data: { source: 'grasshopper', target: 'plant' } },
      { data: { source: 'grasshopper', target: 'wheat' } },
      { data: { source: 'ladybug', target: 'aphid' } },
      { data: { source: 'aphid', target: 'rose' } }
    ]
  },

  layout: {
    name: 'breadthfirst',
    directed: true,
    padding: 10
  }
}); // cy init

cy.on('tap', 'node', function(){
  var nodes = this;
  var tapped = nodes;
  var food = [];

  nodes.addClass('eater');

  for(;;){
    var connectedEdges = nodes.connectedEdges(function( edge ){
      return !edge.target().anySame( nodes );
    });

    var connectedNodes = connectedEdges.targets();

    Array.prototype.push.apply( food, connectedNodes );

    nodes = connectedNodes;

    if( nodes.empty() ){ break; }
  }

  var delay = 0;
  var duration = 500;
  for( var i = food.length - 1; i >= 0; i-- ){ (function(){
    var thisFood = food[i];
    var eater = thisFood.connectedEdges(function( edge ){
      return edge.target().same(thisFood);
    }).source();

    thisFood.delay( delay, function(){
      eater.addClass('eating');
    } ).animate({
      position: eater.position(),
      css: {
        'width': 10,
        'height': 10,
        'border-width': 0,
        'opacity': 0
      }
    }, {
      duration: duration,
      complete: function(){
        thisFood.remove();
      }
    });

    delay += duration;
  })(); } // for
}); // on tap
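
// In short: the tap handler above walks the graph breadth-first along outgoing
// edges (connectedEdges() filtered to edges leaving the current frontier, then
// targets() for the next frontier), collects every reachable node as "food",
// and finally animates each food node into its eater's position and removes
// it, chaining the animations with an increasing delay.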
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/cytoscape/snippets/images.js
|
images.js
|
yourDiv.style.left = 0;
yourDiv.style.top = 0;
yourDiv.style.width = "100%";
yourDiv.style.height = "100%";
yourDiv.style.position = "absolute";

var cytoscape = require("cytoscape");

var cy = cytoscape({
  container: yourDiv,

  layout: {
    name: 'cose',
    padding: 10
  },

  style: cytoscape.stylesheet()
    .selector('node')
      .style({
        'shape': 'data(faveShape)',
        'width': 'mapData(weight, 40, 80, 20, 60)',
        'content': 'data(name)',
        'text-valign': 'center',
        'text-outline-width': 2,
        'text-outline-color': 'data(faveColor)',
        'background-color': 'data(faveColor)',
        'color': '#fff'
      })
    .selector(':selected')
      .style({
        'border-width': 3,
        'border-color': '#333'
      })
    .selector('edge')
      .style({
        'opacity': 0.666,
        'width': 'mapData(strength, 70, 100, 2, 6)',
        'target-arrow-shape': 'triangle',
        'source-arrow-shape': 'circle',
        'line-color': 'data(faveColor)',
        'source-arrow-color': 'data(faveColor)',
        'target-arrow-color': 'data(faveColor)'
      })
    .selector('edge.questionable')
      .style({
        'line-style': 'dotted',
        'target-arrow-shape': 'diamond'
      })
    .selector('.faded')
      .style({
        'opacity': 0.25,
        'text-opacity': 0
      }),

  elements: {
    nodes: [
      { data: { id: 'j', name: 'Jerry', weight: 65, faveColor: '#6FB1FC', faveShape: 'triangle' } },
      { data: { id: 'e', name: 'Elaine', weight: 45, faveColor: '#EDA1ED', faveShape: 'ellipse' } },
      { data: { id: 'k', name: 'Kramer', weight: 75, faveColor: '#86B342', faveShape: 'octagon' } },
      { data: { id: 'g', name: 'George', weight: 70, faveColor: '#F5A45D', faveShape: 'rectangle' } }
    ],
    edges: [
      { data: { source: 'j', target: 'e', faveColor: '#6FB1FC', strength: 90 } },
      { data: { source: 'j', target: 'k', faveColor: '#6FB1FC', strength: 70 } },
      { data: { source: 'j', target: 'g', faveColor: '#6FB1FC', strength: 80 } },
      { data: { source: 'e', target: 'j', faveColor: '#EDA1ED', strength: 95 } },
      { data: { source: 'e', target: 'k', faveColor: '#EDA1ED', strength: 60 }, classes: 'questionable' },
      { data: { source: 'k', target: 'j', faveColor: '#86B342', strength: 100 } },
      { data: { source: 'k', target: 'e', faveColor: '#86B342', strength: 100 } },
      { data: { source: 'k', target: 'g', faveColor: '#86B342', strength: 100 } },
      { data: { source: 'g', target: 'j', faveColor: '#F5A45D', strength: 90 } }
    ]
  }
});
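
// Note on the style mappers used above: 'data(name)' reads the named field
// directly from each element's data object, while
// 'mapData(weight, 40, 80, 20, 60)' linearly maps data.weight from the input
// range [40, 80] onto the output range [20, 60].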
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/cytoscape/snippets/visual.js
|
visual.js
|
# jQuery
> jQuery is a fast, small, and feature-rich JavaScript library.
For information on how to get started and how to use jQuery, please see [jQuery's documentation](http://api.jquery.com/).
For source files and issues, please visit the [jQuery repo](https://github.com/jquery/jquery).
If upgrading, please see the [blog post for 3.2.1](https://blog.jquery.com/2017/03/20/jquery-3-2-1-now-available/). This includes notable differences from the previous version and a more readable changelog.
## Including jQuery
Below are some of the most common ways to include jQuery.
### Browser
#### Script tag
```html
<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
```
#### Babel
[Babel](http://babeljs.io/) is a next generation JavaScript compiler. One of the features is the ability to use ES6/ES2015 modules now, even though browsers do not yet support this feature natively.
```js
import $ from "jquery";
```
#### Browserify/Webpack
There are several ways to use [Browserify](http://browserify.org/) and [Webpack](https://webpack.github.io/). For more information on using these tools, please refer to the corresponding project's documentation. In the script, including jQuery will usually look like this...
```js
var $ = require("jquery");
```
#### AMD (Asynchronous Module Definition)
AMD is a module format built for the browser. For more information, we recommend [require.js' documentation](http://requirejs.org/docs/whyamd.html).
```js
define(["jquery"], function($) {
});
```
### Node
To include jQuery in [Node](https://nodejs.org/), first install with npm.
```sh
npm install jquery
```
For jQuery to work in Node, a window with a document is required. Since no such window exists natively in Node, one can be mocked by tools such as [jsdom](https://github.com/tmpvar/jsdom). This can be useful for testing purposes.
```js
require("jsdom").env("", function(err, window) {
if (err) {
console.error(err);
return;
}
var $ = require("jquery")(window);
});
```
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/README.md
|
README.md
|
define( [
"./core",
"./var/document",
"./var/rnothtmlwhite",
"./ajax/var/location",
"./ajax/var/nonce",
"./ajax/var/rquery",
"./core/init",
"./ajax/parseXML",
"./event/trigger",
"./deferred",
"./serialize" // jQuery.param
], function( jQuery, document, rnothtmlwhite, location, nonce, rquery ) {
"use strict";
var
r20 = /%20/g,
rhash = /#.*$/,
rantiCache = /([?&])_=[^&]*/,
rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg,
// #7653, #8125, #8152: local protocol detection
rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/,
rnoContent = /^(?:GET|HEAD)$/,
rprotocol = /^\/\//,
/* Prefilters
* 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
* 2) These are called:
* - BEFORE asking for a transport
* - AFTER param serialization (s.data is a string if s.processData is true)
* 3) key is the dataType
* 4) the catchall symbol "*" can be used
* 5) execution will start with transport dataType and THEN continue down to "*" if needed
*/
prefilters = {},
/* Transports bindings
* 1) key is the dataType
* 2) the catchall symbol "*" can be used
* 3) selection will start with transport dataType and THEN go to "*" if needed
*/
transports = {},
// Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
allTypes = "*/".concat( "*" ),
// Anchor tag for parsing the document origin
originAnchor = document.createElement( "a" );
originAnchor.href = location.href;
// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
function addToPrefiltersOrTransports( structure ) {
// dataTypeExpression is optional and defaults to "*"
return function( dataTypeExpression, func ) {
if ( typeof dataTypeExpression !== "string" ) {
func = dataTypeExpression;
dataTypeExpression = "*";
}
var dataType,
i = 0,
dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || [];
if ( jQuery.isFunction( func ) ) {
// For each dataType in the dataTypeExpression
while ( ( dataType = dataTypes[ i++ ] ) ) {
// Prepend if requested
if ( dataType[ 0 ] === "+" ) {
dataType = dataType.slice( 1 ) || "*";
( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func );
// Otherwise append
} else {
( structure[ dataType ] = structure[ dataType ] || [] ).push( func );
}
}
}
};
}
// Base inspection function for prefilters and transports
function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) {
var inspected = {},
seekingTransport = ( structure === transports );
function inspect( dataType ) {
var selected;
inspected[ dataType ] = true;
jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) {
var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR );
if ( typeof dataTypeOrTransport === "string" &&
!seekingTransport && !inspected[ dataTypeOrTransport ] ) {
options.dataTypes.unshift( dataTypeOrTransport );
inspect( dataTypeOrTransport );
return false;
} else if ( seekingTransport ) {
return !( selected = dataTypeOrTransport );
}
} );
return selected;
}
return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" );
}
// A special extend for ajax options
// that takes "flat" options (not to be deep extended)
// Fixes #9887
function ajaxExtend( target, src ) {
var key, deep,
flatOptions = jQuery.ajaxSettings.flatOptions || {};
for ( key in src ) {
if ( src[ key ] !== undefined ) {
( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
}
}
if ( deep ) {
jQuery.extend( true, target, deep );
}
return target;
}
/* Handles responses to an ajax request:
* - finds the right dataType (mediates between content-type and expected dataType)
* - returns the corresponding response
*/
function ajaxHandleResponses( s, jqXHR, responses ) {
var ct, type, finalDataType, firstDataType,
contents = s.contents,
dataTypes = s.dataTypes;
// Remove auto dataType and get content-type in the process
while ( dataTypes[ 0 ] === "*" ) {
dataTypes.shift();
if ( ct === undefined ) {
ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" );
}
}
// Check if we're dealing with a known content-type
if ( ct ) {
for ( type in contents ) {
if ( contents[ type ] && contents[ type ].test( ct ) ) {
dataTypes.unshift( type );
break;
}
}
}
// Check to see if we have a response for the expected dataType
if ( dataTypes[ 0 ] in responses ) {
finalDataType = dataTypes[ 0 ];
} else {
// Try convertible dataTypes
for ( type in responses ) {
if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) {
finalDataType = type;
break;
}
if ( !firstDataType ) {
firstDataType = type;
}
}
// Or just use first one
finalDataType = finalDataType || firstDataType;
}
// If we found a dataType
// We add the dataType to the list if needed
// and return the corresponding response
if ( finalDataType ) {
if ( finalDataType !== dataTypes[ 0 ] ) {
dataTypes.unshift( finalDataType );
}
return responses[ finalDataType ];
}
}
/* Chain conversions given the request and the original response
* Also sets the responseXXX fields on the jqXHR instance
*/
function ajaxConvert( s, response, jqXHR, isSuccess ) {
var conv2, current, conv, tmp, prev,
converters = {},
// Work with a copy of dataTypes in case we need to modify it for conversion
dataTypes = s.dataTypes.slice();
// Create converters map with lowercased keys
if ( dataTypes[ 1 ] ) {
for ( conv in s.converters ) {
converters[ conv.toLowerCase() ] = s.converters[ conv ];
}
}
current = dataTypes.shift();
// Convert to each sequential dataType
while ( current ) {
if ( s.responseFields[ current ] ) {
jqXHR[ s.responseFields[ current ] ] = response;
}
// Apply the dataFilter if provided
if ( !prev && isSuccess && s.dataFilter ) {
response = s.dataFilter( response, s.dataType );
}
prev = current;
current = dataTypes.shift();
if ( current ) {
// There's only work to do if current dataType is non-auto
if ( current === "*" ) {
current = prev;
// Convert response if prev dataType is non-auto and differs from current
} else if ( prev !== "*" && prev !== current ) {
// Seek a direct converter
conv = converters[ prev + " " + current ] || converters[ "* " + current ];
// If none found, seek a pair
if ( !conv ) {
for ( conv2 in converters ) {
// If conv2 outputs current
tmp = conv2.split( " " );
if ( tmp[ 1 ] === current ) {
// If prev can be converted to accepted input
conv = converters[ prev + " " + tmp[ 0 ] ] ||
converters[ "* " + tmp[ 0 ] ];
if ( conv ) {
// Condense equivalence converters
if ( conv === true ) {
conv = converters[ conv2 ];
// Otherwise, insert the intermediate dataType
} else if ( converters[ conv2 ] !== true ) {
current = tmp[ 0 ];
dataTypes.unshift( tmp[ 1 ] );
}
break;
}
}
}
}
// Apply converter (if not an equivalence)
if ( conv !== true ) {
// Unless errors are allowed to bubble, catch and return them
if ( conv && s.throws ) {
response = conv( response );
} else {
try {
response = conv( response );
} catch ( e ) {
return {
state: "parsererror",
error: conv ? e : "No conversion from " + prev + " to " + current
};
}
}
}
}
}
}
return { state: "success", data: response };
}
jQuery.extend( {
// Counter for holding the number of active queries
active: 0,
// Last-Modified header cache for next request
lastModified: {},
etag: {},
ajaxSettings: {
url: location.href,
type: "GET",
isLocal: rlocalProtocol.test( location.protocol ),
global: true,
processData: true,
async: true,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
/*
timeout: 0,
data: null,
dataType: null,
username: null,
password: null,
cache: null,
throws: false,
traditional: false,
headers: {},
*/
accepts: {
"*": allTypes,
text: "text/plain",
html: "text/html",
xml: "application/xml, text/xml",
json: "application/json, text/javascript"
},
contents: {
xml: /\bxml\b/,
html: /\bhtml/,
json: /\bjson\b/
},
responseFields: {
xml: "responseXML",
text: "responseText",
json: "responseJSON"
},
// Data converters
// Keys separate source (or catchall "*") and destination types with a single space
converters: {
// Convert anything to text
"* text": String,
// Text to html (true = no transformation)
"text html": true,
// Evaluate text as a json expression
"text json": JSON.parse,
// Parse text as xml
"text xml": jQuery.parseXML
},
// For options that shouldn't be deep extended:
// you can add your own custom options here if
// and when you create one that shouldn't be
// deep extended (see ajaxExtend)
flatOptions: {
url: true,
context: true
}
},
// Creates a full fledged settings object into target
// with both ajaxSettings and settings fields.
// If target is omitted, writes into ajaxSettings.
ajaxSetup: function( target, settings ) {
return settings ?
// Building a settings object
ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) :
// Extending ajaxSettings
ajaxExtend( jQuery.ajaxSettings, target );
},
ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
ajaxTransport: addToPrefiltersOrTransports( transports ),
// Main method
ajax: function( url, options ) {
// If url is an object, simulate pre-1.5 signature
if ( typeof url === "object" ) {
options = url;
url = undefined;
}
// Force options to be an object
options = options || {};
var transport,
// URL without anti-cache param
cacheURL,
// Response headers
responseHeadersString,
responseHeaders,
// timeout handle
timeoutTimer,
// Url cleanup var
urlAnchor,
// Request state (becomes false upon send and true upon completion)
completed,
// To know if global events are to be dispatched
fireGlobals,
// Loop variable
i,
// uncached part of the url
uncached,
// Create the final options object
s = jQuery.ajaxSetup( {}, options ),
// Callbacks context
callbackContext = s.context || s,
// Context for global events is callbackContext if it is a DOM node or jQuery collection
globalEventContext = s.context &&
( callbackContext.nodeType || callbackContext.jquery ) ?
jQuery( callbackContext ) :
jQuery.event,
// Deferreds
deferred = jQuery.Deferred(),
completeDeferred = jQuery.Callbacks( "once memory" ),
// Status-dependent callbacks
statusCode = s.statusCode || {},
// Headers (they are sent all at once)
requestHeaders = {},
requestHeadersNames = {},
// Default abort message
strAbort = "canceled",
// Fake xhr
jqXHR = {
readyState: 0,
// Builds headers hashtable if needed
getResponseHeader: function( key ) {
var match;
if ( completed ) {
if ( !responseHeaders ) {
responseHeaders = {};
while ( ( match = rheaders.exec( responseHeadersString ) ) ) {
responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ];
}
}
match = responseHeaders[ key.toLowerCase() ];
}
return match == null ? null : match;
},
// Raw string
getAllResponseHeaders: function() {
return completed ? responseHeadersString : null;
},
// Caches the header
setRequestHeader: function( name, value ) {
if ( completed == null ) {
name = requestHeadersNames[ name.toLowerCase() ] =
requestHeadersNames[ name.toLowerCase() ] || name;
requestHeaders[ name ] = value;
}
return this;
},
// Overrides response content-type header
overrideMimeType: function( type ) {
if ( completed == null ) {
s.mimeType = type;
}
return this;
},
// Status-dependent callbacks
statusCode: function( map ) {
var code;
if ( map ) {
if ( completed ) {
// Execute the appropriate callbacks
jqXHR.always( map[ jqXHR.status ] );
} else {
// Lazy-add the new callbacks in a way that preserves old ones
for ( code in map ) {
statusCode[ code ] = [ statusCode[ code ], map[ code ] ];
}
}
}
return this;
},
// Cancel the request
abort: function( statusText ) {
var finalText = statusText || strAbort;
if ( transport ) {
transport.abort( finalText );
}
done( 0, finalText );
return this;
}
};
// Attach deferreds
deferred.promise( jqXHR );
// Add protocol if not provided (prefilters might expect it)
// Handle falsy url in the settings object (#10093: consistency with old signature)
// We also use the url parameter if available
s.url = ( ( url || s.url || location.href ) + "" )
.replace( rprotocol, location.protocol + "//" );
// Alias method option to type as per ticket #12004
s.type = options.method || options.type || s.method || s.type;
// Extract dataTypes list
s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ];
// A cross-domain request is in order when the origin doesn't match the current origin.
if ( s.crossDomain == null ) {
urlAnchor = document.createElement( "a" );
// Support: IE <=8 - 11, Edge 12 - 13
// IE throws exception on accessing the href property if url is malformed,
// e.g. http://example.com:80x/
try {
urlAnchor.href = s.url;
// Support: IE <=8 - 11 only
// Anchor's host property isn't correctly set when s.url is relative
urlAnchor.href = urlAnchor.href;
s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !==
urlAnchor.protocol + "//" + urlAnchor.host;
} catch ( e ) {
// If there is an error parsing the URL, assume it is crossDomain,
// it can be rejected by the transport if it is invalid
s.crossDomain = true;
}
}
// Convert data if not already a string
if ( s.data && s.processData && typeof s.data !== "string" ) {
s.data = jQuery.param( s.data, s.traditional );
}
// Apply prefilters
inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
// If request was aborted inside a prefilter, stop there
if ( completed ) {
return jqXHR;
}
// We can fire global events as of now if asked to
// Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118)
fireGlobals = jQuery.event && s.global;
// Watch for a new set of requests
if ( fireGlobals && jQuery.active++ === 0 ) {
jQuery.event.trigger( "ajaxStart" );
}
// Uppercase the type
s.type = s.type.toUpperCase();
// Determine if request has content
s.hasContent = !rnoContent.test( s.type );
// Save the URL in case we're toying with the If-Modified-Since
// and/or If-None-Match header later on
// Remove hash to simplify url manipulation
cacheURL = s.url.replace( rhash, "" );
// More options handling for requests with no content
if ( !s.hasContent ) {
// Remember the hash so we can put it back
uncached = s.url.slice( cacheURL.length );
// If data is available, append data to url
if ( s.data ) {
cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data;
// #9682: remove data so that it's not used in an eventual retry
delete s.data;
}
// Add or update anti-cache param if needed
if ( s.cache === false ) {
cacheURL = cacheURL.replace( rantiCache, "$1" );
uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached;
}
// Put hash and anti-cache on the URL that will be requested (gh-1732)
s.url = cacheURL + uncached;
// Change '%20' to '+' if this is encoded form body content (gh-2658)
} else if ( s.data && s.processData &&
( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) {
s.data = s.data.replace( r20, "+" );
}
// Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
if ( s.ifModified ) {
if ( jQuery.lastModified[ cacheURL ] ) {
jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] );
}
if ( jQuery.etag[ cacheURL ] ) {
jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] );
}
}
// Set the correct header, if data is being sent
if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
jqXHR.setRequestHeader( "Content-Type", s.contentType );
}
// Set the Accepts header for the server, depending on the dataType
jqXHR.setRequestHeader(
"Accept",
s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ?
s.accepts[ s.dataTypes[ 0 ] ] +
( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
s.accepts[ "*" ]
);
// Check for headers option
for ( i in s.headers ) {
jqXHR.setRequestHeader( i, s.headers[ i ] );
}
// Allow custom headers/mimetypes and early abort
if ( s.beforeSend &&
( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) {
// Abort if not done already and return
return jqXHR.abort();
}
// Aborting is no longer a cancellation
strAbort = "abort";
// Install callbacks on deferreds
completeDeferred.add( s.complete );
jqXHR.done( s.success );
jqXHR.fail( s.error );
// Get transport
transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
// If no transport, we auto-abort
if ( !transport ) {
done( -1, "No Transport" );
} else {
jqXHR.readyState = 1;
// Send global event
if ( fireGlobals ) {
globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
}
// If request was aborted inside ajaxSend, stop there
if ( completed ) {
return jqXHR;
}
// Timeout
if ( s.async && s.timeout > 0 ) {
timeoutTimer = window.setTimeout( function() {
jqXHR.abort( "timeout" );
}, s.timeout );
}
try {
completed = false;
transport.send( requestHeaders, done );
} catch ( e ) {
// Rethrow post-completion exceptions
if ( completed ) {
throw e;
}
// Propagate others as results
done( -1, e );
}
}
// Callback for when everything is done
function done( status, nativeStatusText, responses, headers ) {
var isSuccess, success, error, response, modified,
statusText = nativeStatusText;
// Ignore repeat invocations
if ( completed ) {
return;
}
completed = true;
// Clear timeout if it exists
if ( timeoutTimer ) {
window.clearTimeout( timeoutTimer );
}
// Dereference transport for early garbage collection
// (no matter how long the jqXHR object will be used)
transport = undefined;
// Cache response headers
responseHeadersString = headers || "";
// Set readyState
jqXHR.readyState = status > 0 ? 4 : 0;
// Determine if successful
isSuccess = status >= 200 && status < 300 || status === 304;
// Get response data
if ( responses ) {
response = ajaxHandleResponses( s, jqXHR, responses );
}
// Convert no matter what (that way responseXXX fields are always set)
response = ajaxConvert( s, response, jqXHR, isSuccess );
// If successful, handle type chaining
if ( isSuccess ) {
// Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
if ( s.ifModified ) {
modified = jqXHR.getResponseHeader( "Last-Modified" );
if ( modified ) {
jQuery.lastModified[ cacheURL ] = modified;
}
modified = jqXHR.getResponseHeader( "etag" );
if ( modified ) {
jQuery.etag[ cacheURL ] = modified;
}
}
// if no content
if ( status === 204 || s.type === "HEAD" ) {
statusText = "nocontent";
// if not modified
} else if ( status === 304 ) {
statusText = "notmodified";
// If we have data, let's convert it
} else {
statusText = response.state;
success = response.data;
error = response.error;
isSuccess = !error;
}
} else {
// Extract error from statusText and normalize for non-aborts
error = statusText;
if ( status || !statusText ) {
statusText = "error";
if ( status < 0 ) {
status = 0;
}
}
}
// Set data for the fake xhr object
jqXHR.status = status;
jqXHR.statusText = ( nativeStatusText || statusText ) + "";
// Success/Error
if ( isSuccess ) {
deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
} else {
deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
}
// Status-dependent callbacks
jqXHR.statusCode( statusCode );
statusCode = undefined;
if ( fireGlobals ) {
globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError",
[ jqXHR, s, isSuccess ? success : error ] );
}
// Complete
completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] );
if ( fireGlobals ) {
globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
// Handle the global AJAX counter
if ( !( --jQuery.active ) ) {
jQuery.event.trigger( "ajaxStop" );
}
}
}
return jqXHR;
},
getJSON: function( url, data, callback ) {
return jQuery.get( url, data, callback, "json" );
},
getScript: function( url, callback ) {
return jQuery.get( url, undefined, callback, "script" );
}
} );
jQuery.each( [ "get", "post" ], function( i, method ) {
jQuery[ method ] = function( url, data, callback, type ) {
// Shift arguments if data argument was omitted
if ( jQuery.isFunction( data ) ) {
type = type || callback;
callback = data;
data = undefined;
}
// The url can be an options object (which then must have .url)
return jQuery.ajax( jQuery.extend( {
url: url,
type: method,
dataType: type,
data: data,
success: callback
}, jQuery.isPlainObject( url ) && url ) );
};
} );
return jQuery;
} );
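
// Usage sketch for the prefilter hook defined above (the JSONP policy is
// hypothetical): a prefilter registered for a dataType may return another
// dataType string, which inspectPrefiltersOrTransports() uses to re-dispatch
// the request.
//
//   jQuery.ajaxPrefilter( "json", function( options, originalOptions, jqXHR ) {
//     if ( options.crossDomain ) {
//       return "jsonp"; // re-inspect with the jsonp prefilters/transports
//     }
//   } );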
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/ajax.js
|
ajax.js
|
define( [
"./core",
"./var/document",
"./var/rcssNum",
"./var/rnothtmlwhite",
"./css/var/cssExpand",
"./css/var/isHiddenWithinTree",
"./css/var/swap",
"./css/adjustCSS",
"./data/var/dataPriv",
"./css/showHide",
"./core/init",
"./queue",
"./deferred",
"./traversing",
"./manipulation",
"./css",
"./effects/Tween"
], function( jQuery, document, rcssNum, rnothtmlwhite, cssExpand, isHiddenWithinTree, swap,
adjustCSS, dataPriv, showHide ) {
"use strict";
var
fxNow, inProgress,
rfxtypes = /^(?:toggle|show|hide)$/,
rrun = /queueHooks$/;
function schedule() {
if ( inProgress ) {
if ( document.hidden === false && window.requestAnimationFrame ) {
window.requestAnimationFrame( schedule );
} else {
window.setTimeout( schedule, jQuery.fx.interval );
}
jQuery.fx.tick();
}
}
// Animations created synchronously will run synchronously
function createFxNow() {
window.setTimeout( function() {
fxNow = undefined;
} );
return ( fxNow = jQuery.now() );
}
// Generate parameters to create a standard animation
function genFx( type, includeWidth ) {
var which,
i = 0,
attrs = { height: type };
// If we include width, step value is 1 to do all cssExpand values,
// otherwise step value is 2 to skip over Left and Right
includeWidth = includeWidth ? 1 : 0;
for ( ; i < 4; i += 2 - includeWidth ) {
which = cssExpand[ i ];
attrs[ "margin" + which ] = attrs[ "padding" + which ] = type;
}
if ( includeWidth ) {
attrs.opacity = attrs.width = type;
}
return attrs;
}
function createTween( value, prop, animation ) {
var tween,
collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ),
index = 0,
length = collection.length;
for ( ; index < length; index++ ) {
if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) {
// We're done with this property
return tween;
}
}
}
function defaultPrefilter( elem, props, opts ) {
var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display,
isBox = "width" in props || "height" in props,
anim = this,
orig = {},
style = elem.style,
hidden = elem.nodeType && isHiddenWithinTree( elem ),
dataShow = dataPriv.get( elem, "fxshow" );
// Queue-skipping animations hijack the fx hooks
if ( !opts.queue ) {
hooks = jQuery._queueHooks( elem, "fx" );
if ( hooks.unqueued == null ) {
hooks.unqueued = 0;
oldfire = hooks.empty.fire;
hooks.empty.fire = function() {
if ( !hooks.unqueued ) {
oldfire();
}
};
}
hooks.unqueued++;
anim.always( function() {
// Ensure the complete handler is called before this completes
anim.always( function() {
hooks.unqueued--;
if ( !jQuery.queue( elem, "fx" ).length ) {
hooks.empty.fire();
}
} );
} );
}
// Detect show/hide animations
for ( prop in props ) {
value = props[ prop ];
if ( rfxtypes.test( value ) ) {
delete props[ prop ];
toggle = toggle || value === "toggle";
if ( value === ( hidden ? "hide" : "show" ) ) {
// Pretend to be hidden if this is a "show" and
// there is still data from a stopped show/hide
if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) {
hidden = true;
// Ignore all other no-op show/hide data
} else {
continue;
}
}
orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop );
}
}
// Bail out if this is a no-op like .hide().hide()
propTween = !jQuery.isEmptyObject( props );
if ( !propTween && jQuery.isEmptyObject( orig ) ) {
return;
}
// Restrict "overflow" and "display" styles during box animations
if ( isBox && elem.nodeType === 1 ) {
// Support: IE <=9 - 11, Edge 12 - 13
// Record all 3 overflow attributes because IE does not infer the shorthand
// from identically-valued overflowX and overflowY
opts.overflow = [ style.overflow, style.overflowX, style.overflowY ];
// Identify a display type, preferring old show/hide data over the CSS cascade
restoreDisplay = dataShow && dataShow.display;
if ( restoreDisplay == null ) {
restoreDisplay = dataPriv.get( elem, "display" );
}
display = jQuery.css( elem, "display" );
if ( display === "none" ) {
if ( restoreDisplay ) {
display = restoreDisplay;
} else {
// Get nonempty value(s) by temporarily forcing visibility
showHide( [ elem ], true );
restoreDisplay = elem.style.display || restoreDisplay;
display = jQuery.css( elem, "display" );
showHide( [ elem ] );
}
}
// Animate inline elements as inline-block
if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) {
if ( jQuery.css( elem, "float" ) === "none" ) {
// Restore the original display value at the end of pure show/hide animations
if ( !propTween ) {
anim.done( function() {
style.display = restoreDisplay;
} );
if ( restoreDisplay == null ) {
display = style.display;
restoreDisplay = display === "none" ? "" : display;
}
}
style.display = "inline-block";
}
}
}
if ( opts.overflow ) {
style.overflow = "hidden";
anim.always( function() {
style.overflow = opts.overflow[ 0 ];
style.overflowX = opts.overflow[ 1 ];
style.overflowY = opts.overflow[ 2 ];
} );
}
// Implement show/hide animations
propTween = false;
for ( prop in orig ) {
// General show/hide setup for this element animation
if ( !propTween ) {
if ( dataShow ) {
if ( "hidden" in dataShow ) {
hidden = dataShow.hidden;
}
} else {
dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } );
}
// Store hidden/visible for toggle so `.stop().toggle()` "reverses"
if ( toggle ) {
dataShow.hidden = !hidden;
}
// Show elements before animating them
if ( hidden ) {
showHide( [ elem ], true );
}
/* eslint-disable no-loop-func */
anim.done( function() {
/* eslint-enable no-loop-func */
// The final step of a "hide" animation is actually hiding the element
if ( !hidden ) {
showHide( [ elem ] );
}
dataPriv.remove( elem, "fxshow" );
for ( prop in orig ) {
jQuery.style( elem, prop, orig[ prop ] );
}
} );
}
// Per-property setup
propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim );
if ( !( prop in dataShow ) ) {
dataShow[ prop ] = propTween.start;
if ( hidden ) {
propTween.end = propTween.start;
propTween.start = 0;
}
}
}
}
function propFilter( props, specialEasing ) {
var index, name, easing, value, hooks;
// camelCase, specialEasing and expand cssHook pass
for ( index in props ) {
name = jQuery.camelCase( index );
easing = specialEasing[ name ];
value = props[ index ];
if ( Array.isArray( value ) ) {
easing = value[ 1 ];
value = props[ index ] = value[ 0 ];
}
if ( index !== name ) {
props[ name ] = value;
delete props[ index ];
}
hooks = jQuery.cssHooks[ name ];
if ( hooks && "expand" in hooks ) {
value = hooks.expand( value );
delete props[ name ];
// Not quite $.extend, this won't overwrite existing keys.
// Reusing 'index' because we have the correct "name"
for ( index in value ) {
if ( !( index in props ) ) {
props[ index ] = value[ index ];
specialEasing[ index ] = easing;
}
}
} else {
specialEasing[ name ] = easing;
}
}
}
function Animation( elem, properties, options ) {
var result,
stopped,
index = 0,
length = Animation.prefilters.length,
deferred = jQuery.Deferred().always( function() {
// Don't match elem in the :animated selector
delete tick.elem;
} ),
tick = function() {
if ( stopped ) {
return false;
}
var currentTime = fxNow || createFxNow(),
remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ),
// Support: Android 2.3 only
// Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497)
temp = remaining / animation.duration || 0,
percent = 1 - temp,
index = 0,
length = animation.tweens.length;
for ( ; index < length; index++ ) {
animation.tweens[ index ].run( percent );
}
deferred.notifyWith( elem, [ animation, percent, remaining ] );
// If there's more to do, yield
if ( percent < 1 && length ) {
return remaining;
}
// If this was an empty animation, synthesize a final progress notification
if ( !length ) {
deferred.notifyWith( elem, [ animation, 1, 0 ] );
}
// Resolve the animation and report its conclusion
deferred.resolveWith( elem, [ animation ] );
return false;
},
animation = deferred.promise( {
elem: elem,
props: jQuery.extend( {}, properties ),
opts: jQuery.extend( true, {
specialEasing: {},
easing: jQuery.easing._default
}, options ),
originalProperties: properties,
originalOptions: options,
startTime: fxNow || createFxNow(),
duration: options.duration,
tweens: [],
createTween: function( prop, end ) {
var tween = jQuery.Tween( elem, animation.opts, prop, end,
animation.opts.specialEasing[ prop ] || animation.opts.easing );
animation.tweens.push( tween );
return tween;
},
stop: function( gotoEnd ) {
var index = 0,
// If we are going to the end, we want to run all the tweens
// otherwise we skip this part
length = gotoEnd ? animation.tweens.length : 0;
if ( stopped ) {
return this;
}
stopped = true;
for ( ; index < length; index++ ) {
animation.tweens[ index ].run( 1 );
}
// Resolve when we played the last frame; otherwise, reject
if ( gotoEnd ) {
deferred.notifyWith( elem, [ animation, 1, 0 ] );
deferred.resolveWith( elem, [ animation, gotoEnd ] );
} else {
deferred.rejectWith( elem, [ animation, gotoEnd ] );
}
return this;
}
} ),
props = animation.props;
propFilter( props, animation.opts.specialEasing );
for ( ; index < length; index++ ) {
result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts );
if ( result ) {
if ( jQuery.isFunction( result.stop ) ) {
jQuery._queueHooks( animation.elem, animation.opts.queue ).stop =
jQuery.proxy( result.stop, result );
}
return result;
}
}
jQuery.map( props, createTween, animation );
if ( jQuery.isFunction( animation.opts.start ) ) {
animation.opts.start.call( elem, animation );
}
// Attach callbacks from options
animation
.progress( animation.opts.progress )
.done( animation.opts.done, animation.opts.complete )
.fail( animation.opts.fail )
.always( animation.opts.always );
jQuery.fx.timer(
jQuery.extend( tick, {
elem: elem,
anim: animation,
queue: animation.opts.queue
} )
);
return animation;
}
jQuery.Animation = jQuery.extend( Animation, {
tweeners: {
"*": [ function( prop, value ) {
var tween = this.createTween( prop, value );
adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween );
return tween;
} ]
},
tweener: function( props, callback ) {
if ( jQuery.isFunction( props ) ) {
callback = props;
props = [ "*" ];
} else {
props = props.match( rnothtmlwhite );
}
var prop,
index = 0,
length = props.length;
for ( ; index < length; index++ ) {
prop = props[ index ];
Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || [];
Animation.tweeners[ prop ].unshift( callback );
}
},
prefilters: [ defaultPrefilter ],
prefilter: function( callback, prepend ) {
if ( prepend ) {
Animation.prefilters.unshift( callback );
} else {
Animation.prefilters.push( callback );
}
}
} );
jQuery.speed = function( speed, easing, fn ) {
var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : {
complete: fn || !fn && easing ||
jQuery.isFunction( speed ) && speed,
duration: speed,
easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing
};
// Go to the end state if fx are off
if ( jQuery.fx.off ) {
opt.duration = 0;
} else {
if ( typeof opt.duration !== "number" ) {
if ( opt.duration in jQuery.fx.speeds ) {
opt.duration = jQuery.fx.speeds[ opt.duration ];
} else {
opt.duration = jQuery.fx.speeds._default;
}
}
}
// Normalize opt.queue - true/undefined/null -> "fx"
if ( opt.queue == null || opt.queue === true ) {
opt.queue = "fx";
}
// Queueing
opt.old = opt.complete;
opt.complete = function() {
if ( jQuery.isFunction( opt.old ) ) {
opt.old.call( this );
}
if ( opt.queue ) {
jQuery.dequeue( this, opt.queue );
}
};
return opt;
};
jQuery.fn.extend( {
fadeTo: function( speed, to, easing, callback ) {
// Show any hidden elements after setting opacity to 0
return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show()
// Animate to the value specified
.end().animate( { opacity: to }, speed, easing, callback );
},
animate: function( prop, speed, easing, callback ) {
var empty = jQuery.isEmptyObject( prop ),
optall = jQuery.speed( speed, easing, callback ),
doAnimation = function() {
// Operate on a copy of prop so per-property easing won't be lost
var anim = Animation( this, jQuery.extend( {}, prop ), optall );
// Empty animations, or finishing resolves immediately
if ( empty || dataPriv.get( this, "finish" ) ) {
anim.stop( true );
}
};
doAnimation.finish = doAnimation;
return empty || optall.queue === false ?
this.each( doAnimation ) :
this.queue( optall.queue, doAnimation );
},
stop: function( type, clearQueue, gotoEnd ) {
var stopQueue = function( hooks ) {
var stop = hooks.stop;
delete hooks.stop;
stop( gotoEnd );
};
if ( typeof type !== "string" ) {
gotoEnd = clearQueue;
clearQueue = type;
type = undefined;
}
if ( clearQueue && type !== false ) {
this.queue( type || "fx", [] );
}
return this.each( function() {
var dequeue = true,
index = type != null && type + "queueHooks",
timers = jQuery.timers,
data = dataPriv.get( this );
if ( index ) {
if ( data[ index ] && data[ index ].stop ) {
stopQueue( data[ index ] );
}
} else {
for ( index in data ) {
if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) {
stopQueue( data[ index ] );
}
}
}
for ( index = timers.length; index--; ) {
if ( timers[ index ].elem === this &&
( type == null || timers[ index ].queue === type ) ) {
timers[ index ].anim.stop( gotoEnd );
dequeue = false;
timers.splice( index, 1 );
}
}
// Start the next in the queue if the last step wasn't forced.
// Timers currently will call their complete callbacks, which
// will dequeue but only if they were gotoEnd.
if ( dequeue || !gotoEnd ) {
jQuery.dequeue( this, type );
}
} );
},
finish: function( type ) {
if ( type !== false ) {
type = type || "fx";
}
return this.each( function() {
var index,
data = dataPriv.get( this ),
queue = data[ type + "queue" ],
hooks = data[ type + "queueHooks" ],
timers = jQuery.timers,
length = queue ? queue.length : 0;
// Enable finishing flag on private data
data.finish = true;
// Empty the queue first
jQuery.queue( this, type, [] );
if ( hooks && hooks.stop ) {
hooks.stop.call( this, true );
}
// Look for any active animations, and finish them
for ( index = timers.length; index--; ) {
if ( timers[ index ].elem === this && timers[ index ].queue === type ) {
timers[ index ].anim.stop( true );
timers.splice( index, 1 );
}
}
// Look for any animations in the old queue and finish them
for ( index = 0; index < length; index++ ) {
if ( queue[ index ] && queue[ index ].finish ) {
queue[ index ].finish.call( this );
}
}
// Turn off finishing flag
delete data.finish;
} );
}
} );
jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) {
var cssFn = jQuery.fn[ name ];
jQuery.fn[ name ] = function( speed, easing, callback ) {
return speed == null || typeof speed === "boolean" ?
cssFn.apply( this, arguments ) :
this.animate( genFx( name, true ), speed, easing, callback );
};
} );
// Generate shortcuts for custom animations
jQuery.each( {
slideDown: genFx( "show" ),
slideUp: genFx( "hide" ),
slideToggle: genFx( "toggle" ),
fadeIn: { opacity: "show" },
fadeOut: { opacity: "hide" },
fadeToggle: { opacity: "toggle" }
}, function( name, props ) {
jQuery.fn[ name ] = function( speed, easing, callback ) {
return this.animate( props, speed, easing, callback );
};
} );
jQuery.timers = [];
jQuery.fx.tick = function() {
var timer,
i = 0,
timers = jQuery.timers;
fxNow = jQuery.now();
for ( ; i < timers.length; i++ ) {
timer = timers[ i ];
// Run the timer and safely remove it when done (allowing for external removal)
if ( !timer() && timers[ i ] === timer ) {
timers.splice( i--, 1 );
}
}
if ( !timers.length ) {
jQuery.fx.stop();
}
fxNow = undefined;
};
jQuery.fx.timer = function( timer ) {
jQuery.timers.push( timer );
jQuery.fx.start();
};
jQuery.fx.interval = 13;
jQuery.fx.start = function() {
if ( inProgress ) {
return;
}
inProgress = true;
schedule();
};
jQuery.fx.stop = function() {
inProgress = null;
};
jQuery.fx.speeds = {
slow: 600,
fast: 200,
// Default speed
_default: 400
};
return jQuery;
} );
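
// Usage sketch for the effects API defined above (selector and values are
// hypothetical): jQuery.speed() normalizes the (speed, easing, callback)
// arguments, and setting jQuery.fx.off makes animations jump straight to
// their end state.
//
//   jQuery( "#box" ).animate( { width: "200px", opacity: 0.5 }, 400, function() {
//     console.log( "animation complete" );
//   } );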
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/effects.js
|
effects.js
|
define( [
"./core",
"./var/rnothtmlwhite"
], function( jQuery, rnothtmlwhite ) {
"use strict";
// Convert String-formatted options into Object-formatted ones
function createOptions( options ) {
var object = {};
jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
object[ flag ] = true;
} );
return object;
}
/*
* Create a callback list using the following parameters:
*
* options: an optional list of space-separated options that will change how
* the callback list behaves or a more traditional option object
*
* By default a callback list will act like an event callback list and can be
* "fired" multiple times.
*
* Possible options:
*
* once: will ensure the callback list can only be fired once (like a Deferred)
*
* memory: will keep track of previous values and will call any callback added
* after the list has been fired right away with the latest "memorized"
* values (like a Deferred)
*
* unique: will ensure a callback can only be added once (no duplicate in the list)
*
* stopOnFalse: interrupt callings when a callback returns false
*
*/
jQuery.Callbacks = function( options ) {
// Convert options from String-formatted to Object-formatted if needed
// (we check in cache first)
options = typeof options === "string" ?
createOptions( options ) :
jQuery.extend( {}, options );
var // Flag to know if list is currently firing
firing,
// Last fire value for non-forgettable lists
memory,
// Flag to know if list was already fired
fired,
// Flag to prevent firing
locked,
// Actual callback list
list = [],
// Queue of execution data for repeatable lists
queue = [],
// Index of currently firing callback (modified by add/remove as needed)
firingIndex = -1,
// Fire callbacks
fire = function() {
// Enforce single-firing
locked = locked || options.once;
// Execute callbacks for all pending executions,
// respecting firingIndex overrides and runtime changes
fired = firing = true;
for ( ; queue.length; firingIndex = -1 ) {
memory = queue.shift();
while ( ++firingIndex < list.length ) {
// Run callback and check for early termination
if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false &&
options.stopOnFalse ) {
// Jump to end and forget the data so .add doesn't re-fire
firingIndex = list.length;
memory = false;
}
}
}
// Forget the data if we're done with it
if ( !options.memory ) {
memory = false;
}
firing = false;
// Clean up if we're done firing for good
if ( locked ) {
// Keep an empty list if we have data for future add calls
if ( memory ) {
list = [];
// Otherwise, this object is spent
} else {
list = "";
}
}
},
// Actual Callbacks object
self = {
// Add a callback or a collection of callbacks to the list
add: function() {
if ( list ) {
// If we have memory from a past run, we should fire after adding
if ( memory && !firing ) {
firingIndex = list.length - 1;
queue.push( memory );
}
( function add( args ) {
jQuery.each( args, function( _, arg ) {
if ( jQuery.isFunction( arg ) ) {
if ( !options.unique || !self.has( arg ) ) {
list.push( arg );
}
} else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) {
// Inspect recursively
add( arg );
}
} );
} )( arguments );
if ( memory && !firing ) {
fire();
}
}
return this;
},
// Remove a callback from the list
remove: function() {
jQuery.each( arguments, function( _, arg ) {
var index;
while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
list.splice( index, 1 );
// Handle firing indexes
if ( index <= firingIndex ) {
firingIndex--;
}
}
} );
return this;
},
// Check if a given callback is in the list.
// If no argument is given, return whether or not list has callbacks attached.
has: function( fn ) {
return fn ?
jQuery.inArray( fn, list ) > -1 :
list.length > 0;
},
// Remove all callbacks from the list
empty: function() {
if ( list ) {
list = [];
}
return this;
},
// Disable .fire and .add
// Abort any current/pending executions
// Clear all callbacks and values
disable: function() {
locked = queue = [];
list = memory = "";
return this;
},
disabled: function() {
return !list;
},
// Disable .fire
// Also disable .add unless we have memory (since it would have no effect)
// Abort any pending executions
lock: function() {
locked = queue = [];
if ( !memory && !firing ) {
list = memory = "";
}
return this;
},
locked: function() {
return !!locked;
},
// Call all callbacks with the given context and arguments
fireWith: function( context, args ) {
if ( !locked ) {
args = args || [];
args = [ context, args.slice ? args.slice() : args ];
queue.push( args );
if ( !firing ) {
fire();
}
}
return this;
},
// Call all the callbacks with the given arguments
fire: function() {
self.fireWith( this, arguments );
return this;
},
// To know if the callbacks have already been called at least once
fired: function() {
return !!fired;
}
};
return self;
};
return jQuery;
} );
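
// Usage sketch for the flags documented above (handlers and arguments are
// hypothetical):
//
//   var callbacks = jQuery.Callbacks( "once memory" );
//   callbacks.add( function( name ) { console.log( "hello " + name ); } );
//   callbacks.fire( "alice" );        // logs "hello alice"
//   callbacks.add( function( name ) { // "memory": runs immediately with the
//     console.log( "late " + name );  // memorized args, logging "late alice"
//   } );
//   callbacks.fire( "bob" );          // ignored: "once" locked the list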
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/callbacks.js
|
callbacks.js
|
define( [
"./core",
"./var/concat",
"./var/push",
"./core/access",
"./manipulation/var/rcheckableType",
"./manipulation/var/rtagName",
"./manipulation/var/rscriptType",
"./manipulation/wrapMap",
"./manipulation/getAll",
"./manipulation/setGlobalEval",
"./manipulation/buildFragment",
"./manipulation/support",
"./data/var/dataPriv",
"./data/var/dataUser",
"./data/var/acceptData",
"./core/DOMEval",
"./core/nodeName",
"./core/init",
"./traversing",
"./selector",
"./event"
], function( jQuery, concat, push, access,
rcheckableType, rtagName, rscriptType,
wrapMap, getAll, setGlobalEval, buildFragment, support,
dataPriv, dataUser, acceptData, DOMEval, nodeName ) {
"use strict";
var
/* eslint-disable max-len */
// See https://github.com/eslint/eslint/issues/3229
rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
/* eslint-enable */
// Support: IE <=10 - 11, Edge 12 - 13
// In IE/Edge using regex groups here causes severe slowdowns.
// See https://connect.microsoft.com/IE/feedback/details/1736512/
rnoInnerhtml = /<script|<style|<link/i,
// checked="checked" or checked
rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
rscriptTypeMasked = /^true\/(.*)/,
rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
// Prefer a tbody over its parent table for containing new rows
function manipulationTarget( elem, content ) {
if ( nodeName( elem, "table" ) &&
nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
return jQuery( ">tbody", elem )[ 0 ] || elem;
}
return elem;
}
// Replace/restore the type attribute of script elements for safe DOM manipulation
function disableScript( elem ) {
elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
return elem;
}
function restoreScript( elem ) {
var match = rscriptTypeMasked.exec( elem.type );
if ( match ) {
elem.type = match[ 1 ];
} else {
elem.removeAttribute( "type" );
}
return elem;
}
function cloneCopyEvent( src, dest ) {
var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events;
if ( dest.nodeType !== 1 ) {
return;
}
// 1. Copy private data: events, handlers, etc.
if ( dataPriv.hasData( src ) ) {
pdataOld = dataPriv.access( src );
pdataCur = dataPriv.set( dest, pdataOld );
events = pdataOld.events;
if ( events ) {
delete pdataCur.handle;
pdataCur.events = {};
for ( type in events ) {
for ( i = 0, l = events[ type ].length; i < l; i++ ) {
jQuery.event.add( dest, type, events[ type ][ i ] );
}
}
}
}
// 2. Copy user data
if ( dataUser.hasData( src ) ) {
udataOld = dataUser.access( src );
udataCur = jQuery.extend( {}, udataOld );
dataUser.set( dest, udataCur );
}
}
// Fix IE bugs, see support tests
function fixInput( src, dest ) {
var nodeName = dest.nodeName.toLowerCase();
// Fails to persist the checked state of a cloned checkbox or radio button.
if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
dest.checked = src.checked;
// Fails to return the selected option to the default selected state when cloning options
} else if ( nodeName === "input" || nodeName === "textarea" ) {
dest.defaultValue = src.defaultValue;
}
}
function domManip( collection, args, callback, ignored ) {
// Flatten any nested arrays
args = concat.apply( [], args );
var fragment, first, scripts, hasScripts, node, doc,
i = 0,
l = collection.length,
iNoClone = l - 1,
value = args[ 0 ],
isFunction = jQuery.isFunction( value );
// We can't cloneNode fragments that contain checked, in WebKit
if ( isFunction ||
( l > 1 && typeof value === "string" &&
!support.checkClone && rchecked.test( value ) ) ) {
return collection.each( function( index ) {
var self = collection.eq( index );
if ( isFunction ) {
args[ 0 ] = value.call( this, index, self.html() );
}
domManip( self, args, callback, ignored );
} );
}
if ( l ) {
fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored );
first = fragment.firstChild;
if ( fragment.childNodes.length === 1 ) {
fragment = first;
}
// Require either new content or an interest in ignored elements to invoke the callback
if ( first || ignored ) {
scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
hasScripts = scripts.length;
// Use the original fragment for the last item
// instead of the first because it can end up
// being emptied incorrectly in certain situations (#8070).
for ( ; i < l; i++ ) {
node = fragment;
if ( i !== iNoClone ) {
node = jQuery.clone( node, true, true );
// Keep references to cloned scripts for later restoration
if ( hasScripts ) {
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
jQuery.merge( scripts, getAll( node, "script" ) );
}
}
callback.call( collection[ i ], node, i );
}
if ( hasScripts ) {
doc = scripts[ scripts.length - 1 ].ownerDocument;
// Reenable scripts
jQuery.map( scripts, restoreScript );
// Evaluate executable scripts on first document insertion
for ( i = 0; i < hasScripts; i++ ) {
node = scripts[ i ];
if ( rscriptType.test( node.type || "" ) &&
!dataPriv.access( node, "globalEval" ) &&
jQuery.contains( doc, node ) ) {
if ( node.src ) {
// Optional AJAX dependency, but won't run scripts if not present
if ( jQuery._evalUrl ) {
jQuery._evalUrl( node.src );
}
} else {
DOMEval( node.textContent.replace( rcleanScript, "" ), doc );
}
}
}
}
}
}
return collection;
}
function remove( elem, selector, keepData ) {
var node,
nodes = selector ? jQuery.filter( selector, elem ) : elem,
i = 0;
for ( ; ( node = nodes[ i ] ) != null; i++ ) {
if ( !keepData && node.nodeType === 1 ) {
jQuery.cleanData( getAll( node ) );
}
if ( node.parentNode ) {
if ( keepData && jQuery.contains( node.ownerDocument, node ) ) {
setGlobalEval( getAll( node, "script" ) );
}
node.parentNode.removeChild( node );
}
}
return elem;
}
jQuery.extend( {
htmlPrefilter: function( html ) {
return html.replace( rxhtmlTag, "<$1></$2>" );
},
clone: function( elem, dataAndEvents, deepDataAndEvents ) {
var i, l, srcElements, destElements,
clone = elem.cloneNode( true ),
inPage = jQuery.contains( elem.ownerDocument, elem );
// Fix IE cloning issues
if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
!jQuery.isXMLDoc( elem ) ) {
// We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
destElements = getAll( clone );
srcElements = getAll( elem );
for ( i = 0, l = srcElements.length; i < l; i++ ) {
fixInput( srcElements[ i ], destElements[ i ] );
}
}
// Copy the events from the original to the clone
if ( dataAndEvents ) {
if ( deepDataAndEvents ) {
srcElements = srcElements || getAll( elem );
destElements = destElements || getAll( clone );
for ( i = 0, l = srcElements.length; i < l; i++ ) {
cloneCopyEvent( srcElements[ i ], destElements[ i ] );
}
} else {
cloneCopyEvent( elem, clone );
}
}
// Preserve script evaluation history
destElements = getAll( clone, "script" );
if ( destElements.length > 0 ) {
setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
}
// Return the cloned set
return clone;
},
cleanData: function( elems ) {
var data, elem, type,
special = jQuery.event.special,
i = 0;
for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
if ( acceptData( elem ) ) {
if ( ( data = elem[ dataPriv.expando ] ) ) {
if ( data.events ) {
for ( type in data.events ) {
if ( special[ type ] ) {
jQuery.event.remove( elem, type );
// This is a shortcut to avoid jQuery.event.remove's overhead
} else {
jQuery.removeEvent( elem, type, data.handle );
}
}
}
// Support: Chrome <=35 - 45+
// Assign undefined instead of using delete, see Data#remove
elem[ dataPriv.expando ] = undefined;
}
if ( elem[ dataUser.expando ] ) {
// Support: Chrome <=35 - 45+
// Assign undefined instead of using delete, see Data#remove
elem[ dataUser.expando ] = undefined;
}
}
}
}
} );
jQuery.fn.extend( {
detach: function( selector ) {
return remove( this, selector, true );
},
remove: function( selector ) {
return remove( this, selector );
},
text: function( value ) {
return access( this, function( value ) {
return value === undefined ?
jQuery.text( this ) :
this.empty().each( function() {
if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
this.textContent = value;
}
} );
}, null, value, arguments.length );
},
append: function() {
return domManip( this, arguments, function( elem ) {
if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
var target = manipulationTarget( this, elem );
target.appendChild( elem );
}
} );
},
prepend: function() {
return domManip( this, arguments, function( elem ) {
if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
var target = manipulationTarget( this, elem );
target.insertBefore( elem, target.firstChild );
}
} );
},
before: function() {
return domManip( this, arguments, function( elem ) {
if ( this.parentNode ) {
this.parentNode.insertBefore( elem, this );
}
} );
},
after: function() {
return domManip( this, arguments, function( elem ) {
if ( this.parentNode ) {
this.parentNode.insertBefore( elem, this.nextSibling );
}
} );
},
empty: function() {
var elem,
i = 0;
for ( ; ( elem = this[ i ] ) != null; i++ ) {
if ( elem.nodeType === 1 ) {
// Prevent memory leaks
jQuery.cleanData( getAll( elem, false ) );
// Remove any remaining nodes
elem.textContent = "";
}
}
return this;
},
clone: function( dataAndEvents, deepDataAndEvents ) {
dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
return this.map( function() {
return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
} );
},
html: function( value ) {
return access( this, function( value ) {
var elem = this[ 0 ] || {},
i = 0,
l = this.length;
if ( value === undefined && elem.nodeType === 1 ) {
return elem.innerHTML;
}
// See if we can take a shortcut and just use innerHTML
if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
!wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) {
value = jQuery.htmlPrefilter( value );
try {
for ( ; i < l; i++ ) {
elem = this[ i ] || {};
// Remove element nodes and prevent memory leaks
if ( elem.nodeType === 1 ) {
jQuery.cleanData( getAll( elem, false ) );
elem.innerHTML = value;
}
}
elem = 0;
// If using innerHTML throws an exception, use the fallback method
} catch ( e ) {}
}
if ( elem ) {
this.empty().append( value );
}
}, null, value, arguments.length );
},
replaceWith: function() {
var ignored = [];
// Make the changes, replacing each non-ignored context element with the new content
return domManip( this, arguments, function( elem ) {
var parent = this.parentNode;
if ( jQuery.inArray( this, ignored ) < 0 ) {
jQuery.cleanData( getAll( this ) );
if ( parent ) {
parent.replaceChild( elem, this );
}
}
// Force callback invocation
}, ignored );
}
} );
jQuery.each( {
appendTo: "append",
prependTo: "prepend",
insertBefore: "before",
insertAfter: "after",
replaceAll: "replaceWith"
}, function( name, original ) {
jQuery.fn[ name ] = function( selector ) {
var elems,
ret = [],
insert = jQuery( selector ),
last = insert.length - 1,
i = 0;
for ( ; i <= last; i++ ) {
elems = i === last ? this : this.clone( true );
jQuery( insert[ i ] )[ original ]( elems );
// Support: Android <=4.0 only, PhantomJS 1 only
// .get() because push.apply(_, arraylike) throws on ancient WebKit
push.apply( ret, elems.get() );
}
return this.pushStack( ret );
};
} );
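// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// The loop above derives appendTo/prependTo/etc. by flipping target and
// content: a.append( b ) and b.appendTo( a ) build the same DOM, but the
// reversed form returns the inserted set, which matters for chaining.
function exampleAppendVsAppendTo() {
	var list = jQuery( "<ul></ul>" );

	list.append( "<li>one</li>" ); // returns `list`
	var item = jQuery( "<li>two</li>" ).appendTo( list ); // returns the <li>
	return item.parent()[ 0 ] === list[ 0 ]; // true
}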
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/manipulation.js
|
manipulation.js
|
define( [
"./core",
"./var/slice",
"./callbacks"
], function( jQuery, slice ) {
"use strict";
function Identity( v ) {
return v;
}
function Thrower( ex ) {
throw ex;
}
function adoptValue( value, resolve, reject, noValue ) {
var method;
try {
// Check for promise aspect first to privilege synchronous behavior
if ( value && jQuery.isFunction( ( method = value.promise ) ) ) {
method.call( value ).done( resolve ).fail( reject );
// Other thenables
} else if ( value && jQuery.isFunction( ( method = value.then ) ) ) {
method.call( value, resolve, reject );
// Other non-thenables
} else {
// Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer:
// * false: [ value ].slice( 0 ) => resolve( value )
// * true: [ value ].slice( 1 ) => resolve()
resolve.apply( undefined, [ value ].slice( noValue ) );
}
// For Promises/A+, convert exceptions into rejections
// Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in
// Deferred#then to conditionally suppress rejection.
} catch ( value ) {
// Support: Android 4.0 only
// Strict mode functions invoked without .call/.apply get global-object context
reject.apply( undefined, [ value ] );
}
}
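// --- Editor's sketch (illustration only, not upstream jQuery) ---
// adoptValue's branches as observed through jQuery.when (defined below):
// jQuery promises and native thenables are unwrapped, plain values resolve
// as-is. Assumes a Promise-capable environment; the function is not invoked.
function exampleAdoptValue() {
	jQuery.when( 42 ).done( function( v ) {
		// v === 42 — non-thenable branch, resolve( value ) fires synchronously
	} );
	jQuery.when( Promise.resolve( "ok" ) ).done( function( v ) {
		// v === "ok" — thenable branch, value.then( resolve, reject ), async
	} );
}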
jQuery.extend( {
Deferred: function( func ) {
var tuples = [
// action, add listener, callbacks,
// ... .then handlers, argument index, [final state]
[ "notify", "progress", jQuery.Callbacks( "memory" ),
jQuery.Callbacks( "memory" ), 2 ],
[ "resolve", "done", jQuery.Callbacks( "once memory" ),
jQuery.Callbacks( "once memory" ), 0, "resolved" ],
[ "reject", "fail", jQuery.Callbacks( "once memory" ),
jQuery.Callbacks( "once memory" ), 1, "rejected" ]
],
state = "pending",
promise = {
state: function() {
return state;
},
always: function() {
deferred.done( arguments ).fail( arguments );
return this;
},
"catch": function( fn ) {
return promise.then( null, fn );
},
// Keep pipe for back-compat
pipe: function( /* fnDone, fnFail, fnProgress */ ) {
var fns = arguments;
return jQuery.Deferred( function( newDefer ) {
jQuery.each( tuples, function( i, tuple ) {
// Map tuples (progress, done, fail) to arguments (done, fail, progress)
var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ];
// deferred.progress(function() { bind to newDefer or newDefer.notify })
// deferred.done(function() { bind to newDefer or newDefer.resolve })
// deferred.fail(function() { bind to newDefer or newDefer.reject })
deferred[ tuple[ 1 ] ]( function() {
var returned = fn && fn.apply( this, arguments );
if ( returned && jQuery.isFunction( returned.promise ) ) {
returned.promise()
.progress( newDefer.notify )
.done( newDefer.resolve )
.fail( newDefer.reject );
} else {
newDefer[ tuple[ 0 ] + "With" ](
this,
fn ? [ returned ] : arguments
);
}
} );
} );
fns = null;
} ).promise();
},
then: function( onFulfilled, onRejected, onProgress ) {
var maxDepth = 0;
function resolve( depth, deferred, handler, special ) {
return function() {
var that = this,
args = arguments,
mightThrow = function() {
var returned, then;
// Support: Promises/A+ section 2.3.3.3.3
// https://promisesaplus.com/#point-59
// Ignore double-resolution attempts
if ( depth < maxDepth ) {
return;
}
returned = handler.apply( that, args );
// Support: Promises/A+ section 2.3.1
// https://promisesaplus.com/#point-48
if ( returned === deferred.promise() ) {
throw new TypeError( "Thenable self-resolution" );
}
// Support: Promises/A+ sections 2.3.3.1, 3.5
// https://promisesaplus.com/#point-54
// https://promisesaplus.com/#point-75
// Retrieve `then` only once
then = returned &&
// Support: Promises/A+ section 2.3.4
// https://promisesaplus.com/#point-64
// Only check objects and functions for thenability
( typeof returned === "object" ||
typeof returned === "function" ) &&
returned.then;
// Handle a returned thenable
if ( jQuery.isFunction( then ) ) {
// Special processors (notify) just wait for resolution
if ( special ) {
then.call(
returned,
resolve( maxDepth, deferred, Identity, special ),
resolve( maxDepth, deferred, Thrower, special )
);
// Normal processors (resolve) also hook into progress
} else {
// ...and disregard older resolution values
maxDepth++;
then.call(
returned,
resolve( maxDepth, deferred, Identity, special ),
resolve( maxDepth, deferred, Thrower, special ),
resolve( maxDepth, deferred, Identity,
deferred.notifyWith )
);
}
// Handle all other returned values
} else {
// Only substitute handlers pass on context
// and multiple values (non-spec behavior)
if ( handler !== Identity ) {
that = undefined;
args = [ returned ];
}
// Process the value(s)
// Default process is resolve
( special || deferred.resolveWith )( that, args );
}
},
// Only normal processors (resolve) catch and reject exceptions
process = special ?
mightThrow :
function() {
try {
mightThrow();
} catch ( e ) {
if ( jQuery.Deferred.exceptionHook ) {
jQuery.Deferred.exceptionHook( e,
process.stackTrace );
}
// Support: Promises/A+ section 2.3.3.3.4.1
// https://promisesaplus.com/#point-61
// Ignore post-resolution exceptions
if ( depth + 1 >= maxDepth ) {
// Only substitute handlers pass on context
// and multiple values (non-spec behavior)
if ( handler !== Thrower ) {
that = undefined;
args = [ e ];
}
deferred.rejectWith( that, args );
}
}
};
// Support: Promises/A+ section 2.3.3.3.1
// https://promisesaplus.com/#point-57
// Re-resolve promises immediately to dodge false rejection from
// subsequent errors
if ( depth ) {
process();
} else {
// Call an optional hook to record the stack, in case of exception
// since it's otherwise lost when execution goes async
if ( jQuery.Deferred.getStackHook ) {
process.stackTrace = jQuery.Deferred.getStackHook();
}
window.setTimeout( process );
}
};
}
return jQuery.Deferred( function( newDefer ) {
// progress_handlers.add( ... )
tuples[ 0 ][ 3 ].add(
resolve(
0,
newDefer,
jQuery.isFunction( onProgress ) ?
onProgress :
Identity,
newDefer.notifyWith
)
);
// fulfilled_handlers.add( ... )
tuples[ 1 ][ 3 ].add(
resolve(
0,
newDefer,
jQuery.isFunction( onFulfilled ) ?
onFulfilled :
Identity
)
);
// rejected_handlers.add( ... )
tuples[ 2 ][ 3 ].add(
resolve(
0,
newDefer,
jQuery.isFunction( onRejected ) ?
onRejected :
Thrower
)
);
} ).promise();
},
// Get a promise for this deferred
// If obj is provided, the promise aspect is added to the object
promise: function( obj ) {
return obj != null ? jQuery.extend( obj, promise ) : promise;
}
},
deferred = {};
// Add list-specific methods
jQuery.each( tuples, function( i, tuple ) {
var list = tuple[ 2 ],
stateString = tuple[ 5 ];
// promise.progress = list.add
// promise.done = list.add
// promise.fail = list.add
promise[ tuple[ 1 ] ] = list.add;
// Handle state
if ( stateString ) {
list.add(
function() {
// state = "resolved" (i.e., fulfilled)
// state = "rejected"
state = stateString;
},
// rejected_callbacks.disable
// fulfilled_callbacks.disable
tuples[ 3 - i ][ 2 ].disable,
// progress_callbacks.lock
tuples[ 0 ][ 2 ].lock
);
}
// progress_handlers.fire
// fulfilled_handlers.fire
// rejected_handlers.fire
list.add( tuple[ 3 ].fire );
// deferred.notify = function() { deferred.notifyWith(...) }
// deferred.resolve = function() { deferred.resolveWith(...) }
// deferred.reject = function() { deferred.rejectWith(...) }
deferred[ tuple[ 0 ] ] = function() {
deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments );
return this;
};
// deferred.notifyWith = list.fireWith
// deferred.resolveWith = list.fireWith
// deferred.rejectWith = list.fireWith
deferred[ tuple[ 0 ] + "With" ] = list.fireWith;
} );
// Make the deferred a promise
promise.promise( deferred );
// Call given func if any
if ( func ) {
func.call( deferred, deferred );
}
// All done!
return deferred;
},
// Deferred helper
when: function( singleValue ) {
var
// count of uncompleted subordinates
remaining = arguments.length,
// count of unprocessed arguments
i = remaining,
// subordinate fulfillment data
resolveContexts = Array( i ),
resolveValues = slice.call( arguments ),
// the master Deferred
master = jQuery.Deferred(),
// subordinate callback factory
updateFunc = function( i ) {
return function( value ) {
resolveContexts[ i ] = this;
resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value;
if ( !( --remaining ) ) {
master.resolveWith( resolveContexts, resolveValues );
}
};
};
// Single- and empty arguments are adopted like Promise.resolve
if ( remaining <= 1 ) {
adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject,
!remaining );
// Use .then() to unwrap secondary thenables (cf. gh-3000)
if ( master.state() === "pending" ||
jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) {
return master.then();
}
}
// Multiple arguments are aggregated like Promise.all array elements
while ( i-- ) {
adoptValue( resolveValues[ i ], updateFunc( i ), master.reject );
}
return master.promise();
}
} );
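// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// Multiple arguments to jQuery.when aggregate like Promise.all: the master
// resolves once every subordinate resolves, passing one value per slot.
function exampleWhenAll() {
	var a = jQuery.Deferred(),
		b = jQuery.Deferred();

	jQuery.when( a, b ).done( function( x, y ) {
		// x === 1, y === 2 — one argument per subordinate, in call order
	} );
	a.resolve( 1 );
	b.resolve( 2 );
}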
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/deferred.js
|
deferred.js
|
define( [
"./core",
"./var/pnum",
"./core/access",
"./css/var/rmargin",
"./var/document",
"./var/rcssNum",
"./css/var/rnumnonpx",
"./css/var/cssExpand",
"./css/var/getStyles",
"./css/var/swap",
"./css/curCSS",
"./css/adjustCSS",
"./css/addGetHookIf",
"./css/support",
"./core/init",
"./core/ready",
"./selector" // contains
], function( jQuery, pnum, access, rmargin, document, rcssNum, rnumnonpx, cssExpand,
getStyles, swap, curCSS, adjustCSS, addGetHookIf, support ) {
"use strict";
var
// Swappable if display is none or starts with table
// except "table", "table-cell", or "table-caption"
// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
rdisplayswap = /^(none|table(?!-c[ea]).+)/,
rcustomProp = /^--/,
cssShow = { position: "absolute", visibility: "hidden", display: "block" },
cssNormalTransform = {
letterSpacing: "0",
fontWeight: "400"
},
cssPrefixes = [ "Webkit", "Moz", "ms" ],
emptyStyle = document.createElement( "div" ).style;
// Return a css property mapped to a potentially vendor prefixed property
function vendorPropName( name ) {
// Shortcut for names that are not vendor prefixed
if ( name in emptyStyle ) {
return name;
}
// Check for vendor prefixed names
var capName = name[ 0 ].toUpperCase() + name.slice( 1 ),
i = cssPrefixes.length;
while ( i-- ) {
name = cssPrefixes[ i ] + capName;
if ( name in emptyStyle ) {
return name;
}
}
}
// Return a property mapped along what jQuery.cssProps suggests or to
// a vendor prefixed property.
function finalPropName( name ) {
var ret = jQuery.cssProps[ name ];
if ( !ret ) {
ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name;
}
return ret;
}
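// --- Editor's sketch (illustration only, not upstream jQuery) ---
// finalPropName caches the vendor-prefix lookup in jQuery.cssProps, so the
// style probe runs once per name. Which properties need a prefix depends on
// the running browser; "userSelect" below is just an example.
function exampleFinalPropName() {
	var resolved = finalPropName( "userSelect" );

	// In an old WebKit build this could be "WebkitUserSelect"; in current
	// browsers it is simply "userSelect". Either way the result is cached:
	return resolved === jQuery.cssProps.userSelect; // true
}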
function setPositiveNumber( elem, value, subtract ) {
// Any relative (+/-) values have already been
// normalized at this point
var matches = rcssNum.exec( value );
return matches ?
// Guard against undefined "subtract", e.g., when used as in cssHooks
Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) :
value;
}
function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) {
var i,
val = 0;
// If we already have the right measurement, avoid augmentation
if ( extra === ( isBorderBox ? "border" : "content" ) ) {
i = 4;
// Otherwise initialize for horizontal or vertical properties
} else {
i = name === "width" ? 1 : 0;
}
for ( ; i < 4; i += 2 ) {
// Both box models exclude margin, so add it if we want it
if ( extra === "margin" ) {
val += jQuery.css( elem, extra + cssExpand[ i ], true, styles );
}
if ( isBorderBox ) {
// border-box includes padding, so remove it if we want content
if ( extra === "content" ) {
val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
}
// At this point, extra isn't border nor margin, so remove border
if ( extra !== "margin" ) {
val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
}
} else {
// At this point, extra isn't content, so add padding
val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
// At this point, extra isn't content nor padding, so add border
if ( extra !== "padding" ) {
val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
}
}
}
return val;
}
function getWidthOrHeight( elem, name, extra ) {
// Start with computed style
var valueIsBorderBox,
styles = getStyles( elem ),
val = curCSS( elem, name, styles ),
isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box";
// Computed unit is not pixels. Stop here and return.
if ( rnumnonpx.test( val ) ) {
return val;
}
// Check for style in case a browser which returns unreliable values
// for getComputedStyle silently falls back to the reliable elem.style
valueIsBorderBox = isBorderBox &&
( support.boxSizingReliable() || val === elem.style[ name ] );
// Fall back to offsetWidth/Height when value is "auto"
// This happens for inline elements with no explicit setting (gh-3571)
if ( val === "auto" ) {
val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ];
}
// Normalize "", auto, and prepare for extra
val = parseFloat( val ) || 0;
// Use the active box-sizing model to add/subtract irrelevant styles
return ( val +
augmentWidthOrHeight(
elem,
name,
extra || ( isBorderBox ? "border" : "content" ),
valueIsBorderBox,
styles
)
) + "px";
}
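// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// augmentWidthOrHeight walks padding/border in side pairs to convert between
// box models. Assumes the full jQuery build (manipulation and dimensions
// modules loaded) and a browser where boxSizingReliable holds.
function exampleBoxModelWidth() {
	var div = jQuery( "<div></div>" ).css( {
		boxSizing: "border-box",
		width: "100px",
		padding: "10px",
		borderWidth: "5px",
		borderStyle: "solid"
	} ).appendTo( document.body );

	var cssWidth = div.css( "width" ); // "100px" — the border-box value
	var contentWidth = div.width();    // 70 — 2*10px padding and 2*5px border removed
	div.remove();
	return [ cssWidth, contentWidth ];
}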
jQuery.extend( {
// Add in style property hooks for overriding the default
// behavior of getting and setting a style property
cssHooks: {
opacity: {
get: function( elem, computed ) {
if ( computed ) {
// We should always get a number back from opacity
var ret = curCSS( elem, "opacity" );
return ret === "" ? "1" : ret;
}
}
}
},
// Don't automatically add "px" to these possibly-unitless properties
cssNumber: {
"animationIterationCount": true,
"columnCount": true,
"fillOpacity": true,
"flexGrow": true,
"flexShrink": true,
"fontWeight": true,
"lineHeight": true,
"opacity": true,
"order": true,
"orphans": true,
"widows": true,
"zIndex": true,
"zoom": true
},
// Add in properties whose names you wish to fix before
// setting or getting the value
cssProps: {
"float": "cssFloat"
},
// Get and set the style property on a DOM Node
style: function( elem, name, value, extra ) {
// Don't set styles on text and comment nodes
if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
return;
}
// Make sure that we're working with the right name
var ret, type, hooks,
origName = jQuery.camelCase( name ),
isCustomProp = rcustomProp.test( name ),
style = elem.style;
// Make sure that we're working with the right name. We don't
// want to query the value if it is a CSS custom property
// since they are user-defined.
if ( !isCustomProp ) {
name = finalPropName( origName );
}
// Gets hook for the prefixed version, then unprefixed version
hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
// Check if we're setting a value
if ( value !== undefined ) {
type = typeof value;
// Convert "+=" or "-=" to relative numbers (#7345)
if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) {
value = adjustCSS( elem, name, ret );
// Fixes bug #9237
type = "number";
}
// Make sure that null and NaN values aren't set (#7116)
if ( value == null || value !== value ) {
return;
}
// If a number was passed in, add the unit (except for certain CSS properties)
if ( type === "number" ) {
value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" );
}
// background-* props affect original clone's values
if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) {
style[ name ] = "inherit";
}
// If a hook was provided, use that value, otherwise just set the specified value
if ( !hooks || !( "set" in hooks ) ||
( value = hooks.set( elem, value, extra ) ) !== undefined ) {
if ( isCustomProp ) {
style.setProperty( name, value );
} else {
style[ name ] = value;
}
}
} else {
// If a hook was provided get the non-computed value from there
if ( hooks && "get" in hooks &&
( ret = hooks.get( elem, false, extra ) ) !== undefined ) {
return ret;
}
// Otherwise just get the value from the style object
return style[ name ];
}
},
css: function( elem, name, extra, styles ) {
var val, num, hooks,
origName = jQuery.camelCase( name ),
isCustomProp = rcustomProp.test( name );
// Make sure that we're working with the right name. We don't
// want to modify the value if it is a CSS custom property
// since they are user-defined.
if ( !isCustomProp ) {
name = finalPropName( origName );
}
// Try prefixed name followed by the unprefixed name
hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
// If a hook was provided get the computed value from there
if ( hooks && "get" in hooks ) {
val = hooks.get( elem, true, extra );
}
// Otherwise, if a way to get the computed value exists, use that
if ( val === undefined ) {
val = curCSS( elem, name, styles );
}
// Convert "normal" to computed value
if ( val === "normal" && name in cssNormalTransform ) {
val = cssNormalTransform[ name ];
}
// Make numeric if forced or a qualifier was provided and val looks numeric
if ( extra === "" || extra ) {
num = parseFloat( val );
return extra === true || isFinite( num ) ? num || 0 : val;
}
return val;
}
} );
jQuery.each( [ "height", "width" ], function( i, name ) {
jQuery.cssHooks[ name ] = {
get: function( elem, computed, extra ) {
if ( computed ) {
				// Certain elements only expose dimension info if we invisibly show them,
				// but the swap is worthwhile only for display styles matching rdisplayswap
return rdisplayswap.test( jQuery.css( elem, "display" ) ) &&
// Support: Safari 8+
// Table columns in Safari have non-zero offsetWidth & zero
// getBoundingClientRect().width unless display is changed.
// Support: IE <=11 only
// Running getBoundingClientRect on a disconnected node
// in IE throws an error.
( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ?
swap( elem, cssShow, function() {
return getWidthOrHeight( elem, name, extra );
} ) :
getWidthOrHeight( elem, name, extra );
}
},
set: function( elem, value, extra ) {
var matches,
styles = extra && getStyles( elem ),
subtract = extra && augmentWidthOrHeight(
elem,
name,
extra,
jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
styles
);
// Convert to pixels if value adjustment is needed
if ( subtract && ( matches = rcssNum.exec( value ) ) &&
( matches[ 3 ] || "px" ) !== "px" ) {
elem.style[ name ] = value;
value = jQuery.css( elem, name );
}
return setPositiveNumber( elem, value, subtract );
}
};
} );
jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft,
function( elem, computed ) {
if ( computed ) {
return ( parseFloat( curCSS( elem, "marginLeft" ) ) ||
elem.getBoundingClientRect().left -
swap( elem, { marginLeft: 0 }, function() {
return elem.getBoundingClientRect().left;
} )
) + "px";
}
}
);
// These hooks are used by animate to expand properties
jQuery.each( {
margin: "",
padding: "",
border: "Width"
}, function( prefix, suffix ) {
jQuery.cssHooks[ prefix + suffix ] = {
expand: function( value ) {
var i = 0,
expanded = {},
// Assumes a single number if not a string
parts = typeof value === "string" ? value.split( " " ) : [ value ];
for ( ; i < 4; i++ ) {
expanded[ prefix + cssExpand[ i ] + suffix ] =
parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
}
return expanded;
}
};
if ( !rmargin.test( prefix ) ) {
jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
}
} );
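// --- Editor's sketch (illustration only, not upstream jQuery) ---
// The expand hook applies the CSS shorthand fallback order: a missing side
// falls back to parts[ i - 2 ] (its counterpart on the same axis) and
// finally to parts[ 0 ].
function exampleExpandMargin() {
	return jQuery.cssHooks.margin.expand( "1px 2px" );
	// => { marginTop: "1px", marginRight: "2px",
	//      marginBottom: "1px", marginLeft: "2px" }
}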
jQuery.fn.extend( {
css: function( name, value ) {
return access( this, function( elem, name, value ) {
var styles, len,
map = {},
i = 0;
if ( Array.isArray( name ) ) {
styles = getStyles( elem );
len = name.length;
for ( ; i < len; i++ ) {
map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles );
}
return map;
}
return value !== undefined ?
jQuery.style( elem, name, value ) :
jQuery.css( elem, name );
}, name, value, arguments.length > 1 );
}
} );
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/css.js
|
css.js
|
define( [
"./core",
"./core/access",
"./data/var/dataPriv",
"./data/var/dataUser"
], function( jQuery, access, dataPriv, dataUser ) {
"use strict";
// Implementation Summary
//
// 1. Enforce API surface and semantic compatibility with 1.9.x branch
// 2. Improve the module's maintainability by reducing the storage
// paths to a single mechanism.
// 3. Use the same single mechanism to support "private" and "user" data.
// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData)
	// 5. Avoid exposing implementation details on user objects (e.g. expando properties)
// 6. Provide a clear path for implementation upgrade to WeakMap in 2014
var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,
rmultiDash = /[A-Z]/g;
function getData( data ) {
if ( data === "true" ) {
return true;
}
if ( data === "false" ) {
return false;
}
if ( data === "null" ) {
return null;
}
// Only convert to a number if it doesn't change the string
if ( data === +data + "" ) {
return +data;
}
if ( rbrace.test( data ) ) {
return JSON.parse( data );
}
return data;
}
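// --- Editor's sketch (illustration only, not upstream jQuery) ---
// getData's coercion rules for strings read from data-* attributes:
function exampleGetData() {
	getData( "true" );         // true — boolean keyword
	getData( "42" );           // 42 — numeric cast leaves the string intact
	getData( "42px" );         // "42px" — numeric cast would change it, so kept
	getData( "{\"a\":1}" );    // { a: 1 } — rbrace matches, JSON.parse applies
	return getData( "hello" ); // "hello" — returned untouched
}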
function dataAttr( elem, key, data ) {
var name;
// If nothing was found internally, try to fetch any
// data from the HTML5 data-* attribute
if ( data === undefined && elem.nodeType === 1 ) {
name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase();
data = elem.getAttribute( name );
if ( typeof data === "string" ) {
try {
data = getData( data );
} catch ( e ) {}
// Make sure we set the data so it isn't changed later
dataUser.set( elem, key, data );
} else {
data = undefined;
}
}
return data;
}
jQuery.extend( {
hasData: function( elem ) {
return dataUser.hasData( elem ) || dataPriv.hasData( elem );
},
data: function( elem, name, data ) {
return dataUser.access( elem, name, data );
},
removeData: function( elem, name ) {
dataUser.remove( elem, name );
},
// TODO: Now that all calls to _data and _removeData have been replaced
// with direct calls to dataPriv methods, these can be deprecated.
_data: function( elem, name, data ) {
return dataPriv.access( elem, name, data );
},
_removeData: function( elem, name ) {
dataPriv.remove( elem, name );
}
} );
jQuery.fn.extend( {
data: function( key, value ) {
var i, name, data,
elem = this[ 0 ],
attrs = elem && elem.attributes;
// Gets all values
if ( key === undefined ) {
if ( this.length ) {
data = dataUser.get( elem );
if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) {
i = attrs.length;
while ( i-- ) {
// Support: IE 11 only
// The attrs elements can be null (#14894)
if ( attrs[ i ] ) {
name = attrs[ i ].name;
if ( name.indexOf( "data-" ) === 0 ) {
name = jQuery.camelCase( name.slice( 5 ) );
dataAttr( elem, name, data[ name ] );
}
}
}
dataPriv.set( elem, "hasDataAttrs", true );
}
}
return data;
}
// Sets multiple values
if ( typeof key === "object" ) {
return this.each( function() {
dataUser.set( this, key );
} );
}
return access( this, function( value ) {
var data;
// The calling jQuery object (element matches) is not empty
			// (and therefore has an element at this[ 0 ]) and the
// `value` parameter was not undefined. An empty jQuery object
// will result in `undefined` for elem = this[ 0 ] which will
// throw an exception if an attempt to read a data cache is made.
if ( elem && value === undefined ) {
// Attempt to get data from the cache
// The key will always be camelCased in Data
data = dataUser.get( elem, key );
if ( data !== undefined ) {
return data;
}
// Attempt to "discover" the data in
// HTML5 custom data-* attrs
data = dataAttr( elem, key );
if ( data !== undefined ) {
return data;
}
// We tried really hard, but the data doesn't exist.
return;
}
// Set the data...
this.each( function() {
// We always store the camelCased key
dataUser.set( this, key, value );
} );
}, null, value, arguments.length > 1, null, true );
},
removeData: function( key ) {
return this.each( function() {
dataUser.remove( this, key );
} );
}
} );
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/data.js
|
data.js
|
define( [
"./core",
"./data/var/dataPriv",
"./deferred",
"./callbacks"
], function( jQuery, dataPriv ) {
"use strict";
jQuery.extend( {
queue: function( elem, type, data ) {
var queue;
if ( elem ) {
type = ( type || "fx" ) + "queue";
queue = dataPriv.get( elem, type );
// Speed up dequeue by getting out quickly if this is just a lookup
if ( data ) {
if ( !queue || Array.isArray( data ) ) {
queue = dataPriv.access( elem, type, jQuery.makeArray( data ) );
} else {
queue.push( data );
}
}
return queue || [];
}
},
dequeue: function( elem, type ) {
type = type || "fx";
var queue = jQuery.queue( elem, type ),
startLength = queue.length,
fn = queue.shift(),
hooks = jQuery._queueHooks( elem, type ),
next = function() {
jQuery.dequeue( elem, type );
};
// If the fx queue is dequeued, always remove the progress sentinel
if ( fn === "inprogress" ) {
fn = queue.shift();
startLength--;
}
if ( fn ) {
// Add a progress sentinel to prevent the fx queue from being
// automatically dequeued
if ( type === "fx" ) {
queue.unshift( "inprogress" );
}
// Clear up the last queue stop function
delete hooks.stop;
fn.call( elem, next, hooks );
}
if ( !startLength && hooks ) {
hooks.empty.fire();
}
},
// Not public - generate a queueHooks object, or return the current one
_queueHooks: function( elem, type ) {
var key = type + "queueHooks";
return dataPriv.get( elem, key ) || dataPriv.access( elem, key, {
empty: jQuery.Callbacks( "once memory" ).add( function() {
dataPriv.remove( elem, [ type + "queue", key ] );
} )
} );
}
} );
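// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// The "inprogress" sentinel keeps the fx queue from auto-advancing: each
// queued function must call next() (or use the hooks) to hand off control.
// Assumes a browser DOM; the function is never invoked here.
function exampleFxQueue() {
	var order = [];

	jQuery.queue( document.body, "fx", function( next ) {
		order.push( "first" );
		next(); // without this, "second" would never run
	} );
	jQuery.queue( document.body, "fx", function( next ) {
		order.push( "second" );
		next();
	} );
	jQuery.dequeue( document.body, "fx" ); // order becomes [ "first", "second" ]
	return order;
}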
jQuery.fn.extend( {
queue: function( type, data ) {
var setter = 2;
if ( typeof type !== "string" ) {
data = type;
type = "fx";
setter--;
}
if ( arguments.length < setter ) {
return jQuery.queue( this[ 0 ], type );
}
return data === undefined ?
this :
this.each( function() {
var queue = jQuery.queue( this, type, data );
				// Ensure a queueHooks object exists for this queue
jQuery._queueHooks( this, type );
if ( type === "fx" && queue[ 0 ] !== "inprogress" ) {
jQuery.dequeue( this, type );
}
} );
},
dequeue: function( type ) {
return this.each( function() {
jQuery.dequeue( this, type );
} );
},
clearQueue: function( type ) {
return this.queue( type || "fx", [] );
},
// Get a promise resolved when queues of a certain type
// are emptied (fx is the type by default)
promise: function( type, obj ) {
var tmp,
count = 1,
defer = jQuery.Deferred(),
elements = this,
i = this.length,
resolve = function() {
if ( !( --count ) ) {
defer.resolveWith( elements, [ elements ] );
}
};
if ( typeof type !== "string" ) {
obj = type;
type = undefined;
}
type = type || "fx";
while ( i-- ) {
tmp = dataPriv.get( elements[ i ], type + "queueHooks" );
if ( tmp && tmp.empty ) {
count++;
tmp.empty.add( resolve );
}
}
resolve();
return defer.promise( obj );
}
} );
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/queue.js
|
queue.js
|
define( [
"./core",
"./var/indexOf",
"./traversing/var/dir",
"./traversing/var/siblings",
"./traversing/var/rneedsContext",
"./core/nodeName",
"./core/init",
"./traversing/findFilter",
"./selector"
], function( jQuery, indexOf, dir, siblings, rneedsContext, nodeName ) {
"use strict";
var rparentsprev = /^(?:parents|prev(?:Until|All))/,
// Methods guaranteed to produce a unique set when starting from a unique set
guaranteedUnique = {
children: true,
contents: true,
next: true,
prev: true
};
jQuery.fn.extend( {
has: function( target ) {
var targets = jQuery( target, this ),
l = targets.length;
return this.filter( function() {
var i = 0;
for ( ; i < l; i++ ) {
if ( jQuery.contains( this, targets[ i ] ) ) {
return true;
}
}
} );
},
closest: function( selectors, context ) {
var cur,
i = 0,
l = this.length,
matched = [],
targets = typeof selectors !== "string" && jQuery( selectors );
// Positional selectors never match, since there's no _selection_ context
if ( !rneedsContext.test( selectors ) ) {
for ( ; i < l; i++ ) {
for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) {
// Always skip document fragments
if ( cur.nodeType < 11 && ( targets ?
targets.index( cur ) > -1 :
// Don't pass non-elements to Sizzle
cur.nodeType === 1 &&
jQuery.find.matchesSelector( cur, selectors ) ) ) {
matched.push( cur );
break;
}
}
}
}
return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched );
},
// Determine the position of an element within the set
index: function( elem ) {
// No argument, return index in parent
if ( !elem ) {
return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1;
}
// Index in selector
if ( typeof elem === "string" ) {
return indexOf.call( jQuery( elem ), this[ 0 ] );
}
// Locate the position of the desired element
return indexOf.call( this,
// If it receives a jQuery object, the first element is used
elem.jquery ? elem[ 0 ] : elem
);
},
add: function( selector, context ) {
return this.pushStack(
jQuery.uniqueSort(
jQuery.merge( this.get(), jQuery( selector, context ) )
)
);
},
addBack: function( selector ) {
return this.add( selector == null ?
this.prevObject : this.prevObject.filter( selector )
);
}
} );
function sibling( cur, dir ) {
while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {}
return cur;
}
jQuery.each( {
parent: function( elem ) {
var parent = elem.parentNode;
return parent && parent.nodeType !== 11 ? parent : null;
},
parents: function( elem ) {
return dir( elem, "parentNode" );
},
parentsUntil: function( elem, i, until ) {
return dir( elem, "parentNode", until );
},
next: function( elem ) {
return sibling( elem, "nextSibling" );
},
prev: function( elem ) {
return sibling( elem, "previousSibling" );
},
nextAll: function( elem ) {
return dir( elem, "nextSibling" );
},
prevAll: function( elem ) {
return dir( elem, "previousSibling" );
},
nextUntil: function( elem, i, until ) {
return dir( elem, "nextSibling", until );
},
prevUntil: function( elem, i, until ) {
return dir( elem, "previousSibling", until );
},
siblings: function( elem ) {
return siblings( ( elem.parentNode || {} ).firstChild, elem );
},
children: function( elem ) {
return siblings( elem.firstChild );
},
contents: function( elem ) {
if ( nodeName( elem, "iframe" ) ) {
return elem.contentDocument;
}
// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
// Treat the template element as a regular one in browsers that
// don't support it.
if ( nodeName( elem, "template" ) ) {
elem = elem.content || elem;
}
return jQuery.merge( [], elem.childNodes );
}
}, function( name, fn ) {
jQuery.fn[ name ] = function( until, selector ) {
var matched = jQuery.map( this, fn, until );
if ( name.slice( -5 ) !== "Until" ) {
selector = until;
}
if ( selector && typeof selector === "string" ) {
matched = jQuery.filter( selector, matched );
}
if ( this.length > 1 ) {
// Remove duplicates
if ( !guaranteedUnique[ name ] ) {
jQuery.uniqueSort( matched );
}
// Reverse order for parents* and prev-derivatives
if ( rparentsprev.test( name ) ) {
matched.reverse();
}
}
return this.pushStack( matched );
};
} );
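// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// The generated traversal methods share one wrapper: "Until" variants take a
// stop selector, the rest treat the first argument as a filter. Assumes the
// full jQuery build; declarative only, never invoked.
function exampleTraversal() {
	var span = jQuery( "<div><p><span></span></p></div>" ).find( "span" );

	span.parents();             // [ <p>, <div> ] — closest ancestor first
	span.parentsUntil( "div" ); // [ <p> ] — stops before the <div>
	return span.closest( "p" ).length; // 1
}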
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/traversing.js
|
traversing.js
|
define( [
"./core",
"./core/access",
"./var/document",
"./var/documentElement",
"./css/var/rnumnonpx",
"./css/curCSS",
"./css/addGetHookIf",
"./css/support",
"./core/nodeName",
"./core/init",
"./css",
"./selector" // contains
], function( jQuery, access, document, documentElement, rnumnonpx,
curCSS, addGetHookIf, support, nodeName ) {
"use strict";
jQuery.offset = {
setOffset: function( elem, options, i ) {
var curPosition, curLeft, curCSSTop, curTop, curOffset, curCSSLeft, calculatePosition,
position = jQuery.css( elem, "position" ),
curElem = jQuery( elem ),
props = {};
		// Set position first, in case top/left are set even on static elem
if ( position === "static" ) {
elem.style.position = "relative";
}
curOffset = curElem.offset();
curCSSTop = jQuery.css( elem, "top" );
curCSSLeft = jQuery.css( elem, "left" );
calculatePosition = ( position === "absolute" || position === "fixed" ) &&
( curCSSTop + curCSSLeft ).indexOf( "auto" ) > -1;
// Need to be able to calculate position if either
// top or left is auto and position is either absolute or fixed
if ( calculatePosition ) {
curPosition = curElem.position();
curTop = curPosition.top;
curLeft = curPosition.left;
} else {
curTop = parseFloat( curCSSTop ) || 0;
curLeft = parseFloat( curCSSLeft ) || 0;
}
if ( jQuery.isFunction( options ) ) {
// Use jQuery.extend here to allow modification of coordinates argument (gh-1848)
options = options.call( elem, i, jQuery.extend( {}, curOffset ) );
}
if ( options.top != null ) {
props.top = ( options.top - curOffset.top ) + curTop;
}
if ( options.left != null ) {
props.left = ( options.left - curOffset.left ) + curLeft;
}
if ( "using" in options ) {
options.using.call( elem, props );
} else {
curElem.css( props );
}
}
};
jQuery.fn.extend( {
offset: function( options ) {
// Preserve chaining for setter
if ( arguments.length ) {
return options === undefined ?
this :
this.each( function( i ) {
jQuery.offset.setOffset( this, options, i );
} );
}
var doc, docElem, rect, win,
elem = this[ 0 ];
if ( !elem ) {
return;
}
// Return zeros for disconnected and hidden (display: none) elements (gh-2310)
// Support: IE <=11 only
// Running getBoundingClientRect on a
// disconnected node in IE throws an error
if ( !elem.getClientRects().length ) {
return { top: 0, left: 0 };
}
rect = elem.getBoundingClientRect();
doc = elem.ownerDocument;
docElem = doc.documentElement;
win = doc.defaultView;
return {
top: rect.top + win.pageYOffset - docElem.clientTop,
left: rect.left + win.pageXOffset - docElem.clientLeft
};
},
position: function() {
if ( !this[ 0 ] ) {
return;
}
var offsetParent, offset,
elem = this[ 0 ],
parentOffset = { top: 0, left: 0 };
		// Fixed elements are offset from the window (parentOffset = { top: 0, left: 0 }),
		// because the window is their only offset parent
if ( jQuery.css( elem, "position" ) === "fixed" ) {
// Assume getBoundingClientRect is there when computed position is fixed
offset = elem.getBoundingClientRect();
} else {
// Get *real* offsetParent
offsetParent = this.offsetParent();
// Get correct offsets
offset = this.offset();
if ( !nodeName( offsetParent[ 0 ], "html" ) ) {
parentOffset = offsetParent.offset();
}
// Add offsetParent borders
parentOffset = {
top: parentOffset.top + jQuery.css( offsetParent[ 0 ], "borderTopWidth", true ),
left: parentOffset.left + jQuery.css( offsetParent[ 0 ], "borderLeftWidth", true )
};
}
// Subtract parent offsets and element margins
return {
top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ),
left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true )
};
},
	// This method will return documentElement in the following cases:
	// 1) For an element inside an iframe without an offsetParent, this method will
	//    return the documentElement of the parent window
	// 2) For a hidden or detached element
	// 3) For the body or html element, i.e. in case of the html node it will return itself
	//
	// but those exceptions were never presented as real-life use cases,
	// and the returned values might be considered preferable results.
//
// This logic, however, is not guaranteed and can change at any point in the future
offsetParent: function() {
return this.map( function() {
var offsetParent = this.offsetParent;
while ( offsetParent && jQuery.css( offsetParent, "position" ) === "static" ) {
offsetParent = offsetParent.offsetParent;
}
return offsetParent || documentElement;
} );
}
} );
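// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// .offset() is document-relative (getBoundingClientRect plus page scroll),
// .position() is relative to the offsetParent minus the element's margins.
// Assumes the full jQuery build, default styles, and no page scroll.
function exampleOffsetVsPosition() {
	var box = jQuery( "<div></div>" ).css( {
		position: "absolute",
		top: "10px",
		left: "20px",
		margin: "0"
	} ).appendTo( document.body );

	var pos = box.position(); // { top: 10, left: 20 }, offsetParent-relative
	var off = box.offset();   // document-relative, same here without scroll
	box.remove();
	return [ pos, off ];
}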
// Create scrollLeft and scrollTop methods
jQuery.each( { scrollLeft: "pageXOffset", scrollTop: "pageYOffset" }, function( method, prop ) {
var top = "pageYOffset" === prop;
jQuery.fn[ method ] = function( val ) {
return access( this, function( elem, method, val ) {
// Coalesce documents and windows
var win;
if ( jQuery.isWindow( elem ) ) {
win = elem;
} else if ( elem.nodeType === 9 ) {
win = elem.defaultView;
}
if ( val === undefined ) {
return win ? win[ prop ] : elem[ method ];
}
if ( win ) {
win.scrollTo(
!top ? val : win.pageXOffset,
top ? val : win.pageYOffset
);
} else {
elem[ method ] = val;
}
}, method, val, arguments.length );
};
} );
// Support: Safari <=7 - 9.1, Chrome <=37 - 49
// Add the top/left cssHooks using jQuery.fn.position
// Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084
// Blink bug: https://bugs.chromium.org/p/chromium/issues/detail?id=589347
// getComputedStyle returns percent when specified for top/left/bottom/right;
// rather than make the css module depend on the offset module, just check for it here
jQuery.each( [ "top", "left" ], function( i, prop ) {
jQuery.cssHooks[ prop ] = addGetHookIf( support.pixelPosition,
function( elem, computed ) {
if ( computed ) {
computed = curCSS( elem, prop );
// If curCSS returns percentage, fallback to offset
return rnumnonpx.test( computed ) ?
jQuery( elem ).position()[ prop ] + "px" :
computed;
}
}
);
} );
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/offset.js
|
offset.js
|
define( [
"./core",
"./var/document",
"./var/documentElement",
"./var/hasOwn",
"./var/indexOf"
], function( jQuery, document, documentElement, hasOwn, indexOf ) {
"use strict";
/*
* Optional (non-Sizzle) selector module for custom builds.
*
* Note that this DOES NOT SUPPORT many documented jQuery
* features in exchange for its smaller size:
*
* Attribute not equal selector
* Positional selectors (:first; :eq(n); :odd; etc.)
* Type selectors (:input; :checkbox; :button; etc.)
* State-based selectors (:animated; :visible; :hidden; etc.)
* :has(selector)
* :not(complex selector)
* custom selectors via Sizzle extensions
* Leading combinators (e.g., $collection.find("> *"))
* Reliable functionality on XML fragments
* Requiring all parts of a selector to match elements under context
* (e.g., $div.find("div > *") now matches children of $div)
* Matching against non-elements
* Reliable sorting of disconnected nodes
* querySelectorAll bug fixes (e.g., unreliable :focus on WebKit)
*
* If any of these are unacceptable tradeoffs, either use Sizzle or
* customize this stub for the project's specific needs.
*/
var hasDuplicate, sortInput,
sortStable = jQuery.expando.split( "" ).sort( sortOrder ).join( "" ) === jQuery.expando,
matches = documentElement.matches ||
documentElement.webkitMatchesSelector ||
documentElement.mozMatchesSelector ||
documentElement.oMatchesSelector ||
documentElement.msMatchesSelector,
// CSS string/identifier serialization
// https://drafts.csswg.org/cssom/#common-serializing-idioms
rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g,
fcssescape = function( ch, asCodePoint ) {
if ( asCodePoint ) {
// U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
if ( ch === "\0" ) {
return "\uFFFD";
}
// Control characters and (dependent upon position) numbers get escaped as code points
return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
}
// Other potentially-special ASCII characters get backslash-escaped
return "\\" + ch;
};
function sortOrder( a, b ) {
// Flag for duplicate removal
if ( a === b ) {
hasDuplicate = true;
return 0;
}
// Sort on method existence if only one input has compareDocumentPosition
var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
if ( compare ) {
return compare;
}
// Calculate position if both inputs belong to the same document
compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ?
a.compareDocumentPosition( b ) :
// Otherwise we know they are disconnected
1;
// Disconnected nodes
if ( compare & 1 ) {
// Choose the first element that is related to our preferred document
if ( a === document || a.ownerDocument === document &&
jQuery.contains( document, a ) ) {
return -1;
}
if ( b === document || b.ownerDocument === document &&
jQuery.contains( document, b ) ) {
return 1;
}
// Maintain original order
return sortInput ?
( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) :
0;
}
return compare & 4 ? -1 : 1;
}
function uniqueSort( results ) {
var elem,
duplicates = [],
j = 0,
i = 0;
hasDuplicate = false;
sortInput = !sortStable && results.slice( 0 );
results.sort( sortOrder );
if ( hasDuplicate ) {
while ( ( elem = results[ i++ ] ) ) {
if ( elem === results[ i ] ) {
j = duplicates.push( i );
}
}
while ( j-- ) {
results.splice( duplicates[ j ], 1 );
}
}
// Clear input after sorting to release objects
// See https://github.com/jquery/sizzle/pull/225
sortInput = null;
return results;
}
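// --- Editor's sketch (illustration only, not upstream jQuery) ---
// uniqueSort orders elements into document position and splices out
// duplicates in place. Assumes a browser DOM; never invoked here.
function exampleUniqueSort() {
	var a = document.createElement( "div" ),
		b = document.createElement( "div" );

	document.body.appendChild( a );
	document.body.appendChild( b );
	var result = uniqueSort( [ b, a, b ] ); // [ a, b ] — sorted and deduped
	document.body.removeChild( a );
	document.body.removeChild( b );
	return result;
}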
function escape( sel ) {
return ( sel + "" ).replace( rcssescape, fcssescape );
}
jQuery.extend( {
uniqueSort: uniqueSort,
unique: uniqueSort,
escapeSelector: escape,
find: function( selector, context, results, seed ) {
var elem, nodeType,
i = 0;
results = results || [];
context = context || document;
// Same basic safeguard as Sizzle
if ( !selector || typeof selector !== "string" ) {
return results;
}
// Early return if context is not an element or document
if ( ( nodeType = context.nodeType ) !== 1 && nodeType !== 9 ) {
return [];
}
if ( seed ) {
while ( ( elem = seed[ i++ ] ) ) {
if ( jQuery.find.matchesSelector( elem, selector ) ) {
results.push( elem );
}
}
} else {
jQuery.merge( results, context.querySelectorAll( selector ) );
}
return results;
},
text: function( elem ) {
var node,
ret = "",
i = 0,
nodeType = elem.nodeType;
if ( !nodeType ) {
// If no nodeType, this is expected to be an array
while ( ( node = elem[ i++ ] ) ) {
// Do not traverse comment nodes
ret += jQuery.text( node );
}
} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
// Use textContent for elements
return elem.textContent;
} else if ( nodeType === 3 || nodeType === 4 ) {
return elem.nodeValue;
}
// Do not include comment or processing instruction nodes
return ret;
},
contains: function( a, b ) {
var adown = a.nodeType === 9 ? a.documentElement : a,
bup = b && b.parentNode;
return a === bup || !!( bup && bup.nodeType === 1 && adown.contains( bup ) );
},
isXMLDoc: function( elem ) {
// documentElement is verified for cases where it doesn't yet exist
// (such as loading iframes in IE - #4833)
var documentElement = elem && ( elem.ownerDocument || elem ).documentElement;
return documentElement ? documentElement.nodeName !== "HTML" : false;
},
expr: {
attrHandle: {},
match: {
bool: new RegExp( "^(?:checked|selected|async|autofocus|autoplay|controls|defer" +
"|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped)$", "i" ),
needsContext: /^[\x20\t\r\n\f]*[>+~]/
}
}
} );
jQuery.extend( jQuery.find, {
matches: function( expr, elements ) {
return jQuery.find( expr, null, null, elements );
},
matchesSelector: function( elem, expr ) {
return matches.call( elem, expr );
},
attr: function( elem, name ) {
var fn = jQuery.expr.attrHandle[ name.toLowerCase() ],
// Don't get fooled by Object.prototype properties (jQuery #13807)
value = fn && hasOwn.call( jQuery.expr.attrHandle, name.toLowerCase() ) ?
fn( elem, name, jQuery.isXMLDoc( elem ) ) :
undefined;
return value !== undefined ? value : elem.getAttribute( name );
}
} );
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/selector-native.js
|
selector-native.js
|
define( [
"./var/arr",
"./var/document",
"./var/getProto",
"./var/slice",
"./var/concat",
"./var/push",
"./var/indexOf",
"./var/class2type",
"./var/toString",
"./var/hasOwn",
"./var/fnToString",
"./var/ObjectFunctionString",
"./var/support",
"./core/DOMEval"
], function( arr, document, getProto, slice, concat, push, indexOf,
class2type, toString, hasOwn, fnToString, ObjectFunctionString,
support, DOMEval ) {
"use strict";
var
version = "3.2.1",
// Define a local copy of jQuery
jQuery = function( selector, context ) {
// The jQuery object is actually just the init constructor 'enhanced'
// Need init if jQuery is called (just allow error to be thrown if not included)
return new jQuery.fn.init( selector, context );
},
// Support: Android <=4.0 only
// Make sure we trim BOM and NBSP
rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,
// Matches dashed string for camelizing
rmsPrefix = /^-ms-/,
rdashAlpha = /-([a-z])/g,
// Used by jQuery.camelCase as callback to replace()
fcamelCase = function( all, letter ) {
return letter.toUpperCase();
};
jQuery.fn = jQuery.prototype = {
// The current version of jQuery being used
jquery: version,
constructor: jQuery,
// The default length of a jQuery object is 0
length: 0,
toArray: function() {
return slice.call( this );
},
// Get the Nth element in the matched element set OR
// Get the whole matched element set as a clean array
get: function( num ) {
// Return all the elements in a clean array
if ( num == null ) {
return slice.call( this );
}
// Return just the one element from the set
return num < 0 ? this[ num + this.length ] : this[ num ];
},
// Take an array of elements and push it onto the stack
// (returning the new matched element set)
pushStack: function( elems ) {
// Build a new jQuery matched element set
var ret = jQuery.merge( this.constructor(), elems );
// Add the old object onto the stack (as a reference)
ret.prevObject = this;
// Return the newly-formed element set
return ret;
},
// Execute a callback for every element in the matched set.
each: function( callback ) {
return jQuery.each( this, callback );
},
map: function( callback ) {
return this.pushStack( jQuery.map( this, function( elem, i ) {
return callback.call( elem, i, elem );
} ) );
},
slice: function() {
return this.pushStack( slice.apply( this, arguments ) );
},
first: function() {
return this.eq( 0 );
},
last: function() {
return this.eq( -1 );
},
eq: function( i ) {
var len = this.length,
j = +i + ( i < 0 ? len : 0 );
return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] );
},
end: function() {
return this.prevObject || this.constructor();
},
// For internal use only.
// Behaves like an Array's method, not like a jQuery method.
push: push,
sort: arr.sort,
splice: arr.splice
};
jQuery.extend = jQuery.fn.extend = function() {
var options, name, src, copy, copyIsArray, clone,
target = arguments[ 0 ] || {},
i = 1,
length = arguments.length,
deep = false;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
// Skip the boolean and the target
target = arguments[ i ] || {};
i++;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction( target ) ) {
target = {};
}
// Extend jQuery itself if only one argument is passed
if ( i === length ) {
target = this;
i--;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( ( options = arguments[ i ] ) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging plain objects or arrays
if ( deep && copy && ( jQuery.isPlainObject( copy ) ||
( copyIsArray = Array.isArray( copy ) ) ) ) {
if ( copyIsArray ) {
copyIsArray = false;
clone = src && Array.isArray( src ) ? src : [];
} else {
clone = src && jQuery.isPlainObject( src ) ? src : {};
}
// Never move original objects, clone them
target[ name ] = jQuery.extend( deep, clone, copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
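// --- Editor's usage sketch (illustration only, not upstream jQuery) ---
// Deep vs. shallow extend: with `true` as the first argument, nested plain
// objects and arrays are merged recursively instead of overwritten.
function exampleExtend() {
	var target = { a: { x: 1 }, list: [ 1, 2 ] };

	jQuery.extend( true, target, { a: { y: 2 }, list: [ 9 ] } );
	// target.a    => { x: 1, y: 2 } — merged, not replaced
	// target.list => [ 9, 2 ]      — merged index-wise
	return target;
}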
jQuery.extend( {
// Unique for each copy of jQuery on the page
expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ),
// Assume jQuery is ready without the ready module
isReady: true,
error: function( msg ) {
throw new Error( msg );
},
noop: function() {},
isFunction: function( obj ) {
return jQuery.type( obj ) === "function";
},
isWindow: function( obj ) {
return obj != null && obj === obj.window;
},
isNumeric: function( obj ) {
// As of jQuery 3.0, isNumeric is limited to
// strings and numbers (primitives or objects)
// that can be coerced to finite numbers (gh-2662)
var type = jQuery.type( obj );
return ( type === "number" || type === "string" ) &&
// parseFloat NaNs numeric-cast false positives ("")
// ...but misinterprets leading-number strings, particularly hex literals ("0x...")
// subtraction forces infinities to NaN
!isNaN( obj - parseFloat( obj ) );
},
isPlainObject: function( obj ) {
var proto, Ctor;
// Detect obvious negatives
// Use toString instead of jQuery.type to catch host objects
if ( !obj || toString.call( obj ) !== "[object Object]" ) {
return false;
}
proto = getProto( obj );
// Objects with no prototype (e.g., `Object.create( null )`) are plain
if ( !proto ) {
return true;
}
// Objects with prototype are plain iff they were constructed by a global Object function
Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor;
return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString;
},
isEmptyObject: function( obj ) {
/* eslint-disable no-unused-vars */
// See https://github.com/eslint/eslint/issues/6125
var name;
for ( name in obj ) {
return false;
}
return true;
},
type: function( obj ) {
if ( obj == null ) {
return obj + "";
}
// Support: Android <=2.3 only (functionish RegExp)
return typeof obj === "object" || typeof obj === "function" ?
class2type[ toString.call( obj ) ] || "object" :
typeof obj;
},
// Evaluates a script in a global context
globalEval: function( code ) {
DOMEval( code );
},
// Convert dashed to camelCase; used by the css and data modules
// Support: IE <=9 - 11, Edge 12 - 13
// Microsoft forgot to hump their vendor prefix (#9572)
camelCase: function( string ) {
return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
},
each: function( obj, callback ) {
var length, i = 0;
if ( isArrayLike( obj ) ) {
length = obj.length;
for ( ; i < length; i++ ) {
if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
break;
}
}
} else {
for ( i in obj ) {
if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
break;
}
}
}
return obj;
},
// Support: Android <=4.0 only
trim: function( text ) {
return text == null ?
"" :
( text + "" ).replace( rtrim, "" );
},
// results is for internal usage only
makeArray: function( arr, results ) {
var ret = results || [];
if ( arr != null ) {
if ( isArrayLike( Object( arr ) ) ) {
jQuery.merge( ret,
typeof arr === "string" ?
[ arr ] : arr
);
} else {
push.call( ret, arr );
}
}
return ret;
},
inArray: function( elem, arr, i ) {
return arr == null ? -1 : indexOf.call( arr, elem, i );
},
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
merge: function( first, second ) {
var len = +second.length,
j = 0,
i = first.length;
for ( ; j < len; j++ ) {
first[ i++ ] = second[ j ];
}
first.length = i;
return first;
},
grep: function( elems, callback, invert ) {
var callbackInverse,
matches = [],
i = 0,
length = elems.length,
callbackExpect = !invert;
// Go through the array, only saving the items
// that pass the validator function
for ( ; i < length; i++ ) {
callbackInverse = !callback( elems[ i ], i );
if ( callbackInverse !== callbackExpect ) {
matches.push( elems[ i ] );
}
}
return matches;
},
// arg is for internal usage only
map: function( elems, callback, arg ) {
var length, value,
i = 0,
ret = [];
// Go through the array, translating each of the items to their new values
if ( isArrayLike( elems ) ) {
length = elems.length;
for ( ; i < length; i++ ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret.push( value );
}
}
// Go through every key on the object,
} else {
for ( i in elems ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret.push( value );
}
}
}
// Flatten any nested arrays
return concat.apply( [], ret );
},
// A global GUID counter for objects
guid: 1,
// Bind a function to a context, optionally partially applying any
// arguments.
proxy: function( fn, context ) {
var tmp, args, proxy;
if ( typeof context === "string" ) {
tmp = fn[ context ];
context = fn;
fn = tmp;
}
// Quick check to determine if the target is callable; in the spec
// this throws a TypeError, but we will just return undefined.
if ( !jQuery.isFunction( fn ) ) {
return undefined;
}
// Simulated bind
args = slice.call( arguments, 2 );
proxy = function() {
return fn.apply( context || this, args.concat( slice.call( arguments ) ) );
};
// Set the guid of the unique handler to match the original handler's so it can be removed
proxy.guid = fn.guid = fn.guid || jQuery.guid++;
return proxy;
},
now: Date.now,
// jQuery.support is not used in Core but other projects attach their
// properties to it so it needs to exist.
support: support
} );
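// --- Editorial usage sketch (not part of the original source) ---
// Shows the iteration utilities defined above. Note the argument order:
// ( index, value ) for jQuery.each but ( value, index ) for jQuery.map and
// jQuery.grep. Wrapped in a never-invoked function to avoid side effects.
function exampleIterationUsage() {
	var doubled = jQuery.map( [ 1, 2, 3 ], function( value ) {
			return value * 2;           // [ 2, 4, 6 ]
		} ),
		evens = jQuery.grep( [ 1, 2, 3, 4 ], function( value ) {
			return value % 2 === 0;     // [ 2, 4 ]
		} ),
		seen = [];

	// Returning false from the callback breaks out of jQuery.each early.
	jQuery.each( [ "a", "b", "c" ], function( i, value ) {
		seen.push( value );
		return value !== "b";           // stops after "b"
	} );

	// jQuery.proxy pins `this` for a later call; the proxy shares the
	// original's guid, so jQuery can still unbind it by reference.
	var counter = { n: 0 },
		bump = jQuery.proxy( function() { this.n++; }, counter );
	bump();

	return [ doubled, evens, seen, counter.n ]; // [ [2,4,6], [2,4], ["a","b"], 1 ]
}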
if ( typeof Symbol === "function" ) {
jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ];
}
// Populate the class2type map
jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ),
function( i, name ) {
class2type[ "[object " + name + "]" ] = name.toLowerCase();
} );
function isArrayLike( obj ) {
// Support: real iOS 8.2 only (not reproducible in simulator)
// `in` check used to prevent JIT error (gh-2145)
// hasOwn isn't used here due to false negatives
// regarding NodeList length in IE
var length = !!obj && "length" in obj && obj.length,
type = jQuery.type( obj );
if ( type === "function" || jQuery.isWindow( obj ) ) {
return false;
}
return type === "array" || length === 0 ||
typeof length === "number" && length > 0 && ( length - 1 ) in obj;
}
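// --- Editorial usage sketch (not part of the original source) ---
// Illustrates the behavior of the type helpers above. Wrapped in a
// never-invoked function to avoid side effects.
function exampleTypeChecks() {
	return [
		jQuery.isPlainObject( {} ),                    // true
		jQuery.isPlainObject( Object.create( null ) ), // true: no prototype
		jQuery.isPlainObject( [] ),                    // false
		jQuery.isPlainObject( window ),                // false: host object
		jQuery.type( null ),                           // "null"
		jQuery.type( /x/ ),                            // "regexp"
		jQuery.isNumeric( "" ),                        // false: parseFloat("") is NaN
		jQuery.isNumeric( Infinity ),                  // false: subtraction forces NaN
		isArrayLike( { length: 2, 0: "a", 1: "b" } )   // true: index length - 1 present
	];
}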
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/core.js
|
core.js
|
define( [
"./core",
"./manipulation/var/rcheckableType",
"./core/init",
"./traversing", // filter
"./attributes/prop"
], function( jQuery, rcheckableType ) {
"use strict";
var
rbracket = /\[\]$/,
rCRLF = /\r?\n/g,
rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i,
rsubmittable = /^(?:input|select|textarea|keygen)/i;
function buildParams( prefix, obj, traditional, add ) {
var name;
if ( Array.isArray( obj ) ) {
// Serialize array item.
jQuery.each( obj, function( i, v ) {
if ( traditional || rbracket.test( prefix ) ) {
// Treat each array item as a scalar.
add( prefix, v );
} else {
// Item is non-scalar (array or object), encode its numeric index.
buildParams(
prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]",
v,
traditional,
add
);
}
} );
} else if ( !traditional && jQuery.type( obj ) === "object" ) {
// Serialize object item.
for ( name in obj ) {
buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
}
} else {
// Serialize scalar item.
add( prefix, obj );
}
}
// Serialize an array of form elements or a set of
// key/values into a query string
jQuery.param = function( a, traditional ) {
var prefix,
s = [],
add = function( key, valueOrFunction ) {
// If value is a function, invoke it and use its return value
var value = jQuery.isFunction( valueOrFunction ) ?
valueOrFunction() :
valueOrFunction;
s[ s.length ] = encodeURIComponent( key ) + "=" +
encodeURIComponent( value == null ? "" : value );
};
// If an array was passed in, assume that it is an array of form elements.
if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
// Serialize the form elements
jQuery.each( a, function() {
add( this.name, this.value );
} );
} else {
// If traditional, encode the "old" way (the way 1.3.2 or older
// did it), otherwise encode params recursively.
for ( prefix in a ) {
buildParams( prefix, a[ prefix ], traditional, add );
}
}
// Return the resulting serialization
return s.join( "&" );
};
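// --- Editorial usage sketch (not part of the original source) ---
// Shows how buildParams encodes nested structures, and how `traditional`
// falls back to the shallow pre-1.4 encoding. Never invoked here.
function exampleParamUsage() {
	var data = { ids: [ 1, 2 ], user: { name: "x" } };

	// Recursive (default): "ids[]=1&ids[]=2&user[name]=x" once URL-decoded.
	var recursive = decodeURIComponent( jQuery.param( data ) );

	// Traditional: values are flattened: "ids=1&ids=2&user=[object Object]".
	var traditional = decodeURIComponent( jQuery.param( data, true ) );

	return [ recursive, traditional ];
}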
jQuery.fn.extend( {
serialize: function() {
return jQuery.param( this.serializeArray() );
},
serializeArray: function() {
return this.map( function() {
// Can add propHook for "elements" to filter or add form elements
var elements = jQuery.prop( this, "elements" );
return elements ? jQuery.makeArray( elements ) : this;
} )
.filter( function() {
var type = this.type;
// Use .is( ":disabled" ) so that fieldset[disabled] works
return this.name && !jQuery( this ).is( ":disabled" ) &&
rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) &&
( this.checked || !rcheckableType.test( type ) );
} )
.map( function( i, elem ) {
var val = jQuery( this ).val();
if ( val == null ) {
return null;
}
if ( Array.isArray( val ) ) {
return jQuery.map( val, function( val ) {
return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
} );
}
return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
} ).get();
}
} );
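// --- Editorial usage sketch (not part of the original source) ---
// Serializes a hypothetical form. Only named, enabled, submittable controls
// are included, and unchecked checkboxes/radios are skipped, per the filters
// above. Never invoked here; assumes a #login form exists in the page.
function exampleSerializeUsage() {
	// e.g. [ { name: "user", value: "jo" }, { name: "stay", value: "on" } ]
	var pairs = jQuery( "#login" ).serializeArray();

	// e.g. "user=jo&stay=on"
	var query = jQuery( "#login" ).serialize();

	return [ pairs, query ];
}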
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/serialize.js
|
serialize.js
|
define( [
"./core",
"./var/document",
"./var/documentElement",
"./var/rnothtmlwhite",
"./var/slice",
"./data/var/dataPriv",
"./core/nodeName",
"./core/init",
"./selector"
], function( jQuery, document, documentElement, rnothtmlwhite, slice, dataPriv, nodeName ) {
"use strict";
var
rkeyEvent = /^key/,
rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
function returnTrue() {
return true;
}
function returnFalse() {
return false;
}
// Support: IE <=9 only
// See #13393 for more info
function safeActiveElement() {
try {
return document.activeElement;
} catch ( err ) { }
}
function on( elem, types, selector, data, fn, one ) {
var origFn, type;
// Types can be a map of types/handlers
if ( typeof types === "object" ) {
// ( types-Object, selector, data )
if ( typeof selector !== "string" ) {
// ( types-Object, data )
data = data || selector;
selector = undefined;
}
for ( type in types ) {
on( elem, type, selector, data, types[ type ], one );
}
return elem;
}
if ( data == null && fn == null ) {
// ( types, fn )
fn = selector;
data = selector = undefined;
} else if ( fn == null ) {
if ( typeof selector === "string" ) {
// ( types, selector, fn )
fn = data;
data = undefined;
} else {
// ( types, data, fn )
fn = data;
data = selector;
selector = undefined;
}
}
if ( fn === false ) {
fn = returnFalse;
} else if ( !fn ) {
return elem;
}
if ( one === 1 ) {
origFn = fn;
fn = function( event ) {
// Can use an empty set, since event contains the info
jQuery().off( event );
return origFn.apply( this, arguments );
};
// Use same guid so caller can remove using origFn
fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
}
return elem.each( function() {
jQuery.event.add( this, types, fn, data, selector );
} );
}
/*
* Helper functions for managing events -- not part of the public interface.
* Props to Dean Edwards' addEvent library for many of the ideas.
*/
jQuery.event = {
global: {},
add: function( elem, types, handler, data, selector ) {
var handleObjIn, eventHandle, tmp,
events, t, handleObj,
special, handlers, type, namespaces, origType,
elemData = dataPriv.get( elem );
// Don't attach events to noData or text/comment nodes (but allow plain objects)
if ( !elemData ) {
return;
}
// Caller can pass in an object of custom data in lieu of the handler
if ( handler.handler ) {
handleObjIn = handler;
handler = handleObjIn.handler;
selector = handleObjIn.selector;
}
// Ensure that invalid selectors throw exceptions at attach time
// Evaluate against documentElement in case elem is a non-element node (e.g., document)
if ( selector ) {
jQuery.find.matchesSelector( documentElement, selector );
}
// Make sure that the handler has a unique ID, used to find/remove it later
if ( !handler.guid ) {
handler.guid = jQuery.guid++;
}
// Init the element's event structure and main handler, if this is the first
if ( !( events = elemData.events ) ) {
events = elemData.events = {};
}
if ( !( eventHandle = elemData.handle ) ) {
eventHandle = elemData.handle = function( e ) {
// Discard the second event of a jQuery.event.trigger() and
// when an event is called after a page has unloaded
return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ?
jQuery.event.dispatch.apply( elem, arguments ) : undefined;
};
}
// Handle multiple events separated by a space
types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
t = types.length;
while ( t-- ) {
tmp = rtypenamespace.exec( types[ t ] ) || [];
type = origType = tmp[ 1 ];
namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
// There *must* be a type, no attaching namespace-only handlers
if ( !type ) {
continue;
}
// If event changes its type, use the special event handlers for the changed type
special = jQuery.event.special[ type ] || {};
// If selector defined, determine special event api type, otherwise given type
type = ( selector ? special.delegateType : special.bindType ) || type;
// Update special based on newly reset type
special = jQuery.event.special[ type ] || {};
// handleObj is passed to all event handlers
handleObj = jQuery.extend( {
type: type,
origType: origType,
data: data,
handler: handler,
guid: handler.guid,
selector: selector,
needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
namespace: namespaces.join( "." )
}, handleObjIn );
// Init the event handler queue if we're the first
if ( !( handlers = events[ type ] ) ) {
handlers = events[ type ] = [];
handlers.delegateCount = 0;
// Only use addEventListener if the special events handler returns false
if ( !special.setup ||
special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
if ( elem.addEventListener ) {
elem.addEventListener( type, eventHandle );
}
}
}
if ( special.add ) {
special.add.call( elem, handleObj );
if ( !handleObj.handler.guid ) {
handleObj.handler.guid = handler.guid;
}
}
// Add to the element's handler list, delegates in front
if ( selector ) {
handlers.splice( handlers.delegateCount++, 0, handleObj );
} else {
handlers.push( handleObj );
}
// Keep track of which events have ever been used, for event optimization
jQuery.event.global[ type ] = true;
}
},
// Detach an event or set of events from an element
remove: function( elem, types, handler, selector, mappedTypes ) {
var j, origCount, tmp,
events, t, handleObj,
special, handlers, type, namespaces, origType,
elemData = dataPriv.hasData( elem ) && dataPriv.get( elem );
if ( !elemData || !( events = elemData.events ) ) {
return;
}
// Once for each type.namespace in types; type may be omitted
types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
t = types.length;
while ( t-- ) {
tmp = rtypenamespace.exec( types[ t ] ) || [];
type = origType = tmp[ 1 ];
namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
// Unbind all events (on this namespace, if provided) for the element
if ( !type ) {
for ( type in events ) {
jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
}
continue;
}
special = jQuery.event.special[ type ] || {};
type = ( selector ? special.delegateType : special.bindType ) || type;
handlers = events[ type ] || [];
tmp = tmp[ 2 ] &&
new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" );
// Remove matching events
origCount = j = handlers.length;
while ( j-- ) {
handleObj = handlers[ j ];
if ( ( mappedTypes || origType === handleObj.origType ) &&
( !handler || handler.guid === handleObj.guid ) &&
( !tmp || tmp.test( handleObj.namespace ) ) &&
( !selector || selector === handleObj.selector ||
selector === "**" && handleObj.selector ) ) {
handlers.splice( j, 1 );
if ( handleObj.selector ) {
handlers.delegateCount--;
}
if ( special.remove ) {
special.remove.call( elem, handleObj );
}
}
}
// Remove generic event handler if we removed something and no more handlers exist
// (avoids potential for endless recursion during removal of special event handlers)
if ( origCount && !handlers.length ) {
if ( !special.teardown ||
special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
jQuery.removeEvent( elem, type, elemData.handle );
}
delete events[ type ];
}
}
// Remove data and the expando if it's no longer used
if ( jQuery.isEmptyObject( events ) ) {
dataPriv.remove( elem, "handle events" );
}
},
dispatch: function( nativeEvent ) {
// Make a writable jQuery.Event from the native event object
var event = jQuery.event.fix( nativeEvent );
var i, j, ret, matched, handleObj, handlerQueue,
args = new Array( arguments.length ),
handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [],
special = jQuery.event.special[ event.type ] || {};
// Use the fix-ed jQuery.Event rather than the (read-only) native event
args[ 0 ] = event;
for ( i = 1; i < arguments.length; i++ ) {
args[ i ] = arguments[ i ];
}
event.delegateTarget = this;
// Call the preDispatch hook for the mapped type, and let it bail if desired
if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
return;
}
// Determine handlers
handlerQueue = jQuery.event.handlers.call( this, event, handlers );
// Run delegates first; they may want to stop propagation beneath us
i = 0;
while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) {
event.currentTarget = matched.elem;
j = 0;
while ( ( handleObj = matched.handlers[ j++ ] ) &&
!event.isImmediatePropagationStopped() ) {
// Triggered event must either 1) have no namespace, or 2) have namespace(s)
// a subset or equal to those in the bound event (both can have no namespace).
if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) {
event.handleObj = handleObj;
event.data = handleObj.data;
ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
handleObj.handler ).apply( matched.elem, args );
if ( ret !== undefined ) {
if ( ( event.result = ret ) === false ) {
event.preventDefault();
event.stopPropagation();
}
}
}
}
}
// Call the postDispatch hook for the mapped type
if ( special.postDispatch ) {
special.postDispatch.call( this, event );
}
return event.result;
},
handlers: function( event, handlers ) {
var i, handleObj, sel, matchedHandlers, matchedSelectors,
handlerQueue = [],
delegateCount = handlers.delegateCount,
cur = event.target;
// Find delegate handlers
if ( delegateCount &&
// Support: IE <=9
// Black-hole SVG <use> instance trees (trac-13180)
cur.nodeType &&
// Support: Firefox <=42
// Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
// https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
// Support: IE 11 only
// ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
!( event.type === "click" && event.button >= 1 ) ) {
for ( ; cur !== this; cur = cur.parentNode || this ) {
// Don't check non-elements (#13208)
// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
matchedHandlers = [];
matchedSelectors = {};
for ( i = 0; i < delegateCount; i++ ) {
handleObj = handlers[ i ];
// Don't conflict with Object.prototype properties (#13203)
sel = handleObj.selector + " ";
if ( matchedSelectors[ sel ] === undefined ) {
matchedSelectors[ sel ] = handleObj.needsContext ?
jQuery( sel, this ).index( cur ) > -1 :
jQuery.find( sel, this, null, [ cur ] ).length;
}
if ( matchedSelectors[ sel ] ) {
matchedHandlers.push( handleObj );
}
}
if ( matchedHandlers.length ) {
handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
}
}
}
}
// Add the remaining (directly-bound) handlers
cur = this;
if ( delegateCount < handlers.length ) {
handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
}
return handlerQueue;
},
addProp: function( name, hook ) {
Object.defineProperty( jQuery.Event.prototype, name, {
enumerable: true,
configurable: true,
get: jQuery.isFunction( hook ) ?
function() {
if ( this.originalEvent ) {
return hook( this.originalEvent );
}
} :
function() {
if ( this.originalEvent ) {
return this.originalEvent[ name ];
}
},
set: function( value ) {
Object.defineProperty( this, name, {
enumerable: true,
configurable: true,
writable: true,
value: value
} );
}
} );
},
fix: function( originalEvent ) {
return originalEvent[ jQuery.expando ] ?
originalEvent :
new jQuery.Event( originalEvent );
},
special: {
load: {
// Prevent triggered image.load events from bubbling to window.load
noBubble: true
},
focus: {
// Fire native event if possible so blur/focus sequence is correct
trigger: function() {
if ( this !== safeActiveElement() && this.focus ) {
this.focus();
return false;
}
},
delegateType: "focusin"
},
blur: {
trigger: function() {
if ( this === safeActiveElement() && this.blur ) {
this.blur();
return false;
}
},
delegateType: "focusout"
},
click: {
// For checkbox, fire native event so checked state will be right
trigger: function() {
if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) {
this.click();
return false;
}
},
// For cross-browser consistency, don't fire native .click() on links
_default: function( event ) {
return nodeName( event.target, "a" );
}
},
beforeunload: {
postDispatch: function( event ) {
// Support: Firefox 20+
// Firefox doesn't alert if the returnValue field is not set.
if ( event.result !== undefined && event.originalEvent ) {
event.originalEvent.returnValue = event.result;
}
}
}
}
};
jQuery.removeEvent = function( elem, type, handle ) {
// This "if" is needed for plain objects
if ( elem.removeEventListener ) {
elem.removeEventListener( type, handle );
}
};
jQuery.Event = function( src, props ) {
// Allow instantiation without the 'new' keyword
if ( !( this instanceof jQuery.Event ) ) {
return new jQuery.Event( src, props );
}
// Event object
if ( src && src.type ) {
this.originalEvent = src;
this.type = src.type;
// Events bubbling up the document may have been marked as prevented
// by a handler lower down the tree; reflect the correct value.
this.isDefaultPrevented = src.defaultPrevented ||
src.defaultPrevented === undefined &&
// Support: Android <=2.3 only
src.returnValue === false ?
returnTrue :
returnFalse;
// Create target properties
// Support: Safari <=6 - 7 only
// Target should not be a text node (#504, #13143)
this.target = ( src.target && src.target.nodeType === 3 ) ?
src.target.parentNode :
src.target;
this.currentTarget = src.currentTarget;
this.relatedTarget = src.relatedTarget;
// Event type
} else {
this.type = src;
}
// Put explicitly provided properties onto the event object
if ( props ) {
jQuery.extend( this, props );
}
// Create a timestamp if incoming event doesn't have one
this.timeStamp = src && src.timeStamp || jQuery.now();
// Mark it as fixed
this[ jQuery.expando ] = true;
};
// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
jQuery.Event.prototype = {
constructor: jQuery.Event,
isDefaultPrevented: returnFalse,
isPropagationStopped: returnFalse,
isImmediatePropagationStopped: returnFalse,
isSimulated: false,
preventDefault: function() {
var e = this.originalEvent;
this.isDefaultPrevented = returnTrue;
if ( e && !this.isSimulated ) {
e.preventDefault();
}
},
stopPropagation: function() {
var e = this.originalEvent;
this.isPropagationStopped = returnTrue;
if ( e && !this.isSimulated ) {
e.stopPropagation();
}
},
stopImmediatePropagation: function() {
var e = this.originalEvent;
this.isImmediatePropagationStopped = returnTrue;
if ( e && !this.isSimulated ) {
e.stopImmediatePropagation();
}
this.stopPropagation();
}
};
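// --- Editorial usage sketch (not part of the original source) ---
// Constructs a custom jQuery.Event and exercises the state-tracking methods
// defined on the prototype above. Never invoked here.
function exampleEventObjectUsage() {
	// `new` is optional; the constructor re-invokes itself when missing.
	var event = jQuery.Event( "custom:save", { payload: { id: 7 } } );

	event.preventDefault();
	event.stopPropagation();

	return [
		event.type,                           // "custom:save"
		event.payload.id,                     // 7
		event.isDefaultPrevented(),           // true
		event.isPropagationStopped(),         // true
		event.isImmediatePropagationStopped() // false
	];
}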
// Includes all common event props including KeyEvent and MouseEvent specific props
jQuery.each( {
altKey: true,
bubbles: true,
cancelable: true,
changedTouches: true,
ctrlKey: true,
detail: true,
eventPhase: true,
metaKey: true,
pageX: true,
pageY: true,
shiftKey: true,
view: true,
"char": true,
charCode: true,
key: true,
keyCode: true,
button: true,
buttons: true,
clientX: true,
clientY: true,
offsetX: true,
offsetY: true,
pointerId: true,
pointerType: true,
screenX: true,
screenY: true,
targetTouches: true,
toElement: true,
touches: true,
which: function( event ) {
var button = event.button;
// Add which for key events
if ( event.which == null && rkeyEvent.test( event.type ) ) {
return event.charCode != null ? event.charCode : event.keyCode;
}
// Add which for click: 1 === left; 2 === middle; 3 === right
if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) {
if ( button & 1 ) {
return 1;
}
if ( button & 2 ) {
return 3;
}
if ( button & 4 ) {
return 2;
}
return 0;
}
return event.which;
}
}, jQuery.event.addProp );
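// --- Editorial usage sketch (not part of the original source) ---
// The addProp hook above normalizes `event.which` across key and mouse
// events, so a single handler can branch on it. Never invoked here.
function exampleWhichUsage() {
	jQuery( document ).on( "mousedown keydown", function( event ) {
		// Keys: charCode/keyCode; mouse: 1 = left, 2 = middle, 3 = right.
		if ( event.which === 3 ) {
			// Right mouse button (or a key whose code happens to be 3).
		}
	} );
}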
// Create mouseenter/leave events using mouseover/out and event-time checks
// so that event delegation works in jQuery.
// Do the same for pointerenter/pointerleave and pointerover/pointerout
//
// Support: Safari 7 only
// Safari sends mouseenter too often; see:
// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
// for the description of the bug (it existed in older Chrome versions as well).
jQuery.each( {
mouseenter: "mouseover",
mouseleave: "mouseout",
pointerenter: "pointerover",
pointerleave: "pointerout"
}, function( orig, fix ) {
jQuery.event.special[ orig ] = {
delegateType: fix,
bindType: fix,
handle: function( event ) {
var ret,
target = this,
related = event.relatedTarget,
handleObj = event.handleObj;
// For mouseenter/leave call the handler if related is outside the target.
// NB: No relatedTarget if the mouse left/entered the browser window
if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
event.type = handleObj.origType;
ret = handleObj.handler.apply( this, arguments );
event.type = fix;
}
return ret;
}
};
} );
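// --- Editorial usage sketch (not part of the original source) ---
// Thanks to the mouseover/mouseout remapping above, mouseenter/mouseleave
// can be delegated even though the native events do not bubble. The
// selectors are hypothetical. Never invoked here.
function exampleMouseenterDelegation() {
	jQuery( "#menu" ).on( "mouseenter", "li", function() {
		jQuery( this ).addClass( "hover" );
	} ).on( "mouseleave", "li", function() {
		jQuery( this ).removeClass( "hover" );
	} );
}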
jQuery.fn.extend( {
on: function( types, selector, data, fn ) {
return on( this, types, selector, data, fn );
},
one: function( types, selector, data, fn ) {
return on( this, types, selector, data, fn, 1 );
},
off: function( types, selector, fn ) {
var handleObj, type;
if ( types && types.preventDefault && types.handleObj ) {
// ( event ) dispatched jQuery.Event
handleObj = types.handleObj;
jQuery( types.delegateTarget ).off(
handleObj.namespace ?
handleObj.origType + "." + handleObj.namespace :
handleObj.origType,
handleObj.selector,
handleObj.handler
);
return this;
}
if ( typeof types === "object" ) {
// ( types-object [, selector] )
for ( type in types ) {
this.off( type, selector, types[ type ] );
}
return this;
}
if ( selector === false || typeof selector === "function" ) {
// ( types [, fn] )
fn = selector;
selector = undefined;
}
if ( fn === false ) {
fn = returnFalse;
}
return this.each( function() {
jQuery.event.remove( this, types, fn, selector );
} );
}
} );
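// --- Editorial usage sketch (not part of the original source) ---
// Shows the argument shapes the on() normalizer above accepts, plus
// one-shot binding and namespaced removal. Selectors are hypothetical.
// Never invoked here.
function exampleOnOffUsage() {
	function save() {}

	// ( types, fn )
	jQuery( "#form" ).on( "submit", save );

	// ( types, selector, data, fn ): delegated, with bound data.
	jQuery( "#list" ).on( "click.menu", "li", { source: "menu" }, function( event ) {
		return event.data.source; // "menu"
	} );

	// One-shot handler: removed after the first invocation.
	jQuery( "#form" ).one( "reset", save );

	// Remove only the handlers in the .menu namespace.
	jQuery( "#list" ).off( ".menu" );
}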
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/event.js
|
event.js
|
define( [
"../core",
"../var/document",
"../data/var/dataPriv",
"../data/var/acceptData",
"../var/hasOwn",
"../event"
], function( jQuery, document, dataPriv, acceptData, hasOwn ) {
"use strict";
var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/;
jQuery.extend( jQuery.event, {
trigger: function( event, data, elem, onlyHandlers ) {
var i, cur, tmp, bubbleType, ontype, handle, special,
eventPath = [ elem || document ],
type = hasOwn.call( event, "type" ) ? event.type : event,
namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : [];
cur = tmp = elem = elem || document;
// Don't do events on text and comment nodes
if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
return;
}
// focus/blur morphs to focusin/out; ensure we're not firing them right now
if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
return;
}
if ( type.indexOf( "." ) > -1 ) {
// Namespaced trigger; create a regexp to match event type in handle()
namespaces = type.split( "." );
type = namespaces.shift();
namespaces.sort();
}
ontype = type.indexOf( ":" ) < 0 && "on" + type;
// Caller can pass in a jQuery.Event object, Object, or just an event type string
event = event[ jQuery.expando ] ?
event :
new jQuery.Event( type, typeof event === "object" && event );
// Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true)
event.isTrigger = onlyHandlers ? 2 : 3;
event.namespace = namespaces.join( "." );
event.rnamespace = event.namespace ?
new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) :
null;
// Clean up the event in case it is being reused
event.result = undefined;
if ( !event.target ) {
event.target = elem;
}
// Clone any incoming data and prepend the event, creating the handler arg list
data = data == null ?
[ event ] :
jQuery.makeArray( data, [ event ] );
// Allow special events to draw outside the lines
special = jQuery.event.special[ type ] || {};
if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) {
return;
}
// Determine event propagation path in advance, per W3C events spec (#9951)
// Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) {
bubbleType = special.delegateType || type;
if ( !rfocusMorph.test( bubbleType + type ) ) {
cur = cur.parentNode;
}
for ( ; cur; cur = cur.parentNode ) {
eventPath.push( cur );
tmp = cur;
}
// Only add window if we got to document (e.g., not plain obj or detached DOM)
if ( tmp === ( elem.ownerDocument || document ) ) {
eventPath.push( tmp.defaultView || tmp.parentWindow || window );
}
}
// Fire handlers on the event path
i = 0;
while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) {
event.type = i > 1 ?
bubbleType :
special.bindType || type;
// jQuery handler
handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] &&
dataPriv.get( cur, "handle" );
if ( handle ) {
handle.apply( cur, data );
}
// Native handler
handle = ontype && cur[ ontype ];
if ( handle && handle.apply && acceptData( cur ) ) {
event.result = handle.apply( cur, data );
if ( event.result === false ) {
event.preventDefault();
}
}
}
event.type = type;
// If nobody prevented the default action, do it now
if ( !onlyHandlers && !event.isDefaultPrevented() ) {
if ( ( !special._default ||
special._default.apply( eventPath.pop(), data ) === false ) &&
acceptData( elem ) ) {
// Call a native DOM method on the target with the same name as the event.
// Don't do default actions on window, that's where global variables be (#6170)
if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) {
// Don't re-trigger an onFOO event when we call its FOO() method
tmp = elem[ ontype ];
if ( tmp ) {
elem[ ontype ] = null;
}
// Prevent re-triggering of the same event, since we already bubbled it above
jQuery.event.triggered = type;
elem[ type ]();
jQuery.event.triggered = undefined;
if ( tmp ) {
elem[ ontype ] = tmp;
}
}
}
}
return event.result;
},
// Piggyback on a donor event to simulate a different one
// Used only for `focus(in | out)` events
simulate: function( type, elem, event ) {
var e = jQuery.extend(
new jQuery.Event(),
event,
{
type: type,
isSimulated: true
}
);
jQuery.event.trigger( e, null, elem );
}
} );
jQuery.fn.extend( {
trigger: function( type, data ) {
return this.each( function() {
jQuery.event.trigger( type, data, this );
} );
},
triggerHandler: function( type, data ) {
var elem = this[ 0 ];
if ( elem ) {
return jQuery.event.trigger( type, data, elem, true );
}
}
} );
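// --- Editorial usage sketch (not part of the original source) ---
// Contrasts trigger(), which bubbles and runs default actions, with
// triggerHandler(), which only runs jQuery handlers on the first element
// and returns the last handler's return value. Never invoked here.
function exampleTriggerUsage() {
	jQuery( "#save" ).on( "custom:save", function( event, extra ) {
		return extra; // becomes the triggerHandler() return value
	} );

	// Bubbles up to document; the data array is prepended with the event.
	jQuery( "#save" ).trigger( "custom:save", [ "payload" ] );

	// No bubbling, no native default action; returns "payload".
	return jQuery( "#save" ).triggerHandler( "custom:save", [ "payload" ] );
}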
return jQuery;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/event/trigger.js
|
trigger.js
|
define( [
"../core",
"./var/rtagName",
"./var/rscriptType",
"./wrapMap",
"./getAll",
"./setGlobalEval"
], function( jQuery, rtagName, rscriptType, wrapMap, getAll, setGlobalEval ) {
"use strict";
var rhtml = /<|&#?\w+;/;
function buildFragment( elems, context, scripts, selection, ignored ) {
var elem, tmp, tag, wrap, contains, j,
fragment = context.createDocumentFragment(),
nodes = [],
i = 0,
l = elems.length;
for ( ; i < l; i++ ) {
elem = elems[ i ];
if ( elem || elem === 0 ) {
// Add nodes directly
if ( jQuery.type( elem ) === "object" ) {
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
// Convert non-html into a text node
} else if ( !rhtml.test( elem ) ) {
nodes.push( context.createTextNode( elem ) );
// Convert html into DOM nodes
} else {
tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
// Deserialize a standard representation
tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
wrap = wrapMap[ tag ] || wrapMap._default;
tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
// Descend through wrappers to the right content
j = wrap[ 0 ];
while ( j-- ) {
tmp = tmp.lastChild;
}
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
jQuery.merge( nodes, tmp.childNodes );
// Remember the top-level container
tmp = fragment.firstChild;
// Ensure the created nodes are orphaned (#12392)
tmp.textContent = "";
}
}
}
// Remove wrapper from fragment
fragment.textContent = "";
i = 0;
while ( ( elem = nodes[ i++ ] ) ) {
// Skip elements already in the context collection (trac-4087)
if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
if ( ignored ) {
ignored.push( elem );
}
continue;
}
contains = jQuery.contains( elem.ownerDocument, elem );
// Append to fragment
tmp = getAll( fragment.appendChild( elem ), "script" );
// Preserve script evaluation history
if ( contains ) {
setGlobalEval( tmp );
}
// Capture executables
if ( scripts ) {
j = 0;
while ( ( elem = tmp[ j++ ] ) ) {
if ( rscriptType.test( elem.type || "" ) ) {
scripts.push( elem );
}
}
}
}
return fragment;
}
return buildFragment;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/manipulation/buildFragment.js
|
buildFragment.js
|
define( [
"../core",
"../var/document",
"../var/documentElement",
"../var/support"
], function( jQuery, document, documentElement, support ) {
"use strict";
( function() {
// Executing both pixelPosition & boxSizingReliable tests require only one layout
// so they're executed at the same time to save the second computation.
function computeStyleTests() {
// This is a singleton, we need to execute it only once
if ( !div ) {
return;
}
div.style.cssText =
"box-sizing:border-box;" +
"position:relative;display:block;" +
"margin:auto;border:1px;padding:1px;" +
"top:1%;width:50%";
div.innerHTML = "";
documentElement.appendChild( container );
var divStyle = window.getComputedStyle( div );
pixelPositionVal = divStyle.top !== "1%";
// Support: Android 4.0 - 4.3 only, Firefox <=3 - 44
reliableMarginLeftVal = divStyle.marginLeft === "2px";
boxSizingReliableVal = divStyle.width === "4px";
// Support: Android 4.0 - 4.3 only
// Some styles come back with percentage values, even though they shouldn't
div.style.marginRight = "50%";
pixelMarginRightVal = divStyle.marginRight === "4px";
documentElement.removeChild( container );
// Nullify the div so it won't be kept in memory; a null div also
// signals that the checks have already been performed
div = null;
}
var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal,
container = document.createElement( "div" ),
div = document.createElement( "div" );
// Finish early in limited (non-browser) environments
if ( !div.style ) {
return;
}
// Support: IE <=9 - 11 only
// Style of cloned element affects source element cloned (#8908)
div.style.backgroundClip = "content-box";
div.cloneNode( true ).style.backgroundClip = "";
support.clearCloneStyle = div.style.backgroundClip === "content-box";
container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" +
"padding:0;margin-top:1px;position:absolute";
container.appendChild( div );
jQuery.extend( support, {
pixelPosition: function() {
computeStyleTests();
return pixelPositionVal;
},
boxSizingReliable: function() {
computeStyleTests();
return boxSizingReliableVal;
},
pixelMarginRight: function() {
computeStyleTests();
return pixelMarginRightVal;
},
reliableMarginLeft: function() {
computeStyleTests();
return reliableMarginLeftVal;
}
} );
} )();
return support;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/css/support.js
|
support.js
|
define( [
"../core",
"../data/var/dataPriv",
"../css/var/isHiddenWithinTree"
], function( jQuery, dataPriv, isHiddenWithinTree ) {
"use strict";
var defaultDisplayMap = {};
function getDefaultDisplay( elem ) {
var temp,
doc = elem.ownerDocument,
nodeName = elem.nodeName,
display = defaultDisplayMap[ nodeName ];
if ( display ) {
return display;
}
temp = doc.body.appendChild( doc.createElement( nodeName ) );
display = jQuery.css( temp, "display" );
temp.parentNode.removeChild( temp );
if ( display === "none" ) {
display = "block";
}
defaultDisplayMap[ nodeName ] = display;
return display;
}
function showHide( elements, show ) {
var display, elem,
values = [],
index = 0,
length = elements.length;
// Determine new display value for elements that need to change
for ( ; index < length; index++ ) {
elem = elements[ index ];
if ( !elem.style ) {
continue;
}
display = elem.style.display;
if ( show ) {
// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
// check is required in this first loop unless we have a nonempty display value (either
// inline or about-to-be-restored)
if ( display === "none" ) {
values[ index ] = dataPriv.get( elem, "display" ) || null;
if ( !values[ index ] ) {
elem.style.display = "";
}
}
if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
values[ index ] = getDefaultDisplay( elem );
}
} else {
if ( display !== "none" ) {
values[ index ] = "none";
// Remember what we're overwriting
dataPriv.set( elem, "display", display );
}
}
}
// Set the display of the elements in a second loop to avoid constant reflow
for ( index = 0; index < length; index++ ) {
if ( values[ index ] != null ) {
elements[ index ].style.display = values[ index ];
}
}
return elements;
}
jQuery.fn.extend( {
show: function() {
return showHide( this, true );
},
hide: function() {
return showHide( this );
},
toggle: function( state ) {
if ( typeof state === "boolean" ) {
return state ? this.show() : this.hide();
}
return this.each( function() {
if ( isHiddenWithinTree( this ) ) {
jQuery( this ).show();
} else {
jQuery( this ).hide();
}
} );
}
} );
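// --- Editorial usage sketch (not part of the original source) ---
// show() restores a remembered or element-default display value rather than
// forcing "block", so table rows, flex items, etc. keep the right layout.
// The selector is hypothetical. Never invoked here.
function exampleShowHideUsage() {
	var row = jQuery( "tr.detail" );

	row.hide();          // inline display:none; prior value remembered
	row.show();          // restored (or "table-row" resolved via a temp element)
	row.toggle( false ); // explicit state: equivalent to .hide()
}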
return showHide;
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/css/showHide.js
|
showHide.js
|
define( [
"../core",
"../var/indexOf",
"./var/rneedsContext",
"../selector"
], function( jQuery, indexOf, rneedsContext ) {
"use strict";
var risSimple = /^.[^:#\[\.,]*$/;
// Implement the identical functionality for filter and not
function winnow( elements, qualifier, not ) {
if ( jQuery.isFunction( qualifier ) ) {
return jQuery.grep( elements, function( elem, i ) {
return !!qualifier.call( elem, i, elem ) !== not;
} );
}
// Single element
if ( qualifier.nodeType ) {
return jQuery.grep( elements, function( elem ) {
return ( elem === qualifier ) !== not;
} );
}
// Arraylike of elements (jQuery, arguments, Array)
if ( typeof qualifier !== "string" ) {
return jQuery.grep( elements, function( elem ) {
return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
} );
}
// Simple selector that can be filtered directly, removing non-Elements
if ( risSimple.test( qualifier ) ) {
return jQuery.filter( qualifier, elements, not );
}
// Complex selector, compare the two sets, removing non-Elements
qualifier = jQuery.filter( qualifier, elements );
return jQuery.grep( elements, function( elem ) {
return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1;
} );
}
jQuery.filter = function( expr, elems, not ) {
var elem = elems[ 0 ];
if ( not ) {
expr = ":not(" + expr + ")";
}
if ( elems.length === 1 && elem.nodeType === 1 ) {
return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
}
return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
return elem.nodeType === 1;
} ) );
};
jQuery.fn.extend( {
find: function( selector ) {
var i, ret,
len = this.length,
self = this;
if ( typeof selector !== "string" ) {
return this.pushStack( jQuery( selector ).filter( function() {
for ( i = 0; i < len; i++ ) {
if ( jQuery.contains( self[ i ], this ) ) {
return true;
}
}
} ) );
}
ret = this.pushStack( [] );
for ( i = 0; i < len; i++ ) {
jQuery.find( selector, self[ i ], ret );
}
return len > 1 ? jQuery.uniqueSort( ret ) : ret;
},
filter: function( selector ) {
return this.pushStack( winnow( this, selector || [], false ) );
},
not: function( selector ) {
return this.pushStack( winnow( this, selector || [], true ) );
},
is: function( selector ) {
return !!winnow(
this,
// If this is a positional/relative selector, check membership in the returned set
// so $("p:first").is("p:last") won't return true for a doc with two "p".
typeof selector === "string" && rneedsContext.test( selector ) ?
jQuery( selector ) :
selector || [],
false
).length;
}
} );
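// --- Editorial usage sketch (not part of the original source) ---
// winnow() above powers all four methods; a qualifier may be a selector,
// a single element, an array-like, or a predicate function. Never invoked here.
function exampleFilterUsage() {
	var items = jQuery( "li" );

	var active = items.filter( ".active" ), // selector
		rest = items.not( active ),         // array-like (a jQuery set)
		odd = items.filter( function( i ) { // predicate: ( index, elem )
			return i % 2 === 1;
		} ),
		anyActive = items.is( ".active" );  // boolean

	return [ active.length, rest.length, odd.length, anyActive ];
}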
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/traversing/findFilter.js
|
findFilter.js
|
define( [
"../core",
"./var/nonce",
"./var/rquery",
"../ajax"
], function( jQuery, nonce, rquery ) {
"use strict";
var oldCallbacks = [],
rjsonp = /(=)\?(?=&|$)|\?\?/;
// Default jsonp settings
jQuery.ajaxSetup( {
jsonp: "callback",
jsonpCallback: function() {
var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) );
this[ callback ] = true;
return callback;
}
} );
// Detect, normalize options and install callbacks for jsonp requests
jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {
var callbackName, overwritten, responseContainer,
jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ?
"url" :
typeof s.data === "string" &&
( s.contentType || "" )
.indexOf( "application/x-www-form-urlencoded" ) === 0 &&
rjsonp.test( s.data ) && "data"
);
// Handle iff the expected data type is "jsonp" or we have a parameter to set
if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) {
// Get callback name, remembering preexisting value associated with it
callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ?
s.jsonpCallback() :
s.jsonpCallback;
// Insert callback into url or form data
if ( jsonProp ) {
s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName );
} else if ( s.jsonp !== false ) {
s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName;
}
// Use data converter to retrieve json after script execution
s.converters[ "script json" ] = function() {
if ( !responseContainer ) {
jQuery.error( callbackName + " was not called" );
}
return responseContainer[ 0 ];
};
// Force json dataType
s.dataTypes[ 0 ] = "json";
// Install callback
overwritten = window[ callbackName ];
window[ callbackName ] = function() {
responseContainer = arguments;
};
// Clean-up function (fires after converters)
jqXHR.always( function() {
// If the previous value didn't exist, remove it
if ( overwritten === undefined ) {
jQuery( window ).removeProp( callbackName );
// Otherwise restore preexisting value
} else {
window[ callbackName ] = overwritten;
}
// Save back as free
if ( s[ callbackName ] ) {
// Make sure that re-using the options doesn't screw things around
s.jsonpCallback = originalSettings.jsonpCallback;
// Save the callback name for future use
oldCallbacks.push( callbackName );
}
// Call if it was a function and we have a response
if ( responseContainer && jQuery.isFunction( overwritten ) ) {
overwritten( responseContainer[ 0 ] );
}
responseContainer = overwritten = undefined;
} );
// Delegate to script
return "script";
}
} );
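// --- Editorial usage sketch (not part of the original source) ---
// A request the prefilter above rewrites: the "callback=?" placeholder is
// replaced with a generated (or user-supplied) global callback name, and the
// response script is converted back to JSON. The URL is hypothetical.
function exampleJsonpUsage() {
	return jQuery.ajax( {
		url: "https://api.example.com/items?callback=?",
		dataType: "jsonp"
	} ).then( function( data ) {
		return data;
	} );
}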
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/ajax/jsonp.js
|
jsonp.js
|
define( [
"../core",
"../var/support",
"../ajax"
], function( jQuery, support ) {
"use strict";
jQuery.ajaxSettings.xhr = function() {
try {
return new window.XMLHttpRequest();
} catch ( e ) {}
};
var xhrSuccessStatus = {
// File protocol always yields status code 0, assume 200
0: 200,
// Support: IE <=9 only
// #1450: sometimes IE returns 1223 when it should be 204
1223: 204
},
xhrSupported = jQuery.ajaxSettings.xhr();
support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported );
support.ajax = xhrSupported = !!xhrSupported;
jQuery.ajaxTransport( function( options ) {
var callback, errorCallback;
// Cross domain only allowed if supported through XMLHttpRequest
if ( support.cors || xhrSupported && !options.crossDomain ) {
return {
send: function( headers, complete ) {
var i,
xhr = options.xhr();
xhr.open(
options.type,
options.url,
options.async,
options.username,
options.password
);
// Apply custom fields if provided
if ( options.xhrFields ) {
for ( i in options.xhrFields ) {
xhr[ i ] = options.xhrFields[ i ];
}
}
// Override mime type if needed
if ( options.mimeType && xhr.overrideMimeType ) {
xhr.overrideMimeType( options.mimeType );
}
// X-Requested-With header
// For cross-domain requests, seeing as conditions for a preflight are
// akin to a jigsaw puzzle, we simply never set it to be sure.
// (it can always be set on a per-request basis or even using ajaxSetup)
// For same-domain requests, won't change header if already provided.
if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) {
headers[ "X-Requested-With" ] = "XMLHttpRequest";
}
// Set headers
for ( i in headers ) {
xhr.setRequestHeader( i, headers[ i ] );
}
// Callback
callback = function( type ) {
return function() {
if ( callback ) {
callback = errorCallback = xhr.onload =
xhr.onerror = xhr.onabort = xhr.onreadystatechange = null;
if ( type === "abort" ) {
xhr.abort();
} else if ( type === "error" ) {
// Support: IE <=9 only
// On a manual native abort, IE9 throws
// errors on any property access that is not readyState
if ( typeof xhr.status !== "number" ) {
complete( 0, "error" );
} else {
complete(
// File: protocol always yields status 0; see #8605, #14207
xhr.status,
xhr.statusText
);
}
} else {
complete(
xhrSuccessStatus[ xhr.status ] || xhr.status,
xhr.statusText,
// Support: IE <=9 only
// IE9 has no XHR2 but throws on binary (trac-11426)
// For XHR2 non-text, let the caller handle it (gh-2498)
( xhr.responseType || "text" ) !== "text" ||
typeof xhr.responseText !== "string" ?
{ binary: xhr.response } :
{ text: xhr.responseText },
xhr.getAllResponseHeaders()
);
}
}
};
};
// Listen to events
xhr.onload = callback();
errorCallback = xhr.onerror = callback( "error" );
// Support: IE 9 only
// Use onreadystatechange to replace onabort
// to handle uncaught aborts
if ( xhr.onabort !== undefined ) {
xhr.onabort = errorCallback;
} else {
xhr.onreadystatechange = function() {
// Check readyState before timeout as it changes
if ( xhr.readyState === 4 ) {
// Allow onerror to be called first,
// but that will not handle a native abort
// Also, save errorCallback to a variable
// as xhr.onerror cannot be accessed
window.setTimeout( function() {
if ( callback ) {
errorCallback();
}
} );
}
};
}
// Create the abort callback
callback = callback( "abort" );
try {
// Do send the request (this may raise an exception)
xhr.send( options.hasContent && options.data || null );
} catch ( e ) {
// #14683: Only rethrow if this hasn't been notified as an error yet
if ( callback ) {
throw e;
}
}
},
abort: function() {
if ( callback ) {
callback();
}
}
};
}
} );
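// --- Editorial usage sketch (not part of the original source) ---
// Options consumed by the XHR transport above: xhrFields properties are
// copied straight onto the XMLHttpRequest instance before send(). The URL
// is hypothetical. Never invoked here.
function exampleXhrOptionsUsage() {
	return jQuery.ajax( {
		url: "https://api.example.com/data",
		type: "GET",
		mimeType: "application/json",
		xhrFields: {
			withCredentials: true // send cookies on a CORS request
		}
	} );
}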
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/ajax/xhr.js
|
xhr.js
|
define( [
"../core",
"../core/stripAndCollapse",
"./support",
"../core/nodeName",
"../core/init"
], function( jQuery, stripAndCollapse, support, nodeName ) {
"use strict";
var rreturn = /\r/g;
jQuery.fn.extend( {
val: function( value ) {
var hooks, ret, isFunction,
elem = this[ 0 ];
if ( !arguments.length ) {
if ( elem ) {
hooks = jQuery.valHooks[ elem.type ] ||
jQuery.valHooks[ elem.nodeName.toLowerCase() ];
if ( hooks &&
"get" in hooks &&
( ret = hooks.get( elem, "value" ) ) !== undefined
) {
return ret;
}
ret = elem.value;
// Handle most common string cases
if ( typeof ret === "string" ) {
return ret.replace( rreturn, "" );
}
// Handle cases where value is null/undef or number
return ret == null ? "" : ret;
}
return;
}
isFunction = jQuery.isFunction( value );
return this.each( function( i ) {
var val;
if ( this.nodeType !== 1 ) {
return;
}
if ( isFunction ) {
val = value.call( this, i, jQuery( this ).val() );
} else {
val = value;
}
// Treat null/undefined as ""; convert numbers to string
if ( val == null ) {
val = "";
} else if ( typeof val === "number" ) {
val += "";
} else if ( Array.isArray( val ) ) {
val = jQuery.map( val, function( value ) {
return value == null ? "" : value + "";
} );
}
hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];
// If set returns undefined, fall back to normal setting
if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) {
this.value = val;
}
} );
}
} );
jQuery.extend( {
valHooks: {
option: {
get: function( elem ) {
var val = jQuery.find.attr( elem, "value" );
return val != null ?
val :
// Support: IE <=10 - 11 only
// option.text throws exceptions (#14686, #14858)
// Strip and collapse whitespace
// https://html.spec.whatwg.org/#strip-and-collapse-whitespace
stripAndCollapse( jQuery.text( elem ) );
}
},
select: {
get: function( elem ) {
var value, option, i,
options = elem.options,
index = elem.selectedIndex,
one = elem.type === "select-one",
values = one ? null : [],
max = one ? index + 1 : options.length;
if ( index < 0 ) {
i = max;
} else {
i = one ? index : 0;
}
// Loop through all the selected options
for ( ; i < max; i++ ) {
option = options[ i ];
// Support: IE <=9 only
// IE8-9 doesn't update selected after form reset (#2551)
if ( ( option.selected || i === index ) &&
// Don't return options that are disabled or in a disabled optgroup
!option.disabled &&
( !option.parentNode.disabled ||
!nodeName( option.parentNode, "optgroup" ) ) ) {
// Get the specific value for the option
value = jQuery( option ).val();
// We don't need an array for one selects
if ( one ) {
return value;
}
// Multi-Selects return an array
values.push( value );
}
}
return values;
},
set: function( elem, value ) {
var optionSet, option,
options = elem.options,
values = jQuery.makeArray( value ),
i = options.length;
while ( i-- ) {
option = options[ i ];
/* eslint-disable no-cond-assign */
if ( option.selected =
jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1
) {
optionSet = true;
}
/* eslint-enable no-cond-assign */
}
// Force browsers to behave consistently when non-matching value is set
if ( !optionSet ) {
elem.selectedIndex = -1;
}
return values;
}
}
}
} );
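// --- Editorial usage sketch (not part of the original source) ---
// The select valHooks above make .val() return a single value for
// select-one and an array for select-multiple; setting selects matching
// options. The selector is hypothetical. Never invoked here.
function exampleSelectValUsage() {
	var multi = jQuery( "select[multiple]" );

	multi.val( [ "a", "b" ] ); // selects options whose values match
	var picked = multi.val();  // [ "a", "b" ]; [] when nothing is selected

	// Setting a value with no matching option clears the selection
	// ( selectedIndex = -1 ) so browsers behave consistently.
	multi.val( [ "no-such-value" ] );

	return picked;
}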
// Radios and checkboxes getter/setter
jQuery.each( [ "radio", "checkbox" ], function() {
jQuery.valHooks[ this ] = {
set: function( elem, value ) {
if ( Array.isArray( value ) ) {
return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 );
}
}
};
if ( !support.checkOn ) {
jQuery.valHooks[ this ].get = function( elem ) {
return elem.getAttribute( "value" ) === null ? "on" : elem.value;
};
}
} );
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/attributes/val.js
|
val.js
|
define( [
"../core",
"../core/access",
"./support",
"../selector"
], function( jQuery, access, support ) {
"use strict";
var rfocusable = /^(?:input|select|textarea|button)$/i,
rclickable = /^(?:a|area)$/i;
jQuery.fn.extend( {
prop: function( name, value ) {
return access( this, jQuery.prop, name, value, arguments.length > 1 );
},
removeProp: function( name ) {
return this.each( function() {
delete this[ jQuery.propFix[ name ] || name ];
} );
}
} );
jQuery.extend( {
prop: function( elem, name, value ) {
var ret, hooks,
nType = elem.nodeType;
// Don't get/set properties on text, comment and attribute nodes
if ( nType === 3 || nType === 8 || nType === 2 ) {
return;
}
if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
// Fix name and attach hooks
name = jQuery.propFix[ name ] || name;
hooks = jQuery.propHooks[ name ];
}
if ( value !== undefined ) {
if ( hooks && "set" in hooks &&
( ret = hooks.set( elem, value, name ) ) !== undefined ) {
return ret;
}
return ( elem[ name ] = value );
}
if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
return ret;
}
return elem[ name ];
},
propHooks: {
tabIndex: {
get: function( elem ) {
// Support: IE <=9 - 11 only
// elem.tabIndex doesn't always return the
// correct value when it hasn't been explicitly set
// https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
// Use proper attribute retrieval(#12072)
var tabindex = jQuery.find.attr( elem, "tabindex" );
if ( tabindex ) {
return parseInt( tabindex, 10 );
}
if (
rfocusable.test( elem.nodeName ) ||
rclickable.test( elem.nodeName ) &&
elem.href
) {
return 0;
}
return -1;
}
}
},
propFix: {
"for": "htmlFor",
"class": "className"
}
} );
// Support: IE <=11 only
// Accessing the selectedIndex property
// forces the browser to respect setting selected
// on the option
// The getter ensures a default option is selected
// when in an optgroup
// eslint rule "no-unused-expressions" is disabled for this code
// since it considers such accesses a noop
if ( !support.optSelected ) {
jQuery.propHooks.selected = {
get: function( elem ) {
/* eslint no-unused-expressions: "off" */
var parent = elem.parentNode;
if ( parent && parent.parentNode ) {
parent.parentNode.selectedIndex;
}
return null;
},
set: function( elem ) {
/* eslint no-unused-expressions: "off" */
var parent = elem.parentNode;
if ( parent ) {
parent.selectedIndex;
if ( parent.parentNode ) {
parent.parentNode.selectedIndex;
}
}
}
};
}
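// --- Editorial usage sketch (not part of the original source) ---
// prop() reads and writes DOM properties (live state), with propFix mapping
// reserved attribute names to their property names. Never invoked here.
function examplePropUsage() {
	var box = jQuery( "input[type=checkbox]" );

	box.prop( "checked", true );    // live state, not the attribute
	var on = box.prop( "checked" ); // true

	// "for" and "class" are reserved words: propFix maps them.
	var target = jQuery( "label" ).prop( "for" ); // reads htmlFor

	return [ on, target ];
}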
jQuery.each( [
"tabIndex",
"readOnly",
"maxLength",
"cellSpacing",
"cellPadding",
"rowSpan",
"colSpan",
"useMap",
"frameBorder",
"contentEditable"
], function() {
jQuery.propFix[ this.toLowerCase() ] = this;
} );
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/attributes/prop.js
|
prop.js
|
define( [
"../core",
"../core/access",
"../core/nodeName",
"./support",
"../var/rnothtmlwhite",
"../selector"
], function( jQuery, access, nodeName, support, rnothtmlwhite ) {
"use strict";
var boolHook,
attrHandle = jQuery.expr.attrHandle;
jQuery.fn.extend( {
attr: function( name, value ) {
return access( this, jQuery.attr, name, value, arguments.length > 1 );
},
removeAttr: function( name ) {
return this.each( function() {
jQuery.removeAttr( this, name );
} );
}
} );
jQuery.extend( {
attr: function( elem, name, value ) {
var ret, hooks,
nType = elem.nodeType;
// Don't get/set attributes on text, comment and attribute nodes
if ( nType === 3 || nType === 8 || nType === 2 ) {
return;
}
// Fallback to prop when attributes are not supported
if ( typeof elem.getAttribute === "undefined" ) {
return jQuery.prop( elem, name, value );
}
// Attribute hooks are determined by the lowercase version
// Grab necessary hook if one is defined
if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
hooks = jQuery.attrHooks[ name.toLowerCase() ] ||
( jQuery.expr.match.bool.test( name ) ? boolHook : undefined );
}
if ( value !== undefined ) {
if ( value === null ) {
jQuery.removeAttr( elem, name );
return;
}
if ( hooks && "set" in hooks &&
( ret = hooks.set( elem, value, name ) ) !== undefined ) {
return ret;
}
elem.setAttribute( name, value + "" );
return value;
}
if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
return ret;
}
ret = jQuery.find.attr( elem, name );
// Non-existent attributes return null, we normalize to undefined
return ret == null ? undefined : ret;
},
attrHooks: {
type: {
set: function( elem, value ) {
if ( !support.radioValue && value === "radio" &&
nodeName( elem, "input" ) ) {
var val = elem.value;
elem.setAttribute( "type", value );
if ( val ) {
elem.value = val;
}
return value;
}
}
}
},
removeAttr: function( elem, value ) {
var name,
i = 0,
// Attribute names can contain non-HTML whitespace characters
// https://html.spec.whatwg.org/multipage/syntax.html#attributes-2
attrNames = value && value.match( rnothtmlwhite );
if ( attrNames && elem.nodeType === 1 ) {
while ( ( name = attrNames[ i++ ] ) ) {
elem.removeAttribute( name );
}
}
}
} );
// Hooks for boolean attributes
boolHook = {
set: function( elem, value, name ) {
if ( value === false ) {
// Remove boolean attributes when set to false
jQuery.removeAttr( elem, name );
} else {
elem.setAttribute( name, name );
}
return name;
}
};
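// --- Editorial usage sketch (not part of the original source) ---
// For boolean attributes the hook above normalizes writes: any truthy value
// sets attr="attr", while `false` removes the attribute entirely. Never
// invoked here.
function exampleBoolAttrUsage() {
	var field = jQuery( "input" );

	field.attr( "disabled", true );  // renders disabled="disabled"
	field.attr( "disabled", false ); // removes the attribute

	// Passing null through jQuery.attr also removes it.
	field.attr( "disabled", null );
}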
jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) {
var getter = attrHandle[ name ] || jQuery.find.attr;
attrHandle[ name ] = function( elem, name, isXML ) {
var ret, handle,
lowercaseName = name.toLowerCase();
if ( !isXML ) {
// Avoid an infinite loop by temporarily removing this function from the getter
handle = attrHandle[ lowercaseName ];
attrHandle[ lowercaseName ] = ret;
ret = getter( elem, name, isXML ) != null ?
lowercaseName :
null;
attrHandle[ lowercaseName ] = handle;
}
return ret;
};
} );
} );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/attributes/attr.js
|
attr.js
|
define( [
"../core",
"../core/stripAndCollapse",
"../var/rnothtmlwhite",
"../data/var/dataPriv",
"../core/init"
], function( jQuery, stripAndCollapse, rnothtmlwhite, dataPriv ) {
"use strict";
function getClass( elem ) {
return elem.getAttribute && elem.getAttribute( "class" ) || "";
}
jQuery.fn.extend( {
addClass: function( value ) {
var classes, elem, cur, curValue, clazz, j, finalValue,
i = 0;
if ( jQuery.isFunction( value ) ) {
return this.each( function( j ) {
jQuery( this ).addClass( value.call( this, j, getClass( this ) ) );
} );
}
if ( typeof value === "string" && value ) {
classes = value.match( rnothtmlwhite ) || [];
while ( ( elem = this[ i++ ] ) ) {
curValue = getClass( elem );
cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
if ( cur ) {
j = 0;
while ( ( clazz = classes[ j++ ] ) ) {
if ( cur.indexOf( " " + clazz + " " ) < 0 ) {
cur += clazz + " ";
}
}
// Only assign if different to avoid unneeded rendering.
finalValue = stripAndCollapse( cur );
if ( curValue !== finalValue ) {
elem.setAttribute( "class", finalValue );
}
}
}
}
return this;
},
removeClass: function( value ) {
var classes, elem, cur, curValue, clazz, j, finalValue,
i = 0;
if ( jQuery.isFunction( value ) ) {
return this.each( function( j ) {
jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) );
} );
}
if ( !arguments.length ) {
return this.attr( "class", "" );
}
if ( typeof value === "string" && value ) {
classes = value.match( rnothtmlwhite ) || [];
while ( ( elem = this[ i++ ] ) ) {
curValue = getClass( elem );
// This expression is here for better compressibility (see addClass)
cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
if ( cur ) {
j = 0;
while ( ( clazz = classes[ j++ ] ) ) {
// Remove *all* instances
while ( cur.indexOf( " " + clazz + " " ) > -1 ) {
cur = cur.replace( " " + clazz + " ", " " );
}
}
// Only assign if different to avoid unneeded rendering.
finalValue = stripAndCollapse( cur );
if ( curValue !== finalValue ) {
elem.setAttribute( "class", finalValue );
}
}
}
}
return this;
},
toggleClass: function( value, stateVal ) {
var type = typeof value;
if ( typeof stateVal === "boolean" && type === "string" ) {
return stateVal ? this.addClass( value ) : this.removeClass( value );
}
if ( jQuery.isFunction( value ) ) {
return this.each( function( i ) {
jQuery( this ).toggleClass(
value.call( this, i, getClass( this ), stateVal ),
stateVal
);
} );
}
return this.each( function() {
var className, i, self, classNames;
if ( type === "string" ) {
// Toggle individual class names
i = 0;
self = jQuery( this );
classNames = value.match( rnothtmlwhite ) || [];
while ( ( className = classNames[ i++ ] ) ) {
// Check each className given, space separated list
if ( self.hasClass( className ) ) {
self.removeClass( className );
} else {
self.addClass( className );
}
}
// Toggle whole class name
} else if ( value === undefined || type === "boolean" ) {
className = getClass( this );
if ( className ) {
// Store className if set
dataPriv.set( this, "__className__", className );
}
// If the element has a class name or if we're passed `false`,
// then remove the whole classname (if there was one, the above saved it).
// Otherwise bring back whatever was previously saved (if anything),
// falling back to the empty string if nothing was stored.
if ( this.setAttribute ) {
this.setAttribute( "class",
className || value === false ?
"" :
dataPriv.get( this, "__className__" ) || ""
);
}
}
} );
},
hasClass: function( selector ) {
var className, elem,
i = 0;
className = " " + selector + " ";
while ( ( elem = this[ i++ ] ) ) {
if ( elem.nodeType === 1 &&
( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) {
return true;
}
}
return false;
}
} );
} );
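// Usage sketch for the class APIs above (class names illustrative):
//
//   var $box = jQuery( "<div class='a b'>" );
//   $box.addClass( "b c" );       // whitespace-separated; existing "b" is skipped
//   $box.removeClass( "a" );      // removes *all* occurrences of "a"
//   $box.toggleClass( "active" ); // add when absent, remove when present
//   $box.toggleClass( false );    // strip the class attribute, remembering it
//   $box.toggleClass( true );     // restore the remembered class string
//   $box.hasClass( "c" );         // true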
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/attributes/classes.js
|
classes.js
|
define( [
"../core",
"../var/document"
], function( jQuery, document ) {
"use strict";
var readyCallbacks = [],
whenReady = function( fn ) {
readyCallbacks.push( fn );
},
executeReady = function( fn ) {
// Prevent errors from freezing future callback execution (gh-1823)
// Not backwards-compatible, as this does not execute synchronously
window.setTimeout( function() {
fn.call( document, jQuery );
} );
};
jQuery.fn.ready = function( fn ) {
whenReady( fn );
return this;
};
jQuery.extend( {
// Is the DOM ready to be used? Set to true once it occurs.
isReady: false,
// A counter to track how many items to wait for before
// the ready event fires. See #6781
readyWait: 1,
ready: function( wait ) {
// Abort if there are pending holds or we're already ready
if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
return;
}
// Remember that the DOM is ready
jQuery.isReady = true;
// If a normal DOM Ready event fired, decrement, and wait if need be
if ( wait !== true && --jQuery.readyWait > 0 ) {
return;
}
whenReady = function( fn ) {
readyCallbacks.push( fn );
while ( readyCallbacks.length ) {
fn = readyCallbacks.shift();
if ( jQuery.isFunction( fn ) ) {
executeReady( fn );
}
}
};
whenReady();
}
} );
// Make jQuery.ready Promise consumable (gh-1778)
jQuery.ready.then = jQuery.fn.ready;
/**
* The ready event handler and self cleanup method
*/
function completed() {
document.removeEventListener( "DOMContentLoaded", completed );
window.removeEventListener( "load", completed );
jQuery.ready();
}
// Catch cases where $(document).ready() is called
// after the browser event has already occurred.
// Support: IE9-10 only
// Older IE sometimes signals "interactive" too soon
if ( document.readyState === "complete" ||
( document.readyState !== "loading" && !document.documentElement.doScroll ) ) {
// Handle it asynchronously to allow scripts the opportunity to delay ready
window.setTimeout( jQuery.ready );
} else {
// Use the handy event callback
document.addEventListener( "DOMContentLoaded", completed );
// A fallback to window.onload, that will always work
window.addEventListener( "load", completed );
}
} );
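// Usage sketch: handlers queued through jQuery.fn.ready run once the DOM is
// ready, always asynchronously (gh-1823); handlers added later still fire:
//
//   jQuery( document ).ready( function( $ ) {
//       $( "body" ).addClass( "dom-ready" ); // class name illustrative
//   } );
//
// jQuery.ready( true ) releases one hold taken via jQuery.readyWait, which is
// how jQuery.holdReady (defined elsewhere) delays the event for async setup.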
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/core/ready-no-deferred.js
|
ready-no-deferred.js
|
define( [
"../core",
"../var/document",
"../core/readyException",
"../deferred"
], function( jQuery, document ) {
"use strict";
// The deferred used on DOM ready
var readyList = jQuery.Deferred();
jQuery.fn.ready = function( fn ) {
readyList
.then( fn )
// Wrap jQuery.readyException in a function so that the lookup
// happens at the time of error handling instead of callback
// registration.
.catch( function( error ) {
jQuery.readyException( error );
} );
return this;
};
jQuery.extend( {
// Is the DOM ready to be used? Set to true once it occurs.
isReady: false,
// A counter to track how many items to wait for before
// the ready event fires. See #6781
readyWait: 1,
// Handle when the DOM is ready
ready: function( wait ) {
// Abort if there are pending holds or we're already ready
if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
return;
}
// Remember that the DOM is ready
jQuery.isReady = true;
// If a normal DOM Ready event fired, decrement, and wait if need be
if ( wait !== true && --jQuery.readyWait > 0 ) {
return;
}
// If there are functions bound, execute them
readyList.resolveWith( document, [ jQuery ] );
}
} );
jQuery.ready.then = readyList.then;
// The ready event handler and self cleanup method
function completed() {
document.removeEventListener( "DOMContentLoaded", completed );
window.removeEventListener( "load", completed );
jQuery.ready();
}
// Catch cases where $(document).ready() is called
// after the browser event has already occurred.
// Support: IE <=9 - 10 only
// Older IE sometimes signals "interactive" too soon
if ( document.readyState === "complete" ||
( document.readyState !== "loading" && !document.documentElement.doScroll ) ) {
// Handle it asynchronously to allow scripts the opportunity to delay ready
window.setTimeout( jQuery.ready );
} else {
// Use the handy event callback
document.addEventListener( "DOMContentLoaded", completed );
// A fallback to window.onload, that will always work
window.addEventListener( "load", completed );
}
} );
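// Usage sketch: with the Deferred-backed variant, readiness is also
// Promise-consumable (gh-1778). Assumes the init module for the $( fn )
// shortcut; the selector is illustrative:
//
//   jQuery.ready.then( function( $ ) {
//       $( "#app" ).text( "ready" );
//   } );
//
//   // Exceptions in ready handlers surface through jQuery.readyException
//   jQuery( function() { throw new Error( "reported, not swallowed" ); } );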
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/core/ready.js
|
ready.js
|
define( [
"../core",
"../var/document",
"./var/rsingleTag",
"../traversing/findFilter"
], function( jQuery, document, rsingleTag ) {
"use strict";
// A central reference to the root jQuery(document)
var rootjQuery,
// A simple way to check for HTML strings
// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
// Strict HTML recognition (#11290: must start with <)
// Shortcut simple #id case for speed
rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
init = jQuery.fn.init = function( selector, context, root ) {
var match, elem;
// HANDLE: $(""), $(null), $(undefined), $(false)
if ( !selector ) {
return this;
}
// Method init() accepts an alternate rootjQuery
// so migrate can support jQuery.sub (gh-2101)
root = root || rootjQuery;
// Handle HTML strings
if ( typeof selector === "string" ) {
if ( selector[ 0 ] === "<" &&
selector[ selector.length - 1 ] === ">" &&
selector.length >= 3 ) {
// Assume that strings that start and end with <> are HTML and skip the regex check
match = [ null, selector, null ];
} else {
match = rquickExpr.exec( selector );
}
// Match html or make sure no context is specified for #id
if ( match && ( match[ 1 ] || !context ) ) {
// HANDLE: $(html) -> $(array)
if ( match[ 1 ] ) {
context = context instanceof jQuery ? context[ 0 ] : context;
// Option to run scripts is true for back-compat
// Intentionally let the error be thrown if parseHTML is not present
jQuery.merge( this, jQuery.parseHTML(
match[ 1 ],
context && context.nodeType ? context.ownerDocument || context : document,
true
) );
// HANDLE: $(html, props)
if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
for ( match in context ) {
// Properties of context are called as methods if possible
if ( jQuery.isFunction( this[ match ] ) ) {
this[ match ]( context[ match ] );
// ...and otherwise set as attributes
} else {
this.attr( match, context[ match ] );
}
}
}
return this;
// HANDLE: $(#id)
} else {
elem = document.getElementById( match[ 2 ] );
if ( elem ) {
// Inject the element directly into the jQuery object
this[ 0 ] = elem;
this.length = 1;
}
return this;
}
// HANDLE: $(expr, $(...))
} else if ( !context || context.jquery ) {
return ( context || root ).find( selector );
// HANDLE: $(expr, context)
// (which is just equivalent to: $(context).find(expr))
} else {
return this.constructor( context ).find( selector );
}
// HANDLE: $(DOMElement)
} else if ( selector.nodeType ) {
this[ 0 ] = selector;
this.length = 1;
return this;
// HANDLE: $(function)
// Shortcut for document ready
} else if ( jQuery.isFunction( selector ) ) {
return root.ready !== undefined ?
root.ready( selector ) :
// Execute immediately if ready is not present
selector( jQuery );
}
return jQuery.makeArray( selector, this );
};
// Give the init function the jQuery prototype for later instantiation
init.prototype = jQuery.fn;
// Initialize central reference
rootjQuery = jQuery( document );
return init;
} );
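// Usage sketch covering the main init() branches (all values illustrative):
//
//   jQuery( "<p>hi</p>" );                  // HTML string -> jQuery.parseHTML
//   jQuery( "<p/>", { "class": "note" } );  // single tag + props: methods are
//                                           // invoked, the rest set via .attr
//   jQuery( "#main" );                      // fast getElementById shortcut
//   jQuery( ".item", $parent );             // same as $parent.find( ".item" )
//   jQuery( document.body );                // DOM element wrapped directly
//   jQuery( function( $ ) {} );             // shortcut for root.ready( fn )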
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/core/init.js
|
init.js
|
define( [
"../core",
"../css"
], function( jQuery ) {
"use strict";
function Tween( elem, options, prop, end, easing ) {
return new Tween.prototype.init( elem, options, prop, end, easing );
}
jQuery.Tween = Tween;
Tween.prototype = {
constructor: Tween,
init: function( elem, options, prop, end, easing, unit ) {
this.elem = elem;
this.prop = prop;
this.easing = easing || jQuery.easing._default;
this.options = options;
this.start = this.now = this.cur();
this.end = end;
this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" );
},
cur: function() {
var hooks = Tween.propHooks[ this.prop ];
return hooks && hooks.get ?
hooks.get( this ) :
Tween.propHooks._default.get( this );
},
run: function( percent ) {
var eased,
hooks = Tween.propHooks[ this.prop ];
if ( this.options.duration ) {
this.pos = eased = jQuery.easing[ this.easing ](
percent, this.options.duration * percent, 0, 1, this.options.duration
);
} else {
this.pos = eased = percent;
}
this.now = ( this.end - this.start ) * eased + this.start;
if ( this.options.step ) {
this.options.step.call( this.elem, this.now, this );
}
if ( hooks && hooks.set ) {
hooks.set( this );
} else {
Tween.propHooks._default.set( this );
}
return this;
}
};
Tween.prototype.init.prototype = Tween.prototype;
Tween.propHooks = {
_default: {
get: function( tween ) {
var result;
// Use a property on the element directly when it is not a DOM element,
// or when there is no matching style property that exists.
if ( tween.elem.nodeType !== 1 ||
tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) {
return tween.elem[ tween.prop ];
}
// Passing an empty string as a 3rd parameter to .css will automatically
// attempt a parseFloat and fallback to a string if the parse fails.
// Simple values such as "10px" are parsed to Float;
// complex values such as "rotate(1rad)" are returned as-is.
result = jQuery.css( tween.elem, tween.prop, "" );
// Empty strings, null, undefined and "auto" are converted to 0.
return !result || result === "auto" ? 0 : result;
},
set: function( tween ) {
// Use step hook for back compat.
// Use cssHook if it's there.
// Use .style if available, and plain properties otherwise.
if ( jQuery.fx.step[ tween.prop ] ) {
jQuery.fx.step[ tween.prop ]( tween );
} else if ( tween.elem.nodeType === 1 &&
( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null ||
jQuery.cssHooks[ tween.prop ] ) ) {
jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
} else {
tween.elem[ tween.prop ] = tween.now;
}
}
}
};
// Support: IE <=9 only
// Panic-based approach to setting things on disconnected nodes
Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
set: function( tween ) {
if ( tween.elem.nodeType && tween.elem.parentNode ) {
tween.elem[ tween.prop ] = tween.now;
}
}
};
jQuery.easing = {
linear: function( p ) {
return p;
},
swing: function( p ) {
return 0.5 - Math.cos( p * Math.PI ) / 2;
},
_default: "swing"
};
jQuery.fx = Tween.prototype.init;
// Back compat <1.8 extension point
jQuery.fx.step = {};
} );
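// Usage sketch: Tween is the per-property unit behind jQuery's effects queue;
// it can also be driven by hand (element and values illustrative):
//
//   var elem = document.createElement( "div" );
//   document.body.appendChild( elem );
//   var tween = jQuery.Tween( elem, { duration: 400 }, "opacity", 0 );
//   tween.run( 0 );   // percent 0, eased through jQuery.easing.swing
//   tween.run( 0.5 ); // halfway; .now holds the interpolated value
//   tween.run( 1 );   // final value applied via jQuery.style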
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/effects/Tween.js
|
Tween.js
|
define( [
"../core",
"../var/rnothtmlwhite",
"./var/acceptData"
], function( jQuery, rnothtmlwhite, acceptData ) {
"use strict";
function Data() {
this.expando = jQuery.expando + Data.uid++;
}
Data.uid = 1;
Data.prototype = {
cache: function( owner ) {
// Check if the owner object already has a cache
var value = owner[ this.expando ];
// If not, create one
if ( !value ) {
value = {};
// We can accept data for non-element nodes in modern browsers,
// but we should not, see #8335.
// Always return an empty object.
if ( acceptData( owner ) ) {
// If it is a node unlikely to be stringify-ed or looped over
// use plain assignment
if ( owner.nodeType ) {
owner[ this.expando ] = value;
// Otherwise secure it in a non-enumerable property
// configurable must be true to allow the property to be
// deleted when data is removed
} else {
Object.defineProperty( owner, this.expando, {
value: value,
configurable: true
} );
}
}
}
return value;
},
set: function( owner, data, value ) {
var prop,
cache = this.cache( owner );
// Handle: [ owner, key, value ] args
// Always use camelCase key (gh-2257)
if ( typeof data === "string" ) {
cache[ jQuery.camelCase( data ) ] = value;
// Handle: [ owner, { properties } ] args
} else {
// Copy the properties one-by-one to the cache object
for ( prop in data ) {
cache[ jQuery.camelCase( prop ) ] = data[ prop ];
}
}
return cache;
},
get: function( owner, key ) {
return key === undefined ?
this.cache( owner ) :
// Always use camelCase key (gh-2257)
owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ];
},
access: function( owner, key, value ) {
// In cases where either:
//
// 1. No key was specified
// 2. A string key was specified, but no value provided
//
// Take the "read" path and allow the get method to determine
// which value to return, respectively either:
//
// 1. The entire cache object
// 2. The data stored at the key
//
if ( key === undefined ||
( ( key && typeof key === "string" ) && value === undefined ) ) {
return this.get( owner, key );
}
// When the key is not a string, or both a key and value
// are specified, set or extend (existing objects) with either:
//
// 1. An object of properties
// 2. A key and value
//
this.set( owner, key, value );
// Since the "set" path can have two possible entry points
// return the expected data based on which path was taken
return value !== undefined ? value : key;
},
remove: function( owner, key ) {
var i,
cache = owner[ this.expando ];
if ( cache === undefined ) {
return;
}
if ( key !== undefined ) {
// Support array or space separated string of keys
if ( Array.isArray( key ) ) {
// If key is an array of keys...
// We always set camelCase keys, so remove that.
key = key.map( jQuery.camelCase );
} else {
key = jQuery.camelCase( key );
// If a key with the spaces exists, use it.
// Otherwise, create an array by matching non-whitespace
key = key in cache ?
[ key ] :
( key.match( rnothtmlwhite ) || [] );
}
i = key.length;
while ( i-- ) {
delete cache[ key[ i ] ];
}
}
// Remove the expando if there's no more data
if ( key === undefined || jQuery.isEmptyObject( cache ) ) {
// Support: Chrome <=35 - 45
// Webkit & Blink performance suffers when deleting properties
// from DOM nodes, so set to undefined instead
// https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted)
if ( owner.nodeType ) {
owner[ this.expando ] = undefined;
} else {
delete owner[ this.expando ];
}
}
},
hasData: function( owner ) {
var cache = owner[ this.expando ];
return cache !== undefined && !jQuery.isEmptyObject( cache );
}
};
return Data;
} );
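// Usage sketch: Data backs the shared dataPriv/dataUser instances
// (src/data/var/*.js); keys are always camelCased (gh-2257):
//
//   var store = new Data();
//   var node = document.createElement( "div" );
//   store.set( node, "my-key", 1 );        // stored under "myKey"
//   store.get( node, "myKey" );            // 1
//   store.access( node, "other", 2 );      // write path, returns 2
//   store.remove( node, "my-key other" );  // space-separated removal
//   store.hasData( node );                 // false once the cache is empty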
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/src/data/Data.js
|
Data.js
|
define( [
"./var/arr",
"./var/document",
"./var/getProto",
"./var/slice",
"./var/concat",
"./var/push",
"./var/indexOf",
"./var/class2type",
"./var/toString",
"./var/hasOwn",
"./var/fnToString",
"./var/ObjectFunctionString",
"./var/support",
"./core/DOMEval"
], function( arr, document, getProto, slice, concat, push, indexOf,
class2type, toString, hasOwn, fnToString, ObjectFunctionString,
support, DOMEval ) {
"use strict";
var
version = "3.2.1",
// Define a local copy of jQuery
jQuery = function( selector, context ) {
// The jQuery object is actually just the init constructor 'enhanced'
// Need init if jQuery is called (just allow error to be thrown if not included)
return new jQuery.fn.init( selector, context );
},
// Support: Android <=4.0 only
// Make sure we trim BOM and NBSP
rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,
// Matches dashed string for camelizing
rmsPrefix = /^-ms-/,
rdashAlpha = /-([a-z])/g,
// Used by jQuery.camelCase as callback to replace()
fcamelCase = function( all, letter ) {
return letter.toUpperCase();
};
jQuery.fn = jQuery.prototype = {
// The current version of jQuery being used
jquery: version,
constructor: jQuery,
// The default length of a jQuery object is 0
length: 0,
toArray: function() {
return slice.call( this );
},
// Get the Nth element in the matched element set OR
// Get the whole matched element set as a clean array
get: function( num ) {
// Return all the elements in a clean array
if ( num == null ) {
return slice.call( this );
}
// Return just the one element from the set
return num < 0 ? this[ num + this.length ] : this[ num ];
},
// Take an array of elements and push it onto the stack
// (returning the new matched element set)
pushStack: function( elems ) {
// Build a new jQuery matched element set
var ret = jQuery.merge( this.constructor(), elems );
// Add the old object onto the stack (as a reference)
ret.prevObject = this;
// Return the newly-formed element set
return ret;
},
// Execute a callback for every element in the matched set.
each: function( callback ) {
return jQuery.each( this, callback );
},
map: function( callback ) {
return this.pushStack( jQuery.map( this, function( elem, i ) {
return callback.call( elem, i, elem );
} ) );
},
slice: function() {
return this.pushStack( slice.apply( this, arguments ) );
},
first: function() {
return this.eq( 0 );
},
last: function() {
return this.eq( -1 );
},
eq: function( i ) {
var len = this.length,
j = +i + ( i < 0 ? len : 0 );
return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] );
},
end: function() {
return this.prevObject || this.constructor();
},
// For internal use only.
// Behaves like an Array's method, not like a jQuery method.
push: push,
sort: arr.sort,
splice: arr.splice
};
jQuery.extend = jQuery.fn.extend = function() {
var options, name, src, copy, copyIsArray, clone,
target = arguments[ 0 ] || {},
i = 1,
length = arguments.length,
deep = false;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
// Skip the boolean and the target
target = arguments[ i ] || {};
i++;
}
// Handle the case when target is a string or other primitive (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction( target ) ) {
target = {};
}
// Extend jQuery itself if only one argument is passed
if ( i === length ) {
target = this;
i--;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( ( options = arguments[ i ] ) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging plain objects or arrays
if ( deep && copy && ( jQuery.isPlainObject( copy ) ||
( copyIsArray = Array.isArray( copy ) ) ) ) {
if ( copyIsArray ) {
copyIsArray = false;
clone = src && Array.isArray( src ) ? src : [];
} else {
clone = src && jQuery.isPlainObject( src ) ? src : {};
}
// Never move original objects, clone them
target[ name ] = jQuery.extend( deep, clone, copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
jQuery.extend( {
// Unique for each copy of jQuery on the page
expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ),
// Assume jQuery is ready without the ready module
isReady: true,
error: function( msg ) {
throw new Error( msg );
},
noop: function() {},
isFunction: function( obj ) {
return jQuery.type( obj ) === "function";
},
isWindow: function( obj ) {
return obj != null && obj === obj.window;
},
isNumeric: function( obj ) {
// As of jQuery 3.0, isNumeric is limited to
// strings and numbers (primitives or objects)
// that can be coerced to finite numbers (gh-2662)
var type = jQuery.type( obj );
return ( type === "number" || type === "string" ) &&
// parseFloat NaNs numeric-cast false positives ("")
// ...but misinterprets leading-number strings, particularly hex literals ("0x...")
// subtraction forces infinities to NaN
!isNaN( obj - parseFloat( obj ) );
},
isPlainObject: function( obj ) {
var proto, Ctor;
// Detect obvious negatives
// Use toString instead of jQuery.type to catch host objects
if ( !obj || toString.call( obj ) !== "[object Object]" ) {
return false;
}
proto = getProto( obj );
// Objects with no prototype (e.g., `Object.create( null )`) are plain
if ( !proto ) {
return true;
}
// Objects with prototype are plain iff they were constructed by a global Object function
Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor;
return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString;
},
isEmptyObject: function( obj ) {
/* eslint-disable no-unused-vars */
// See https://github.com/eslint/eslint/issues/6125
var name;
for ( name in obj ) {
return false;
}
return true;
},
type: function( obj ) {
if ( obj == null ) {
return obj + "";
}
// Support: Android <=2.3 only (functionish RegExp)
return typeof obj === "object" || typeof obj === "function" ?
class2type[ toString.call( obj ) ] || "object" :
typeof obj;
},
// Evaluates a script in a global context
globalEval: function( code ) {
DOMEval( code );
},
// Convert dashed to camelCase; used by the css and data modules
// Support: IE <=9 - 11, Edge 12 - 13
// Microsoft forgot to hump their vendor prefix (#9572)
camelCase: function( string ) {
return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
},
each: function( obj, callback ) {
var length, i = 0;
if ( isArrayLike( obj ) ) {
length = obj.length;
for ( ; i < length; i++ ) {
if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
break;
}
}
} else {
for ( i in obj ) {
if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
break;
}
}
}
return obj;
},
// Support: Android <=4.0 only
trim: function( text ) {
return text == null ?
"" :
( text + "" ).replace( rtrim, "" );
},
// results is for internal usage only
makeArray: function( arr, results ) {
var ret = results || [];
if ( arr != null ) {
if ( isArrayLike( Object( arr ) ) ) {
jQuery.merge( ret,
typeof arr === "string" ?
[ arr ] : arr
);
} else {
push.call( ret, arr );
}
}
return ret;
},
inArray: function( elem, arr, i ) {
return arr == null ? -1 : indexOf.call( arr, elem, i );
},
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
merge: function( first, second ) {
var len = +second.length,
j = 0,
i = first.length;
for ( ; j < len; j++ ) {
first[ i++ ] = second[ j ];
}
first.length = i;
return first;
},
grep: function( elems, callback, invert ) {
var callbackInverse,
matches = [],
i = 0,
length = elems.length,
callbackExpect = !invert;
// Go through the array, only saving the items
// that pass the validator function
for ( ; i < length; i++ ) {
callbackInverse = !callback( elems[ i ], i );
if ( callbackInverse !== callbackExpect ) {
matches.push( elems[ i ] );
}
}
return matches;
},
// arg is for internal usage only
map: function( elems, callback, arg ) {
var length, value,
i = 0,
ret = [];
// Go through the array, translating each of the items to their new values
if ( isArrayLike( elems ) ) {
length = elems.length;
for ( ; i < length; i++ ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret.push( value );
}
}
// Go through every key on the object,
} else {
for ( i in elems ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret.push( value );
}
}
}
// Flatten any nested arrays
return concat.apply( [], ret );
},
// A global GUID counter for objects
guid: 1,
// Bind a function to a context, optionally partially applying any
// arguments.
proxy: function( fn, context ) {
var tmp, args, proxy;
if ( typeof context === "string" ) {
tmp = fn[ context ];
context = fn;
fn = tmp;
}
// Quick check to determine if the target is callable; in the spec
// this throws a TypeError, but we just return undefined.
if ( !jQuery.isFunction( fn ) ) {
return undefined;
}
// Simulated bind
args = slice.call( arguments, 2 );
proxy = function() {
return fn.apply( context || this, args.concat( slice.call( arguments ) ) );
};
// Set the guid of unique handler to the same of original handler, so it can be removed
proxy.guid = fn.guid = fn.guid || jQuery.guid++;
return proxy;
},
now: Date.now,
// jQuery.support is not used in Core but other projects attach their
// properties to it so it needs to exist.
support: support
} );
if ( typeof Symbol === "function" ) {
jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ];
}
// Populate the class2type map
jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ),
function( i, name ) {
class2type[ "[object " + name + "]" ] = name.toLowerCase();
} );
function isArrayLike( obj ) {
// Support: real iOS 8.2 only (not reproducible in simulator)
// `in` check used to prevent JIT error (gh-2145)
// hasOwn isn't used here due to false negatives
// regarding Nodelist length in IE
var length = !!obj && "length" in obj && obj.length,
type = jQuery.type( obj );
if ( type === "function" || jQuery.isWindow( obj ) ) {
return false;
}
return type === "array" || length === 0 ||
typeof length === "number" && length > 0 && ( length - 1 ) in obj;
}
return jQuery;
} );
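// Usage sketch for a few of the core utilities above (values illustrative):
//
//   jQuery.extend( { a: 1 }, { b: 2 } );           // shallow merge -> { a: 1, b: 2 }
//   jQuery.extend( true, {}, { a: { b: 1 } } );    // deep copy: nested objects cloned
//   jQuery.isPlainObject( Object.create( null ) ); // true: no prototype counts as plain
//   jQuery.camelCase( "-ms-user-select" );         // "msUserSelect" (#9572)
//   jQuery.grep( [ 1, 2, 3 ], function( v ) {      // keep items passing the test
//       return v > 1;
//   } );                                           // -> [ 2, 3 ]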
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/dist/core.js
|
core.js
|
!function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=function(a,b){for(var c=0,d=a.length;c<d;c++)if(a[c]===b)return c;return-1},J="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",K="[\\x20\\t\\r\\n\\f]",L="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",M="\\["+K+"*("+L+")(?:"+K+"*([*^$|!~]?=)"+K+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+L+"))|)"+K+"*\\]",N=":("+L+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+M+")*)|.*)\\)|)",O=new RegExp(K+"+","g"),P=new RegExp("^"+K+"+|((?:^|[^\\\\])(?:\\\\.)*)"+K+"+$","g"),Q=new RegExp("^"+K+"*,"+K+"*"),R=new RegExp("^"+K+"*([>+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(N),U=new RegExp("^"+L+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L+"|[*])"),ATTR:new RegExp("^"+M),PSEUDO:new RegExp("^"+N),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),aa=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:d<0?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ba=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ca=function(a,b){return b?"\0"===a?"\ufffd":a.slice(0,-1)+"\\"+a.charCodeAt(a.length-1).toString(16)+" ":"\\"+a},da=function(){m()},ea=ta(function(a){return a.disabled===!0&&("form"in a||"label"in a)},{dir:"parentNode",next:"legend"});try{G.apply(D=H.call(v.childNodes),v.childNodes),D[v.childNodes.length].nodeType}catch(fa){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s=b&&b.ownerDocument,w=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==w&&9!==w&&11!==w)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==w&&(l=Z.exec(a)))if(f=l[1]){if(9===w){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(s&&(j=s.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(l[2])return G.apply(d,b.getElementsByTagName(a)),d;if((f=l[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==w)s=b,r=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(ba,ca):b.setAttribute("id",k=u),o=g(a),h=o.length;while(h--)o[h]="#"+k+" "+sa(o[h]);r=o.join(","),s=$.test(a)&&qa(b.parentNode)||b}if(r)try{return G.apply(d,s.querySelectorAll(r)),d}catch(x){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(P,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("fieldset");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var 
c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&a.sourceIndex-b.sourceIndex;if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return function(b){return"form"in b?b.parentNode&&b.disabled===!1?"label"in b?"label"in b.parentNode?b.parentNode.disabled===a:b.disabled===a:b.isDisabled===a||b.isDisabled!==!a&&ea(b)===a:b.disabled===a:"label"in b&&b.disabled===a}}function pa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function qa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return!!b&&"HTML"!==b.nodeName},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),v!==n&&(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(n.getElementsByClassName),c.getById=ja(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){return a.getAttribute("id")===b}},d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}}):(d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}},d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c,d,e,f=b.getElementById(a);if(f){if(c=f.getAttributeNode("id"),c&&c.value===a)return[f];e=b.getElementsByName(a),d=0;while(f=e[d++])if(c=f.getAttributeNode("id"),c&&c.value===a)return[f]}return[]}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){if("undefined"!=typeof b.getElementsByClassName&&p)return b.getElementsByClassName(a)},r=[],q=[],(c.qsa=Y.test(n.querySelectorAll))&&(ja(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\r\\' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){a.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var 
b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+K+"*[*^$|!~]?="),2!==a.querySelectorAll(":enabled").length&&q.push(":enabled",":disabled"),o.appendChild(a).disabled=!0,2!==a.querySelectorAll(":disabled").length&&q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Y.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"*"),s.call(a,"[s!='']:x"),r.push("!=",N)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Y.test(o.compareDocumentPosition),t=b||Y.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?I(k,a)-I(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?I(k,a)-I(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?la(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(S,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.escape=function(a){return(a+"").replace(ba,ca)},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(_,aa),a[3]=(a[3]||a[4]||a[5]||"").replace(_,aa),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var 
b,c=!a[6]&&a[2];return V.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&T.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(_,aa).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:!b||(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(O," ")+" ").indexOf(c)>-1:"|="===b&&(e===c||e.slice(0,c.length+1)===c+"-"))}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=[],d=h(a.replace(P,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(_,aa),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return U.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(_,aa).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:oa(!1),disabled:oa(!0),checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return 
X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:pa(function(){return[0]}),last:pa(function(a,b){return[b-1]}),eq:pa(function(a,b,c){return[c<0?c+b:c]}),even:pa(function(a,b){for(var c=0;c<b;c+=2)a.push(c);return a}),odd:pa(function(a,b){for(var c=1;c<b;c+=2)a.push(c);return a}),lt:pa(function(a,b,c){for(var d=c<0?c+b:c;--d>=0;)a.push(d);return a}),gt:pa(function(a,b,c){for(var d=c<0?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=ma(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=na(b);function ra(){}ra.prototype=d.filters=d.pseudos,d.setFilters=new ra,g=ga.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){c&&!(e=Q.exec(h))||(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=R.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(P," ")}),h=h.slice(c.length));for(g in d.filter)!(e=V[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?ga.error(a):z(a,i).slice(0)};function sa(a){for(var b=0,c=a.length,d="";b<c;b++)d+=a[b].value;return d}function ta(a,b,c){var d=b.dir,e=b.next,f=e||d,g=c&&"parentNode"===f,h=x++;return b.first?function(b,c,e){while(b=b[d])if(1===b.nodeType||g)return a(b,c,e);return!1}:function(b,c,i){var j,k,l,m=[w,h];if(i){while(b=b[d])if((1===b.nodeType||g)&&a(b,c,i))return!0}else while(b=b[d])if(1===b.nodeType||g)if(l=b[u]||(b[u]={}),k=l[b.uniqueID]||(l[b.uniqueID]={}),e&&e===b.nodeName.toLowerCase())b=b[d]||b;else{if((j=k[f])&&j[0]===w&&j[1]===h)return m[2]=j[2];if(k[f]=m,m[2]=a(b,c,i))return!0}return!1}}function ua(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function va(a,b,c){for(var d=0,e=b.length;d<e;d++)ga(a,b[d],c);return c}function wa(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;h<i;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function xa(a,b,c,d,e,f){return d&&!d[u]&&(d=xa(d)),e&&!e[u]&&(e=xa(e,f)),ia(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||va(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:wa(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=wa(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?I(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=wa(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ya(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ta(function(a){return a===b},h,!0),l=ta(function(a){return I(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];i<f;i++)if(c=d.relative[a[i].type])m=[ta(ua(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;e<f;e++)if(d.relative[a[e].type])break;return xa(i>1&&ua(m),i>1&&sa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(P,"$1"),c,i<e&&ya(a.slice(i,e)),e<f&&ya(a=a.slice(e)),e<f&&sa(a))}m.push(c)}return ua(m)}function za(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=E.call(i));u=wa(u)}G.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&ga.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ia(f):f}h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=ya(b[c]),f[u]?d.push(f):e.push(f);f=A(a,za(e,d)),f.selector=a}return f},i=ga.select=function(a,b,c,e){var f,i,j,k,l,m="function"==typeof a&&a,n=!e&&g(a=m.selector||a);if(c=c||[],1===n.length){if(i=n[0]=n[0].slice(0),i.length>2&&"ID"===(j=i[0]).type&&9===b.nodeType&&p&&d.relative[i[1].type]){if(b=(d.find.ID(j.matches[0].replace(_,aa),b)||[])[0],!b)return c;m&&(b=b.parentNode),a=a.slice(i.shift().value.length)}f=V.needsContext.test(a)?0:i.length;while(f--){if(j=i[f],d.relative[k=j.type])break;if((l=d.find[k])&&(e=l(j.matches[0].replace(_,aa),$.test(i[0].type)&&qa(b.parentNode)||b))){if(i.splice(f,1),a=e.length&&sa(i),!a)return G.apply(c,e),c;break}}}return(m||h(a,n))(e,b,!p,c,!b||$.test(a)&&qa(b.parentNode)||b),c},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("fieldset"))}),ja(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){if(!c)return a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){if(!c&&"input"===a.nodeName.toLowerCase())return a.defaultValue}),ja(function(a){return null==a.getAttribute("disabled")})||ka(J,function(a,b,c){var d;if(!c)return a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null});var Aa=a.Sizzle;ga.noConflict=function(){return a.Sizzle===ga&&(a.Sizzle=Aa),ga},"function"==typeof define&&define.amd?define(function(){return ga}):"undefined"!=typeof module&&module.exports?module.exports=ga:a.Sizzle=ga}(window);
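// Usage sketch: the minified build exposes the same standalone API that the
// unminified sizzle.js below defines (selectors illustrative):
//
//   Sizzle( "div > .item", document );          // -> array of matched elements
//   Sizzle.matchesSelector( elem, ":checked" ); // boolean match against one element
//   Sizzle.uniqueSort( results );               // document-order sort, dupes removed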
//# sourceMappingURL=sizzle.min.map
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/external/sizzle/dist/sizzle.min.js
|
sizzle.min.js
|
(function( window ) {
var i,
support,
Expr,
getText,
isXML,
tokenize,
compile,
select,
outermostContext,
sortInput,
hasDuplicate,
// Local document vars
setDocument,
document,
docElem,
documentIsHTML,
rbuggyQSA,
rbuggyMatches,
matches,
contains,
// Instance-specific data
expando = "sizzle" + 1 * new Date(),
preferredDoc = window.document,
dirruns = 0,
done = 0,
classCache = createCache(),
tokenCache = createCache(),
compilerCache = createCache(),
sortOrder = function( a, b ) {
if ( a === b ) {
hasDuplicate = true;
}
return 0;
},
// Instance methods
hasOwn = ({}).hasOwnProperty,
arr = [],
pop = arr.pop,
push_native = arr.push,
push = arr.push,
slice = arr.slice,
// Use a stripped-down indexOf as it's faster than native
// https://jsperf.com/thor-indexof-vs-for/5
indexOf = function( list, elem ) {
var i = 0,
len = list.length;
for ( ; i < len; i++ ) {
if ( list[i] === elem ) {
return i;
}
}
return -1;
},
booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",
// Regular expressions
// http://www.w3.org/TR/css3-selectors/#whitespace
whitespace = "[\\x20\\t\\r\\n\\f]",
// http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+",
// Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors
attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace +
// Operator (capture 2)
"*([*^$|!~]?=)" + whitespace +
// "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]"
"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace +
"*\\]",
pseudos = ":(" + identifier + ")(?:\\((" +
// To reduce the number of selectors needing tokenize in the preFilter, prefer arguments:
// 1. quoted (capture 3; capture 4 or capture 5)
"('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" +
// 2. simple (capture 6)
"((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" +
// 3. anything else (capture 2)
".*" +
")\\)|)",
// Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
rwhitespace = new RegExp( whitespace + "+", "g" ),
rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ),
rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ),
rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ),
rpseudo = new RegExp( pseudos ),
ridentifier = new RegExp( "^" + identifier + "$" ),
matchExpr = {
"ID": new RegExp( "^#(" + identifier + ")" ),
"CLASS": new RegExp( "^\\.(" + identifier + ")" ),
"TAG": new RegExp( "^(" + identifier + "|[*])" ),
"ATTR": new RegExp( "^" + attributes ),
"PSEUDO": new RegExp( "^" + pseudos ),
"CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace +
"*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace +
"*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
"bool": new RegExp( "^(?:" + booleans + ")$", "i" ),
// For use in libraries implementing .is()
// We use this for POS matching in `select`
"needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" +
whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" )
},
rinputs = /^(?:input|select|textarea|button)$/i,
rheader = /^h\d$/i,
rnative = /^[^{]+\{\s*\[native \w/,
// Easily-parseable/retrievable ID or TAG or CLASS selectors
rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,
rsibling = /[+~]/,
// CSS escapes
// http://www.w3.org/TR/CSS21/syndata.html#escaped-characters
runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ),
funescape = function( _, escaped, escapedWhitespace ) {
var high = "0x" + escaped - 0x10000;
// NaN means non-codepoint
// Support: Firefox<24
// Workaround erroneous numeric interpretation of +"0x"
return high !== high || escapedWhitespace ?
escaped :
high < 0 ?
// BMP codepoint
String.fromCharCode( high + 0x10000 ) :
// Supplemental Plane codepoint (surrogate pair)
String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 );
},
// CSS string/identifier serialization
// https://drafts.csswg.org/cssom/#common-serializing-idioms
rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,
fcssescape = function( ch, asCodePoint ) {
if ( asCodePoint ) {
// U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
if ( ch === "\0" ) {
return "\uFFFD";
}
// Control characters and (dependent upon position) numbers get escaped as code points
return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
}
// Other potentially-special ASCII characters get backslash-escaped
return "\\" + ch;
},
// Used for iframes
// See setDocument()
// Removing the function wrapper causes a "Permission Denied"
// error in IE
unloadHandler = function() {
setDocument();
},
disabledAncestor = addCombinator(
function( elem ) {
return elem.disabled === true && ("form" in elem || "label" in elem);
},
{ dir: "parentNode", next: "legend" }
);
// Optimize for push.apply( _, NodeList )
try {
push.apply(
(arr = slice.call( preferredDoc.childNodes )),
preferredDoc.childNodes
);
// Support: Android<4.0
// Detect silently failing push.apply
arr[ preferredDoc.childNodes.length ].nodeType;
} catch ( e ) {
push = { apply: arr.length ?
// Leverage slice if possible
function( target, els ) {
push_native.apply( target, slice.call(els) );
} :
// Support: IE<9
// Otherwise append directly
function( target, els ) {
var j = target.length,
i = 0;
// Can't trust NodeList.length
while ( (target[j++] = els[i++]) ) {}
target.length = j - 1;
}
};
}
function Sizzle( selector, context, results, seed ) {
var m, i, elem, nid, match, groups, newSelector,
newContext = context && context.ownerDocument,
// nodeType defaults to 9, since context defaults to document
nodeType = context ? context.nodeType : 9;
results = results || [];
// Return early from calls with invalid selector or context
if ( typeof selector !== "string" || !selector ||
nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) {
return results;
}
// Try to shortcut find operations (as opposed to filters) in HTML documents
if ( !seed ) {
if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) {
setDocument( context );
}
context = context || document;
if ( documentIsHTML ) {
// If the selector is sufficiently simple, try using a "get*By*" DOM method
// (excepting DocumentFragment context, where the methods don't exist)
if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) {
// ID selector
if ( (m = match[1]) ) {
// Document context
if ( nodeType === 9 ) {
if ( (elem = context.getElementById( m )) ) {
// Support: IE, Opera, Webkit
// TODO: identify versions
// getElementById can match elements by name instead of ID
if ( elem.id === m ) {
results.push( elem );
return results;
}
} else {
return results;
}
// Element context
} else {
// Support: IE, Opera, Webkit
// TODO: identify versions
// getElementById can match elements by name instead of ID
if ( newContext && (elem = newContext.getElementById( m )) &&
contains( context, elem ) &&
elem.id === m ) {
results.push( elem );
return results;
}
}
// Type selector
} else if ( match[2] ) {
push.apply( results, context.getElementsByTagName( selector ) );
return results;
// Class selector
} else if ( (m = match[3]) && support.getElementsByClassName &&
context.getElementsByClassName ) {
push.apply( results, context.getElementsByClassName( m ) );
return results;
}
}
// Take advantage of querySelectorAll
if ( support.qsa &&
!compilerCache[ selector + " " ] &&
(!rbuggyQSA || !rbuggyQSA.test( selector )) ) {
if ( nodeType !== 1 ) {
newContext = context;
newSelector = selector;
// qSA looks outside Element context, which is not what we want
// Thanks to Andrew Dupont for this workaround technique
// Support: IE <=8
// Exclude object elements
} else if ( context.nodeName.toLowerCase() !== "object" ) {
// Capture the context ID, setting it first if necessary
if ( (nid = context.getAttribute( "id" )) ) {
nid = nid.replace( rcssescape, fcssescape );
} else {
context.setAttribute( "id", (nid = expando) );
}
// Prefix every selector in the list
groups = tokenize( selector );
i = groups.length;
while ( i-- ) {
groups[i] = "#" + nid + " " + toSelector( groups[i] );
}
newSelector = groups.join( "," );
// Expand context for sibling selectors
newContext = rsibling.test( selector ) && testContext( context.parentNode ) ||
context;
}
if ( newSelector ) {
try {
push.apply( results,
newContext.querySelectorAll( newSelector )
);
return results;
} catch ( qsaError ) {
} finally {
if ( nid === expando ) {
context.removeAttribute( "id" );
}
}
}
}
}
}
// All others
return select( selector.replace( rtrim, "$1" ), context, results, seed );
}
/**
* Create key-value caches of limited size
* @returns {function(string, object)} Returns the Object data after storing it on itself with
* property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength)
* deleting the oldest entry
*/
function createCache() {
var keys = [];
function cache( key, value ) {
// Use (key + " ") to avoid collision with native prototype properties (see Issue #157)
if ( keys.push( key + " " ) > Expr.cacheLength ) {
// Only keep the most recent entries
delete cache[ keys.shift() ];
}
return (cache[ key + " " ] = value);
}
return cache;
}
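// Example (illustrative): the module's caches (tokenCache, compilerCache, classCache)
// are all built with createCache() and share one calling convention:
//
//   var myCache = createCache();
//   myCache( "div > p", compiledFn );  // store; returns compiledFn
//   myCache[ "div > p " ];             // read back (note the trailing space)
//
// Once more than Expr.cacheLength keys are stored, the oldest entry is dropped.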
/**
* Mark a function for special use by Sizzle
* @param {Function} fn The function to mark
*/
function markFunction( fn ) {
fn[ expando ] = true;
return fn;
}
/**
* Support testing using an element
* @param {Function} fn Passed the created element and returns a boolean result
*/
function assert( fn ) {
var el = document.createElement("fieldset");
try {
return !!fn( el );
} catch (e) {
return false;
} finally {
// Remove from its parent by default
if ( el.parentNode ) {
el.parentNode.removeChild( el );
}
// release memory in IE
el = null;
}
}
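// Example (illustrative): the feature detects below are all built on assert();
// a sketch of the getElementsByTagName check further down:
//
//   var onlyElements = assert(function( el ) {
//     el.appendChild( document.createComment("") );
//     return !el.getElementsByTagName("*").length;
//   });
//
// A thrown exception simply yields false, and the fieldset is always detached.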
/**
* Adds the same handler for all of the specified attrs
* @param {String} attrs Pipe-separated list of attributes
* @param {Function} handler The method that will be applied
*/
function addHandle( attrs, handler ) {
var arr = attrs.split("|"),
i = arr.length;
while ( i-- ) {
Expr.attrHandle[ arr[i] ] = handler;
}
}
/**
* Checks document order of two siblings
* @param {Element} a
* @param {Element} b
* @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b
*/
function siblingCheck( a, b ) {
var cur = b && a,
diff = cur && a.nodeType === 1 && b.nodeType === 1 &&
a.sourceIndex - b.sourceIndex;
// Use IE sourceIndex if available on both nodes
if ( diff ) {
return diff;
}
// Check if b follows a
if ( cur ) {
while ( (cur = cur.nextSibling) ) {
if ( cur === b ) {
return -1;
}
}
}
return a ? 1 : -1;
}
/**
* Returns a function to use in pseudos for input types
* @param {String} type
*/
function createInputPseudo( type ) {
return function( elem ) {
var name = elem.nodeName.toLowerCase();
return name === "input" && elem.type === type;
};
}
/**
* Returns a function to use in pseudos for buttons
* @param {String} type
*/
function createButtonPseudo( type ) {
return function( elem ) {
var name = elem.nodeName.toLowerCase();
return (name === "input" || name === "button") && elem.type === type;
};
}
/**
* Returns a function to use in pseudos for :enabled/:disabled
* @param {Boolean} disabled true for :disabled; false for :enabled
*/
function createDisabledPseudo( disabled ) {
// Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable
return function( elem ) {
// Only certain elements can match :enabled or :disabled
// https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled
// https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled
if ( "form" in elem ) {
// Check for inherited disabledness on relevant non-disabled elements:
// * listed form-associated elements in a disabled fieldset
// https://html.spec.whatwg.org/multipage/forms.html#category-listed
// https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled
// * option elements in a disabled optgroup
// https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled
// All such elements have a "form" property.
if ( elem.parentNode && elem.disabled === false ) {
// Option elements defer to a parent optgroup if present
if ( "label" in elem ) {
if ( "label" in elem.parentNode ) {
return elem.parentNode.disabled === disabled;
} else {
return elem.disabled === disabled;
}
}
// Support: IE 6 - 11
// Use the isDisabled shortcut property to check for disabled fieldset ancestors
return elem.isDisabled === disabled ||
// Where there is no isDisabled, check manually
/* jshint -W018 */
elem.isDisabled !== !disabled &&
disabledAncestor( elem ) === disabled;
}
return elem.disabled === disabled;
// Try to winnow out elements that can't be disabled before trusting the disabled property.
// Some victims get caught in our net (label, legend, menu, track), but it shouldn't
// even exist on them, let alone have a boolean value.
} else if ( "label" in elem ) {
return elem.disabled === disabled;
}
// Remaining elements are neither :enabled nor :disabled
return false;
};
}
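// Example (illustrative): given
//
//   <select><optgroup disabled><option id="o"></option></optgroup></select>
//
// the matcher from createDisabledPseudo( true ) matches #o: option elements
// expose both "form" and "label", so the branch above defers to the parent
// optgroup's disabled state rather than the option's own (false) property.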
/**
* Returns a function to use in pseudos for positionals
* @param {Function} fn
*/
function createPositionalPseudo( fn ) {
return markFunction(function( argument ) {
argument = +argument;
return markFunction(function( seed, matches ) {
var j,
matchIndexes = fn( [], seed.length, argument ),
i = matchIndexes.length;
// Match elements found at the specified indexes
while ( i-- ) {
if ( seed[ (j = matchIndexes[i]) ] ) {
seed[j] = !(matches[j] = seed[j]);
}
}
});
});
}
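// Example (illustrative): positional pseudos are built from index generators;
// ":eq" further below is essentially
//
//   createPositionalPseudo(function( matchIndexes, length, argument ) {
//     return [ argument < 0 ? argument + length : argument ];
//   });
//
// and the wrapper above flips seed/matches entries at exactly those indexes.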
/**
* Checks a node for validity as a Sizzle context
* @param {Element|Object=} context
* @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value
*/
function testContext( context ) {
return context && typeof context.getElementsByTagName !== "undefined" && context;
}
// Expose support vars for convenience
support = Sizzle.support = {};
/**
* Detects XML nodes
* @param {Element|Object} elem An element or a document
* @returns {Boolean} True iff elem is a non-HTML XML node
*/
isXML = Sizzle.isXML = function( elem ) {
// documentElement is verified for cases where it doesn't yet exist
// (such as loading iframes in IE - #4833)
var documentElement = elem && (elem.ownerDocument || elem).documentElement;
return documentElement ? documentElement.nodeName !== "HTML" : false;
};
/**
* Sets document-related variables once based on the current document
* @param {Element|Object} [doc] An element or document object to use to set the document
* @returns {Object} Returns the current document
*/
setDocument = Sizzle.setDocument = function( node ) {
var hasCompare, subWindow,
doc = node ? node.ownerDocument || node : preferredDoc;
// Return early if doc is invalid or already selected
if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) {
return document;
}
// Update global variables
document = doc;
docElem = document.documentElement;
documentIsHTML = !isXML( document );
// Support: IE 9-11, Edge
// Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936)
if ( preferredDoc !== document &&
(subWindow = document.defaultView) && subWindow.top !== subWindow ) {
// Support: IE 11, Edge
if ( subWindow.addEventListener ) {
subWindow.addEventListener( "unload", unloadHandler, false );
// Support: IE 9 - 10 only
} else if ( subWindow.attachEvent ) {
subWindow.attachEvent( "onunload", unloadHandler );
}
}
/* Attributes
---------------------------------------------------------------------- */
// Support: IE<8
// Verify that getAttribute really returns attributes and not properties
// (excepting IE8 booleans)
support.attributes = assert(function( el ) {
el.className = "i";
return !el.getAttribute("className");
});
/* getElement(s)By*
---------------------------------------------------------------------- */
// Check if getElementsByTagName("*") returns only elements
support.getElementsByTagName = assert(function( el ) {
el.appendChild( document.createComment("") );
return !el.getElementsByTagName("*").length;
});
// Support: IE<9
support.getElementsByClassName = rnative.test( document.getElementsByClassName );
// Support: IE<10
// Check if getElementById returns elements by name
// The broken getElementById methods don't pick up programmatically-set names,
// so use a roundabout getElementsByName test
support.getById = assert(function( el ) {
docElem.appendChild( el ).id = expando;
return !document.getElementsByName || !document.getElementsByName( expando ).length;
});
// ID filter and find
if ( support.getById ) {
Expr.filter["ID"] = function( id ) {
var attrId = id.replace( runescape, funescape );
return function( elem ) {
return elem.getAttribute("id") === attrId;
};
};
Expr.find["ID"] = function( id, context ) {
if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
var elem = context.getElementById( id );
return elem ? [ elem ] : [];
}
};
} else {
Expr.filter["ID"] = function( id ) {
var attrId = id.replace( runescape, funescape );
return function( elem ) {
var node = typeof elem.getAttributeNode !== "undefined" &&
elem.getAttributeNode("id");
return node && node.value === attrId;
};
};
// Support: IE 6 - 7 only
// getElementById is not reliable as a find shortcut
Expr.find["ID"] = function( id, context ) {
if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
var node, i, elems,
elem = context.getElementById( id );
if ( elem ) {
// Verify the id attribute
node = elem.getAttributeNode("id");
if ( node && node.value === id ) {
return [ elem ];
}
// Fall back on getElementsByName
elems = context.getElementsByName( id );
i = 0;
while ( (elem = elems[i++]) ) {
node = elem.getAttributeNode("id");
if ( node && node.value === id ) {
return [ elem ];
}
}
}
return [];
}
};
}
// Tag
Expr.find["TAG"] = support.getElementsByTagName ?
function( tag, context ) {
if ( typeof context.getElementsByTagName !== "undefined" ) {
return context.getElementsByTagName( tag );
// DocumentFragment nodes don't have gEBTN
} else if ( support.qsa ) {
return context.querySelectorAll( tag );
}
} :
function( tag, context ) {
var elem,
tmp = [],
i = 0,
// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
results = context.getElementsByTagName( tag );
// Filter out possible comments
if ( tag === "*" ) {
while ( (elem = results[i++]) ) {
if ( elem.nodeType === 1 ) {
tmp.push( elem );
}
}
return tmp;
}
return results;
};
// Class
Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
return context.getElementsByClassName( className );
}
};
/* QSA/matchesSelector
---------------------------------------------------------------------- */
// QSA and matchesSelector support
// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
rbuggyMatches = [];
// qSa(:focus) reports false when true (Chrome 21)
// We allow this because of a bug in IE8/9 that throws an error
// whenever `document.activeElement` is accessed on an iframe
// So, we allow :focus to pass through QSA all the time to avoid the IE error
// See https://bugs.jquery.com/ticket/13378
rbuggyQSA = [];
if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
// Build QSA regex
// Regex strategy adopted from Diego Perini
assert(function( el ) {
// Select is set to empty string on purpose
// This is to test IE's treatment of not explicitly
// setting a boolean content attribute,
// since its presence should be enough
// https://bugs.jquery.com/ticket/12359
docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
"<select id='" + expando + "-\r\\' msallowcapture=''>" +
"<option selected=''></option></select>";
// Support: IE8, Opera 11-12.16
// Nothing should be selected when empty strings follow ^= or $= or *=
// The test attribute must be unknown in Opera but "safe" for WinRT
// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
if ( el.querySelectorAll("[msallowcapture^='']").length ) {
rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
}
// Support: IE8
// Boolean attributes and "value" are not treated correctly
if ( !el.querySelectorAll("[selected]").length ) {
rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
}
// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
rbuggyQSA.push("~=");
}
// Webkit/Opera - :checked should return selected option elements
// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
// IE8 throws error here and will not see later tests
if ( !el.querySelectorAll(":checked").length ) {
rbuggyQSA.push(":checked");
}
// Support: Safari 8+, iOS 8+
// https://bugs.webkit.org/show_bug.cgi?id=136851
// In-page `selector#id sibling-combinator selector` fails
if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
rbuggyQSA.push(".#.+[+~]");
}
});
assert(function( el ) {
el.innerHTML = "<a href='' disabled='disabled'></a>" +
"<select disabled='disabled'><option/></select>";
// Support: Windows 8 Native Apps
// The type and name attributes are restricted during .innerHTML assignment
var input = document.createElement("input");
input.setAttribute( "type", "hidden" );
el.appendChild( input ).setAttribute( "name", "D" );
// Support: IE8
// Enforce case-sensitivity of name attribute
if ( el.querySelectorAll("[name=d]").length ) {
rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
}
// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
// IE8 throws error here and will not see later tests
if ( el.querySelectorAll(":enabled").length !== 2 ) {
rbuggyQSA.push( ":enabled", ":disabled" );
}
// Support: IE9-11+
// IE's :disabled selector does not pick up the children of disabled fieldsets
docElem.appendChild( el ).disabled = true;
if ( el.querySelectorAll(":disabled").length !== 2 ) {
rbuggyQSA.push( ":enabled", ":disabled" );
}
// Opera 10-11 does not throw on post-comma invalid pseudos
el.querySelectorAll("*,:x");
rbuggyQSA.push(",.*:");
});
}
if ( (support.matchesSelector = rnative.test( (matches = docElem.matches ||
docElem.webkitMatchesSelector ||
docElem.mozMatchesSelector ||
docElem.oMatchesSelector ||
docElem.msMatchesSelector) )) ) {
assert(function( el ) {
// Check to see if it's possible to do matchesSelector
// on a disconnected node (IE 9)
support.disconnectedMatch = matches.call( el, "*" );
// This should fail with an exception
// Gecko does not error, returns false instead
matches.call( el, "[s!='']:x" );
rbuggyMatches.push( "!=", pseudos );
});
}
rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") );
rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") );
/* Contains
---------------------------------------------------------------------- */
hasCompare = rnative.test( docElem.compareDocumentPosition );
// Element contains another
// Purposefully self-exclusive
// As in, an element does not contain itself
contains = hasCompare || rnative.test( docElem.contains ) ?
function( a, b ) {
var adown = a.nodeType === 9 ? a.documentElement : a,
bup = b && b.parentNode;
return a === bup || !!( bup && bup.nodeType === 1 && (
adown.contains ?
adown.contains( bup ) :
a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16
));
} :
function( a, b ) {
if ( b ) {
while ( (b = b.parentNode) ) {
if ( b === a ) {
return true;
}
}
}
return false;
};
/* Sorting
---------------------------------------------------------------------- */
// Document order sorting
sortOrder = hasCompare ?
function( a, b ) {
// Flag for duplicate removal
if ( a === b ) {
hasDuplicate = true;
return 0;
}
// Sort on method existence if only one input has compareDocumentPosition
var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
if ( compare ) {
return compare;
}
// Calculate position if both inputs belong to the same document
compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ?
a.compareDocumentPosition( b ) :
// Otherwise we know they are disconnected
1;
// Disconnected nodes
if ( compare & 1 ||
(!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) {
// Choose the first element that is related to our preferred document
if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) {
return -1;
}
if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) {
return 1;
}
// Maintain original order
return sortInput ?
( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
0;
}
return compare & 4 ? -1 : 1;
} :
function( a, b ) {
// Exit early if the nodes are identical
if ( a === b ) {
hasDuplicate = true;
return 0;
}
var cur,
i = 0,
aup = a.parentNode,
bup = b.parentNode,
ap = [ a ],
bp = [ b ];
// Parentless nodes are either documents or disconnected
if ( !aup || !bup ) {
return a === document ? -1 :
b === document ? 1 :
aup ? -1 :
bup ? 1 :
sortInput ?
( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
0;
// If the nodes are siblings, we can do a quick check
} else if ( aup === bup ) {
return siblingCheck( a, b );
}
// Otherwise we need full lists of their ancestors for comparison
cur = a;
while ( (cur = cur.parentNode) ) {
ap.unshift( cur );
}
cur = b;
while ( (cur = cur.parentNode) ) {
bp.unshift( cur );
}
// Walk down the tree looking for a discrepancy
while ( ap[i] === bp[i] ) {
i++;
}
return i ?
// Do a sibling check if the nodes have a common ancestor
siblingCheck( ap[i], bp[i] ) :
// Otherwise nodes in our document sort first
ap[i] === preferredDoc ? -1 :
bp[i] === preferredDoc ? 1 :
0;
};
return document;
};
Sizzle.matches = function( expr, elements ) {
return Sizzle( expr, null, null, elements );
};
Sizzle.matchesSelector = function( elem, expr ) {
// Set document vars if needed
if ( ( elem.ownerDocument || elem ) !== document ) {
setDocument( elem );
}
// Make sure that attribute selectors are quoted
expr = expr.replace( rattributeQuotes, "='$1']" );
if ( support.matchesSelector && documentIsHTML &&
!compilerCache[ expr + " " ] &&
( !rbuggyMatches || !rbuggyMatches.test( expr ) ) &&
( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) {
try {
var ret = matches.call( elem, expr );
// IE 9's matchesSelector returns false on disconnected nodes
if ( ret || support.disconnectedMatch ||
// As well, disconnected nodes are said to be in a document
// fragment in IE 9
elem.document && elem.document.nodeType !== 11 ) {
return ret;
}
} catch (e) {}
}
return Sizzle( expr, document, null, [ elem ] ).length > 0;
};
Sizzle.contains = function( context, elem ) {
// Set document vars if needed
if ( ( context.ownerDocument || context ) !== document ) {
setDocument( context );
}
return contains( context, elem );
};
Sizzle.attr = function( elem, name ) {
// Set document vars if needed
if ( ( elem.ownerDocument || elem ) !== document ) {
setDocument( elem );
}
var fn = Expr.attrHandle[ name.toLowerCase() ],
// Don't get fooled by Object.prototype properties (jQuery #13807)
val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ?
fn( elem, name, !documentIsHTML ) :
undefined;
return val !== undefined ?
val :
support.attributes || !documentIsHTML ?
elem.getAttribute( name ) :
(val = elem.getAttributeNode(name)) && val.specified ?
val.value :
null;
};
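// Example (illustrative):
//
//   var a = document.createElement("a");
//   a.setAttribute( "href", "#top" );
//   Sizzle.attr( a, "href" );  // "#top"; attrHandle fix-ups cover engines where getAttribute lies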
Sizzle.escape = function( sel ) {
return (sel + "").replace( rcssescape, fcssescape );
};
Sizzle.error = function( msg ) {
throw new Error( "Syntax error, unrecognized expression: " + msg );
};
/**
* Document sorting and removing duplicates
* @param {ArrayLike} results
*/
Sizzle.uniqueSort = function( results ) {
var elem,
duplicates = [],
j = 0,
i = 0;
// Unless we *know* we can detect duplicates, assume their presence
hasDuplicate = !support.detectDuplicates;
sortInput = !support.sortStable && results.slice( 0 );
results.sort( sortOrder );
if ( hasDuplicate ) {
while ( (elem = results[i++]) ) {
if ( elem === results[ i ] ) {
j = duplicates.push( i );
}
}
while ( j-- ) {
results.splice( duplicates[ j ], 1 );
}
}
// Clear input after sorting to release objects
// See https://github.com/jquery/sizzle/pull/225
sortInput = null;
return results;
};
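// Example (illustrative): uniqueSort is the public entry point for merged results:
//
//   var merged = Sizzle( "p", sectionA ).concat( Sizzle( "p", sectionB ) );
//   Sizzle.uniqueSort( merged );  // document order, duplicates spliced out in place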
/**
* Utility function for retrieving the text value of an array of DOM nodes
* @param {Array|Element} elem
*/
getText = Sizzle.getText = function( elem ) {
var node,
ret = "",
i = 0,
nodeType = elem.nodeType;
if ( !nodeType ) {
// If no nodeType, this is expected to be an array
while ( (node = elem[i++]) ) {
// Do not traverse comment nodes
ret += getText( node );
}
} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
// Use textContent for elements
// innerText usage removed for consistency of new lines (jQuery #11153)
if ( typeof elem.textContent === "string" ) {
return elem.textContent;
} else {
// Traverse its children
for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
ret += getText( elem );
}
}
} else if ( nodeType === 3 || nodeType === 4 ) {
return elem.nodeValue;
}
// Do not include comment or processing instruction nodes
return ret;
};
Expr = Sizzle.selectors = {
// Can be adjusted by the user
cacheLength: 50,
createPseudo: markFunction,
match: matchExpr,
attrHandle: {},
find: {},
relative: {
">": { dir: "parentNode", first: true },
" ": { dir: "parentNode" },
"+": { dir: "previousSibling", first: true },
"~": { dir: "previousSibling" }
},
preFilter: {
"ATTR": function( match ) {
match[1] = match[1].replace( runescape, funescape );
// Move the given value to match[3] whether quoted or unquoted
match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape );
if ( match[2] === "~=" ) {
match[3] = " " + match[3] + " ";
}
return match.slice( 0, 4 );
},
"CHILD": function( match ) {
/* matches from matchExpr["CHILD"]
1 type (only|nth|...)
2 what (child|of-type)
3 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
4 xn-component of xn+y argument ([+-]?\d*n|)
5 sign of xn-component
6 x of xn-component
7 sign of y-component
8 y of y-component
*/
match[1] = match[1].toLowerCase();
if ( match[1].slice( 0, 3 ) === "nth" ) {
// nth-* requires argument
if ( !match[3] ) {
Sizzle.error( match[0] );
}
// numeric x and y parameters for Expr.filter.CHILD
// remember that false/true cast respectively to 0/1
match[4] = +( match[4] ? match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) );
match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" );
// other types prohibit arguments
} else if ( match[3] ) {
Sizzle.error( match[0] );
}
return match;
},
"PSEUDO": function( match ) {
var excess,
unquoted = !match[6] && match[2];
if ( matchExpr["CHILD"].test( match[0] ) ) {
return null;
}
// Accept quoted arguments as-is
if ( match[3] ) {
match[2] = match[4] || match[5] || "";
// Strip excess characters from unquoted arguments
} else if ( unquoted && rpseudo.test( unquoted ) &&
// Get excess from tokenize (recursively)
(excess = tokenize( unquoted, true )) &&
// advance to the next closing parenthesis
(excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) {
// excess is a negative index
match[0] = match[0].slice( 0, excess );
match[2] = unquoted.slice( 0, excess );
}
// Return only captures needed by the pseudo filter method (type and argument)
return match.slice( 0, 3 );
}
},
filter: {
"TAG": function( nodeNameSelector ) {
var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase();
return nodeNameSelector === "*" ?
function() { return true; } :
function( elem ) {
return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
};
},
"CLASS": function( className ) {
var pattern = classCache[ className + " " ];
return pattern ||
(pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) &&
classCache( className, function( elem ) {
return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" );
});
},
"ATTR": function( name, operator, check ) {
return function( elem ) {
var result = Sizzle.attr( elem, name );
if ( result == null ) {
return operator === "!=";
}
if ( !operator ) {
return true;
}
result += "";
return operator === "=" ? result === check :
operator === "!=" ? result !== check :
operator === "^=" ? check && result.indexOf( check ) === 0 :
operator === "*=" ? check && result.indexOf( check ) > -1 :
operator === "$=" ? check && result.slice( -check.length ) === check :
operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 :
operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" :
false;
};
},
"CHILD": function( type, what, argument, first, last ) {
var simple = type.slice( 0, 3 ) !== "nth",
forward = type.slice( -4 ) !== "last",
ofType = what === "of-type";
return first === 1 && last === 0 ?
// Shortcut for :nth-*(n)
function( elem ) {
return !!elem.parentNode;
} :
function( elem, context, xml ) {
var cache, uniqueCache, outerCache, node, nodeIndex, start,
dir = simple !== forward ? "nextSibling" : "previousSibling",
parent = elem.parentNode,
name = ofType && elem.nodeName.toLowerCase(),
useCache = !xml && !ofType,
diff = false;
if ( parent ) {
// :(first|last|only)-(child|of-type)
if ( simple ) {
while ( dir ) {
node = elem;
while ( (node = node[ dir ]) ) {
if ( ofType ?
node.nodeName.toLowerCase() === name :
node.nodeType === 1 ) {
return false;
}
}
// Reverse direction for :only-* (if we haven't yet done so)
start = dir = type === "only" && !start && "nextSibling";
}
return true;
}
start = [ forward ? parent.firstChild : parent.lastChild ];
// non-xml :nth-child(...) stores cache data on `parent`
if ( forward && useCache ) {
// Seek `elem` from a previously-cached index
// ...in a gzip-friendly way
node = parent;
outerCache = node[ expando ] || (node[ expando ] = {});
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ node.uniqueID ] ||
(outerCache[ node.uniqueID ] = {});
cache = uniqueCache[ type ] || [];
nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
diff = nodeIndex && cache[ 2 ];
node = nodeIndex && parent.childNodes[ nodeIndex ];
while ( (node = ++nodeIndex && node && node[ dir ] ||
// Fallback to seeking `elem` from the start
(diff = nodeIndex = 0) || start.pop()) ) {
// When found, cache indexes on `parent` and break
if ( node.nodeType === 1 && ++diff && node === elem ) {
uniqueCache[ type ] = [ dirruns, nodeIndex, diff ];
break;
}
}
} else {
// Use previously-cached element index if available
if ( useCache ) {
// ...in a gzip-friendly way
node = elem;
outerCache = node[ expando ] || (node[ expando ] = {});
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ node.uniqueID ] ||
(outerCache[ node.uniqueID ] = {});
cache = uniqueCache[ type ] || [];
nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
diff = nodeIndex;
}
// xml :nth-child(...)
// or :nth-last-child(...) or :nth(-last)?-of-type(...)
if ( diff === false ) {
// Use the same loop as above to seek `elem` from the start
while ( (node = ++nodeIndex && node && node[ dir ] ||
(diff = nodeIndex = 0) || start.pop()) ) {
if ( ( ofType ?
node.nodeName.toLowerCase() === name :
node.nodeType === 1 ) &&
++diff ) {
// Cache the index of each encountered element
if ( useCache ) {
outerCache = node[ expando ] || (node[ expando ] = {});
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ node.uniqueID ] ||
(outerCache[ node.uniqueID ] = {});
uniqueCache[ type ] = [ dirruns, diff ];
}
if ( node === elem ) {
break;
}
}
}
}
}
// Incorporate the offset, then check against cycle size
diff -= last;
return diff === first || ( diff % first === 0 && diff / first >= 0 );
}
};
},
"PSEUDO": function( pseudo, argument ) {
// pseudo-class names are case-insensitive
// http://www.w3.org/TR/selectors/#pseudo-classes
// Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
// Remember that setFilters inherits from pseudos
var args,
fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
Sizzle.error( "unsupported pseudo: " + pseudo );
// The user may use createPseudo to indicate that
// arguments are needed to create the filter function
// just as Sizzle does
if ( fn[ expando ] ) {
return fn( argument );
}
// But maintain support for old signatures
if ( fn.length > 1 ) {
args = [ pseudo, pseudo, "", argument ];
return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
markFunction(function( seed, matches ) {
var idx,
matched = fn( seed, argument ),
i = matched.length;
while ( i-- ) {
idx = indexOf( seed, matched[i] );
seed[ idx ] = !( matches[ idx ] = matched[i] );
}
}) :
function( elem ) {
return fn( elem, 0, args );
};
}
return fn;
}
},
pseudos: {
// Potentially complex pseudos
"not": markFunction(function( selector ) {
// Trim the selector passed to compile
// to avoid treating leading and trailing
// spaces as combinators
var input = [],
results = [],
matcher = compile( selector.replace( rtrim, "$1" ) );
return matcher[ expando ] ?
markFunction(function( seed, matches, context, xml ) {
var elem,
unmatched = matcher( seed, null, xml, [] ),
i = seed.length;
// Match elements unmatched by `matcher`
while ( i-- ) {
if ( (elem = unmatched[i]) ) {
seed[i] = !(matches[i] = elem);
}
}
}) :
function( elem, context, xml ) {
input[0] = elem;
matcher( input, null, xml, results );
// Don't keep the element (issue #299)
input[0] = null;
return !results.pop();
};
}),
"has": markFunction(function( selector ) {
return function( elem ) {
return Sizzle( selector, elem ).length > 0;
};
}),
"contains": markFunction(function( text ) {
text = text.replace( runescape, funescape );
return function( elem ) {
return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1;
};
}),
// "Whether an element is represented by a :lang() selector
// is based solely on the element's language value
// being equal to the identifier C,
// or beginning with the identifier C immediately followed by "-".
// The matching of C against the element's language value is performed case-insensitively.
// The identifier C does not have to be a valid language name."
// http://www.w3.org/TR/selectors/#lang-pseudo
"lang": markFunction( function( lang ) {
// lang value must be a valid identifier
if ( !ridentifier.test(lang || "") ) {
Sizzle.error( "unsupported lang: " + lang );
}
lang = lang.replace( runescape, funescape ).toLowerCase();
return function( elem ) {
var elemLang;
do {
if ( (elemLang = documentIsHTML ?
elem.lang :
elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) {
elemLang = elemLang.toLowerCase();
return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0;
}
} while ( (elem = elem.parentNode) && elem.nodeType === 1 );
return false;
};
}),
// Miscellaneous
"target": function( elem ) {
var hash = window.location && window.location.hash;
return hash && hash.slice( 1 ) === elem.id;
},
"root": function( elem ) {
return elem === docElem;
},
"focus": function( elem ) {
return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex);
},
// Boolean properties
"enabled": createDisabledPseudo( false ),
"disabled": createDisabledPseudo( true ),
"checked": function( elem ) {
// In CSS3, :checked should return both checked and selected elements
// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
var nodeName = elem.nodeName.toLowerCase();
return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected);
},
"selected": function( elem ) {
// Accessing this property makes selected-by-default
// options in Safari work properly
if ( elem.parentNode ) {
elem.parentNode.selectedIndex;
}
return elem.selected === true;
},
// Contents
"empty": function( elem ) {
// http://www.w3.org/TR/selectors/#empty-pseudo
// :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5),
// but not by others (comment: 8; processing instruction: 7; etc.)
// nodeType < 6 works because attributes (2) do not appear as children
for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
if ( elem.nodeType < 6 ) {
return false;
}
}
return true;
},
"parent": function( elem ) {
return !Expr.pseudos["empty"]( elem );
},
// Element/input types
"header": function( elem ) {
return rheader.test( elem.nodeName );
},
"input": function( elem ) {
return rinputs.test( elem.nodeName );
},
"button": function( elem ) {
var name = elem.nodeName.toLowerCase();
return name === "input" && elem.type === "button" || name === "button";
},
"text": function( elem ) {
var attr;
return elem.nodeName.toLowerCase() === "input" &&
elem.type === "text" &&
// Support: IE<8
// New HTML5 attribute values (e.g., "search") appear with elem.type === "text"
( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" );
},
// Position-in-collection
"first": createPositionalPseudo(function() {
return [ 0 ];
}),
"last": createPositionalPseudo(function( matchIndexes, length ) {
return [ length - 1 ];
}),
"eq": createPositionalPseudo(function( matchIndexes, length, argument ) {
return [ argument < 0 ? argument + length : argument ];
}),
"even": createPositionalPseudo(function( matchIndexes, length ) {
var i = 0;
for ( ; i < length; i += 2 ) {
matchIndexes.push( i );
}
return matchIndexes;
}),
"odd": createPositionalPseudo(function( matchIndexes, length ) {
var i = 1;
for ( ; i < length; i += 2 ) {
matchIndexes.push( i );
}
return matchIndexes;
}),
"lt": createPositionalPseudo(function( matchIndexes, length, argument ) {
var i = argument < 0 ? argument + length : argument;
for ( ; --i >= 0; ) {
matchIndexes.push( i );
}
return matchIndexes;
}),
"gt": createPositionalPseudo(function( matchIndexes, length, argument ) {
var i = argument < 0 ? argument + length : argument;
for ( ; ++i < length; ) {
matchIndexes.push( i );
}
return matchIndexes;
})
}
};
Expr.pseudos["nth"] = Expr.pseudos["eq"];
// Add button/input type pseudos
for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) {
Expr.pseudos[ i ] = createInputPseudo( i );
}
for ( i in { submit: true, reset: true } ) {
Expr.pseudos[ i ] = createButtonPseudo( i );
}
// Easy API for creating new setFilters
function setFilters() {}
setFilters.prototype = Expr.filters = Expr.pseudos;
Expr.setFilters = new setFilters();
tokenize = Sizzle.tokenize = function( selector, parseOnly ) {
var matched, match, tokens, type,
soFar, groups, preFilters,
cached = tokenCache[ selector + " " ];
if ( cached ) {
return parseOnly ? 0 : cached.slice( 0 );
}
soFar = selector;
groups = [];
preFilters = Expr.preFilter;
while ( soFar ) {
// Comma and first run
if ( !matched || (match = rcomma.exec( soFar )) ) {
if ( match ) {
// Don't consume trailing commas as valid
soFar = soFar.slice( match[0].length ) || soFar;
}
groups.push( (tokens = []) );
}
matched = false;
// Combinators
if ( (match = rcombinators.exec( soFar )) ) {
matched = match.shift();
tokens.push({
value: matched,
// Cast descendant combinators to space
type: match[0].replace( rtrim, " " )
});
soFar = soFar.slice( matched.length );
}
// Filters
for ( type in Expr.filter ) {
if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] ||
(match = preFilters[ type ]( match ))) ) {
matched = match.shift();
tokens.push({
value: matched,
type: type,
matches: match
});
soFar = soFar.slice( matched.length );
}
}
if ( !matched ) {
break;
}
}
// Return the length of the invalid excess
// if we're just parsing
// Otherwise, throw an error or return tokens
return parseOnly ?
soFar.length :
soFar ?
Sizzle.error( selector ) :
// Cache the tokens
tokenCache( selector, groups ).slice( 0 );
};
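// Example (illustrative): tokenize( "div > .item" ) returns one group of three
// tokens, roughly
//
//   [ { value: "div", type: "TAG", matches: [...] },
//     { value: " > ", type: ">" },
//     { value: ".item", type: "CLASS", matches: [...] } ]
//
// while tokenize( selector, true ) returns only the length of the unparsable excess.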
function toSelector( tokens ) {
var i = 0,
len = tokens.length,
selector = "";
for ( ; i < len; i++ ) {
selector += tokens[i].value;
}
return selector;
}
function addCombinator( matcher, combinator, base ) {
var dir = combinator.dir,
skip = combinator.next,
key = skip || dir,
checkNonElements = base && key === "parentNode",
doneName = done++;
return combinator.first ?
// Check against closest ancestor/preceding element
function( elem, context, xml ) {
while ( (elem = elem[ dir ]) ) {
if ( elem.nodeType === 1 || checkNonElements ) {
return matcher( elem, context, xml );
}
}
return false;
} :
// Check against all ancestor/preceding elements
function( elem, context, xml ) {
var oldCache, uniqueCache, outerCache,
newCache = [ dirruns, doneName ];
// We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching
if ( xml ) {
while ( (elem = elem[ dir ]) ) {
if ( elem.nodeType === 1 || checkNonElements ) {
if ( matcher( elem, context, xml ) ) {
return true;
}
}
}
} else {
while ( (elem = elem[ dir ]) ) {
if ( elem.nodeType === 1 || checkNonElements ) {
outerCache = elem[ expando ] || (elem[ expando ] = {});
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {});
if ( skip && skip === elem.nodeName.toLowerCase() ) {
elem = elem[ dir ] || elem;
} else if ( (oldCache = uniqueCache[ key ]) &&
oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) {
// Assign to newCache so results back-propagate to previous elements
return (newCache[ 2 ] = oldCache[ 2 ]);
} else {
// Reuse newcache so results back-propagate to previous elements
uniqueCache[ key ] = newCache;
// A match means we're done; a fail means we have to keep checking
if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) {
return true;
}
}
}
}
}
return false;
};
}
function elementMatcher( matchers ) {
return matchers.length > 1 ?
function( elem, context, xml ) {
var i = matchers.length;
while ( i-- ) {
if ( !matchers[i]( elem, context, xml ) ) {
return false;
}
}
return true;
} :
matchers[0];
}
function multipleContexts( selector, contexts, results ) {
var i = 0,
len = contexts.length;
for ( ; i < len; i++ ) {
Sizzle( selector, contexts[i], results );
}
return results;
}
function condense( unmatched, map, filter, context, xml ) {
var elem,
newUnmatched = [],
i = 0,
len = unmatched.length,
mapped = map != null;
for ( ; i < len; i++ ) {
if ( (elem = unmatched[i]) ) {
if ( !filter || filter( elem, context, xml ) ) {
newUnmatched.push( elem );
if ( mapped ) {
map.push( i );
}
}
}
}
return newUnmatched;
}
function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
if ( postFilter && !postFilter[ expando ] ) {
postFilter = setMatcher( postFilter );
}
if ( postFinder && !postFinder[ expando ] ) {
postFinder = setMatcher( postFinder, postSelector );
}
return markFunction(function( seed, results, context, xml ) {
var temp, i, elem,
preMap = [],
postMap = [],
preexisting = results.length,
// Get initial elements from seed or context
elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ),
// Prefilter to get matcher input, preserving a map for seed-results synchronization
matcherIn = preFilter && ( seed || !selector ) ?
condense( elems, preMap, preFilter, context, xml ) :
elems,
matcherOut = matcher ?
// If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
// ...intermediate processing is necessary
[] :
// ...otherwise use results directly
results :
matcherIn;
// Find primary matches
if ( matcher ) {
matcher( matcherIn, matcherOut, context, xml );
}
// Apply postFilter
if ( postFilter ) {
temp = condense( matcherOut, postMap );
postFilter( temp, [], context, xml );
// Un-match failing elements by moving them back to matcherIn
i = temp.length;
while ( i-- ) {
if ( (elem = temp[i]) ) {
matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem);
}
}
}
if ( seed ) {
if ( postFinder || preFilter ) {
if ( postFinder ) {
// Get the final matcherOut by condensing this intermediate into postFinder contexts
temp = [];
i = matcherOut.length;
while ( i-- ) {
if ( (elem = matcherOut[i]) ) {
// Restore matcherIn since elem is not yet a final match
temp.push( (matcherIn[i] = elem) );
}
}
postFinder( null, (matcherOut = []), temp, xml );
}
// Move matched elements from seed to results to keep them synchronized
i = matcherOut.length;
while ( i-- ) {
if ( (elem = matcherOut[i]) &&
(temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) {
seed[temp] = !(results[temp] = elem);
}
}
}
// Add elements to results, through postFinder if defined
} else {
matcherOut = condense(
matcherOut === results ?
matcherOut.splice( preexisting, matcherOut.length ) :
matcherOut
);
if ( postFinder ) {
postFinder( null, results, matcherOut, xml );
} else {
push.apply( results, matcherOut );
}
}
});
}
function matcherFromTokens( tokens ) {
var checkContext, matcher, j,
len = tokens.length,
leadingRelative = Expr.relative[ tokens[0].type ],
implicitRelative = leadingRelative || Expr.relative[" "],
i = leadingRelative ? 1 : 0,
// The foundational matcher ensures that elements are reachable from top-level context(s)
matchContext = addCombinator( function( elem ) {
return elem === checkContext;
}, implicitRelative, true ),
matchAnyContext = addCombinator( function( elem ) {
return indexOf( checkContext, elem ) > -1;
}, implicitRelative, true ),
matchers = [ function( elem, context, xml ) {
var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
(checkContext = context).nodeType ?
matchContext( elem, context, xml ) :
matchAnyContext( elem, context, xml ) );
// Avoid hanging onto element (issue #299)
checkContext = null;
return ret;
} ];
for ( ; i < len; i++ ) {
if ( (matcher = Expr.relative[ tokens[i].type ]) ) {
matchers = [ addCombinator(elementMatcher( matchers ), matcher) ];
} else {
matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches );
// Return special upon seeing a positional matcher
if ( matcher[ expando ] ) {
// Find the next relative operator (if any) for proper handling
j = ++i;
for ( ; j < len; j++ ) {
if ( Expr.relative[ tokens[j].type ] ) {
break;
}
}
return setMatcher(
i > 1 && elementMatcher( matchers ),
i > 1 && toSelector(
// If the preceding token was a descendant combinator, insert an implicit any-element `*`
tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" })
).replace( rtrim, "$1" ),
matcher,
i < j && matcherFromTokens( tokens.slice( i, j ) ),
j < len && matcherFromTokens( (tokens = tokens.slice( j )) ),
j < len && toSelector( tokens )
);
}
matchers.push( matcher );
}
}
return elementMatcher( matchers );
}
function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
var bySet = setMatchers.length > 0,
byElement = elementMatchers.length > 0,
superMatcher = function( seed, context, xml, results, outermost ) {
var elem, j, matcher,
matchedCount = 0,
i = "0",
unmatched = seed && [],
setMatched = [],
contextBackup = outermostContext,
// We must always have either seed elements or outermost context
elems = seed || byElement && Expr.find["TAG"]( "*", outermost ),
// Use integer dirruns iff this is the outermost matcher
dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1),
len = elems.length;
if ( outermost ) {
outermostContext = context === document || context || outermost;
}
// Add elements passing elementMatchers directly to results
// Support: IE<9, Safari
// Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
for ( ; i !== len && (elem = elems[i]) != null; i++ ) {
if ( byElement && elem ) {
j = 0;
if ( !context && elem.ownerDocument !== document ) {
setDocument( elem );
xml = !documentIsHTML;
}
while ( (matcher = elementMatchers[j++]) ) {
if ( matcher( elem, context || document, xml) ) {
results.push( elem );
break;
}
}
if ( outermost ) {
dirruns = dirrunsUnique;
}
}
// Track unmatched elements for set filters
if ( bySet ) {
// They will have gone through all possible matchers
if ( (elem = !matcher && elem) ) {
matchedCount--;
}
// Lengthen the array for every element, matched or not
if ( seed ) {
unmatched.push( elem );
}
}
}
// `i` is now the count of elements visited above, and adding it to `matchedCount`
// makes the latter nonnegative.
matchedCount += i;
// Apply set filters to unmatched elements
// NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount`
// equals `i`), unless we didn't visit _any_ elements in the above loop because we have
// no element matchers and no seed.
// Incrementing an initially-string "0" `i` allows `i` to remain a string only in that
// case, which will result in a "00" `matchedCount` that differs from `i` but is also
// numerically zero.
if ( bySet && i !== matchedCount ) {
j = 0;
while ( (matcher = setMatchers[j++]) ) {
matcher( unmatched, setMatched, context, xml );
}
if ( seed ) {
// Reintegrate element matches to eliminate the need for sorting
if ( matchedCount > 0 ) {
while ( i-- ) {
if ( !(unmatched[i] || setMatched[i]) ) {
setMatched[i] = pop.call( results );
}
}
}
// Discard index placeholder values to get only actual matches
setMatched = condense( setMatched );
}
// Add matches to results
push.apply( results, setMatched );
// Seedless set matches succeeding multiple successful matchers stipulate sorting
if ( outermost && !seed && setMatched.length > 0 &&
( matchedCount + setMatchers.length ) > 1 ) {
Sizzle.uniqueSort( results );
}
}
// Override manipulation of globals by nested matchers
if ( outermost ) {
dirruns = dirrunsUnique;
outermostContext = contextBackup;
}
return unmatched;
};
return bySet ?
markFunction( superMatcher ) :
superMatcher;
}
compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) {
var i,
setMatchers = [],
elementMatchers = [],
cached = compilerCache[ selector + " " ];
if ( !cached ) {
// Generate a function of recursive functions that can be used to check each element
if ( !match ) {
match = tokenize( selector );
}
i = match.length;
while ( i-- ) {
cached = matcherFromTokens( match[i] );
if ( cached[ expando ] ) {
setMatchers.push( cached );
} else {
elementMatchers.push( cached );
}
}
// Cache the compiled function
cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) );
// Save selector and tokenization
cached.selector = selector;
}
return cached;
};
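// Example (illustrative): compiled selectors can be reused to skip tokenization:
//
//   var matcher = Sizzle.compile( "div.item > a" );
//   Sizzle.select( matcher, document, [] );
//
// compile() stores the source selector on the returned function, so select()
// can still reduce the context when the selector leads with an ID.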
/**
* A low-level selection function that works with Sizzle's compiled
* selector functions
* @param {String|Function} selector A selector or a pre-compiled
* selector function built with Sizzle.compile
* @param {Element} context
* @param {Array} [results]
* @param {Array} [seed] A set of elements to match against
*/
select = Sizzle.select = function( selector, context, results, seed ) {
var i, tokens, token, type, find,
compiled = typeof selector === "function" && selector,
match = !seed && tokenize( (selector = compiled.selector || selector) );
results = results || [];
// Try to minimize operations if there is only one selector in the list and no seed
// (the latter of which guarantees us context)
if ( match.length === 1 ) {
// Reduce context if the leading compound selector is an ID
tokens = match[0] = match[0].slice( 0 );
if ( tokens.length > 2 && (token = tokens[0]).type === "ID" &&
context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) {
context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0];
if ( !context ) {
return results;
// Precompiled matchers will still verify ancestry, so step up a level
} else if ( compiled ) {
context = context.parentNode;
}
selector = selector.slice( tokens.shift().value.length );
}
// Fetch a seed set for right-to-left matching
i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length;
while ( i-- ) {
token = tokens[i];
// Abort if we hit a combinator
if ( Expr.relative[ (type = token.type) ] ) {
break;
}
if ( (find = Expr.find[ type ]) ) {
// Search, expanding context for leading sibling combinators
if ( (seed = find(
token.matches[0].replace( runescape, funescape ),
rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
)) ) {
// If seed is empty or no tokens remain, we can return early
tokens.splice( i, 1 );
selector = seed.length && toSelector( tokens );
if ( !selector ) {
push.apply( results, seed );
return results;
}
break;
}
}
}
}
// Compile and execute a filtering function if one is not provided
// Provide `match` to avoid retokenization if we modified the selector above
( compiled || compile( selector, match ) )(
seed,
context,
!documentIsHTML,
results,
!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
);
return results;
};
// One-time assignments
// Sort stability
support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
// Support: Chrome 14-35+
// Always assume duplicates if they aren't passed to the comparison function
support.detectDuplicates = !!hasDuplicate;
// Initialize against the default document
setDocument();
// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
// Detached nodes confoundingly follow *each other*
support.sortDetached = assert(function( el ) {
// Should return 1, but returns 4 (following)
return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
});
// Support: IE<8
// Prevent attribute/property "interpolation"
// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
if ( !assert(function( el ) {
el.innerHTML = "<a href='#'></a>";
return el.firstChild.getAttribute("href") === "#" ;
}) ) {
addHandle( "type|href|height|width", function( elem, name, isXML ) {
if ( !isXML ) {
return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
}
});
}
// Support: IE<9
// Use defaultValue in place of getAttribute("value")
if ( !support.attributes || !assert(function( el ) {
el.innerHTML = "<input/>";
el.firstChild.setAttribute( "value", "" );
return el.firstChild.getAttribute( "value" ) === "";
}) ) {
addHandle( "value", function( elem, name, isXML ) {
if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
return elem.defaultValue;
}
});
}
// Support: IE<9
// Use getAttributeNode to fetch booleans when getAttribute lies
if ( !assert(function( el ) {
return el.getAttribute("disabled") == null;
}) ) {
addHandle( booleans, function( elem, name, isXML ) {
var val;
if ( !isXML ) {
return elem[ name ] === true ? name.toLowerCase() :
(val = elem.getAttributeNode( name )) && val.specified ?
val.value :
null;
}
});
}
// EXPOSE
var _sizzle = window.Sizzle;
Sizzle.noConflict = function() {
if ( window.Sizzle === Sizzle ) {
window.Sizzle = _sizzle;
}
return Sizzle;
};
if ( typeof define === "function" && define.amd ) {
define(function() { return Sizzle; });
// Sizzle requires that there be a global window in Common-JS like environments
} else if ( typeof module !== "undefined" && module.exports ) {
module.exports = Sizzle;
} else {
window.Sizzle = Sizzle;
}
// EXPOSE
})( window );
|
AnalysisProjectDependencies
|
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/jquery/external/sizzle/dist/sizzle.js
|
sizzle.js
|
import pandas as pd
import numpy as np
import scipy.stats as stats
from pandas_profiling import ProfileReport
class analyze:
def __init__(self, data):
self.data = data
def extract_categoricals(self):
# Return the non-numeric (categorical) columns of the dataset
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
dataframe = self.data.select_dtypes(exclude=numerics)
return dataframe
def extract_numericals(self):
# Return the numeric columns of the dataset
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
dataset = self.data.select_dtypes(include=numerics)
return dataset
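# Example (illustrative): typical usage, assuming a pandas DataFrame `df`
# loaded elsewhere:
#
#   report = analyze(df)
#   report.intro()      # structural overview of the dataset
#   report.analyze()    # descriptive statistics plus optional profiling reports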
def intro(self):
print('................................................')
print('Here is the introductory information of your dataset:\n')
print(self.data.info())
print('\n')
print('................................................')
print("The summary of your dataset's descripive statistics are:\n")
print(self.data.describe())
print('\n')
print('................................................')
print('The shape of your dataset is:\n')
print(self.data.shape)
print('\n')
print('................................................')
print('The columns of your dataset are:\n')
print(self.data.columns)
print('\n')
print('................................................')
print('The number of rows of your dataset is:\n')
print(len(self.data))
print('\n')
print('................................................')
print('The data types of your dataset are:\n')
print(self.data.dtypes)
print('\n')
print('................................................')
print('The number of unique/distinct entries in each column of your dataset is:\n')
print(self.data.nunique())
print('\n')
print('................................................')
print('The number of missing values in each column of your dataset is:\n')
print(self.data.isna().sum())
print('\n')
print('................................................')
print('The first 10 rows of your dataset are:\n')
print(self.data.head(10))
print('\n')
def analyze(self):
minimum = self.data.min()
tenth_percentile = self.data.quantile(0.1)
tenth_percentile_without_missing_values = self.data.dropna().quantile(0.1)
Q1 = self.data.quantile(0.25)
Q1_without_missing_values = self.data.dropna().quantile(0.25)
median = self.data.median()
median_without_missing_values = self.data.dropna().median()
Q3 = self.data.quantile(0.75)
Q3_without_missing_values = self.data.dropna().quantile(0.75)
ninetieth_percentile = self.data.quantile(0.9)
ninetieth_percentile_without_missing_values = self.data.dropna().quantile(0.9)
maximum = self.data.max()
standard_deviation = self.data.std()
standard_deviation_without_missing_values = self.data.dropna().std()
variance = self.data.var()
variance_without_missing_values = self.data.dropna().var()
mean = self.data.mean()
mean_without_missing_values = self.data.dropna().mean()
kurtosis = self.data.kurtosis()
skew = self.data.skew()
mode = self.data.mode()
correlation = self.data.corr()
print('................................................')
print('These are the categorical features in your dataset:\n')
print(self.extract_categoricals().head(20))
print('\n')
print('................................................')
print('These are the numerical features in your dataset:\n')
print(self.extract_numericals().head(20))
print('\n')
print('................................................')
print('The minimum values for each column in the dataset are:\n')
print(minimum)
print('\n')
print('................................................')
print('The 10th percentile values for each column in the dataset are:\n')
print(tenth_percentile)
print('\n')
print('................................................')
print('The 10th percentile values for each column in the dataset without missing values are:\n')
print(tenth_percentile_without_missing_values)
print('\n')
print('................................................')
print('The first quartile values of your dataset are:\n')
print(Q1)
print('\n')
print('................................................')
print('The first quartile values of your dataset without missing values are:\n')
print(Q1_without_missing_values)
print('\n')
print('................................................')
print('The median values for each column of your dataset are:\n')
print(median)
print('\n')
print('................................................')
print('The median values for each column of your dataset without missing values are:\n')
print(median_without_missing_values)
print('\n')
print('................................................')
print('The third quartile values for each column of your dataset are:\n')
print(Q3)
print('\n')
print('................................................')
print('The third quartile values for each column of your dataset without missing values are:\n')
print(Q3_without_missing_values)
print('\n')
print('................................................')
print('The 90th percentile values for each column of your dataset are:\n')
print(ninetieth_percentile)
print('\n')
print('................................................')
print('The 90th percentile values for each column of your dataset without missing values are:\n')
print(ninetieth_percentile_without_missing_values)
print('\n')
print('................................................')
print('The maximum values for each column of your dataset are:\n')
print(maximum)
print('\n')
print('................................................')
print('The standard deviation values for each column of your dataset are:\n')
print(standard_deviation)
print('\n')
print('................................................')
print('The standard deviation values for each column of your dataset without missing values are:\n')
print(standard_deviation_without_missing_values)
print('\n')
print('................................................')
print('The variance values of your dataset are:\n')
print(variance)
print('\n')
print('................................................')
print('The variance values of your dataset without missing values are:\n')
print(variance_without_missing_values)
print('\n')
print('................................................')
print('The mean values for each column of your dataset are:\n')
print(mean)
print('\n')
print('................................................')
print('The mean values for each column of your dataset without missing values are:\n')
print(mean_without_missing_values)
print('\n')
print('................................................')
print('The kurtosis of your dataset is:\n')
print(kurtosis)
print('\n')
print('................................................')
print('The skew of your dataset is:\n')
print(skew)
print('\n')
print('................................................')
print('The modal values of your dataset columns are:\n')
print(mode[:20])
print('\n')
print('................................................')
print('The correlation of your dataset is:\n')
print(correlation)
print('\n')
interquartile_range = self.data.quantile(0.75) - self.data.quantile(0.25)
print('................................................')
print('The interquartile ranges of your dataset columns are:\n')
print(interquartile_range)
print('\n')
interquartile_range_without_missing_data = self.data.dropna().quantile(0.75) - self.data.dropna().quantile(0.25)
print('................................................')
print('The interquartile ranges of your dataset columns without missing data are:\n')
print(interquartile_range_without_missing_data)
print('\n')
coeff_var = (np.std(analyze.extract_numericals(self), ddof=1) / np.mean(analyze.extract_numericals(self))) * 100
print('................................................')
print('The coefficients of variation of your dataset columns are:\n')
print(coeff_var)
print('\n')
coeff_var_without_missing_values = (np.std(analyze.extract_numericals(self).dropna(), ddof=1) / np.mean(analyze.extract_numericals(self).dropna())) * 100
print('................................................')
print('The coefficients of variation of your dataset columns without missing values are:\n')
print(coeff_var_without_missing_values)
print('\n')
print('Do you want to generate a distribution report of your dataset to properly understand the statistical distributions and assumptions of your data?')
response = input('[y/n]: ').lower()
if response == 'y' or response == 'yes':
print("Generating report...", '\n')
print('Do you want to generate a distribution report for the whole dataset or just a specific column?')
response = input('[type y for whole or n for specific]: ').lower()
if response == 'y' or response == 'yes':
print("Generating report...", '\n')
for col in analyze.extract_numericals(self).dropna().columns:
get_dist(analyze.extract_numericals(self).dropna()[col])
elif response == 'n' or response == 'specific':
                action = str(input("Type the column you want to analyze, as written in the dataset (e.g. PassengerId): "))
if action in self.data.columns:
print("Generating report...", '\n')
get_dist(analyze.extract_numericals(self).dropna()[action])
else:
print('Invalid column. Please check the spelling and confirm that the column is in the dataset')
else:
print("Invalid command")
elif response == 'n' or response == 'no':
print("Summarized analysis complete")
else:
print("Summarized analysis complete")
print('Do you want to generate a detailed report on the exploration of your dataset?')
response = input('[y/n]: ').lower()
if response == 'y' or response == 'yes':
print("Generating report...", '\n')
prof = ProfileReport(self.data)
prof.to_file(output_file='report.html')
print('Your Report has been generated and saved as \'report.html\'')
elif response == 'n' or response == 'no':
print('Process Completed')
else:
print('Process Completed')
def get_dist(column):
    df = column
    size = len(df)
    chi_square_statistics = []
    stat_bins = np.linspace(0, 100, 11)
    stat_cutoffs = np.percentile(df, stat_bins)
    visible_freq, bins = np.histogram(df, bins=stat_cutoffs)
    total_freq = np.cumsum(visible_freq)
    dist_types = ['weibull_min', 'norm', 'weibull_max', 'beta',
                  'invgauss', 'uniform', 'gamma', 'expon',
                  'lognorm', 'pearson3', 'triang']
    for name in dist_types:
        dist = getattr(stats, name)
        param = dist.fit(df)
        print("{}\n{}\n".format(dist, param))
        cdf_fit = dist.cdf(stat_cutoffs, *param)
        extant_freq = []
        for i in range(len(stat_bins) - 1):
            cdf_area = cdf_fit[i + 1] - cdf_fit[i]
            extant_freq.append(cdf_area)
        extant_freq = np.array(extant_freq) * size
        total_extant_freq = np.cumsum(extant_freq)
        ss = sum(((total_extant_freq - total_freq) ** 2) / total_freq)
        chi_square_statistics.append(ss)
    results = pd.DataFrame()
    results['Distribution'] = dist_types
    results['chi_square'] = chi_square_statistics
    results.sort_values(['chi_square'], inplace=True)
    print('\nDistributions listed by goodness of fit for {}:'.format(column.name))
    print('............................................')
    print(results)
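# A minimal usage sketch for get_dist (hypothetical data; assumes this
# module's top-level imports of numpy as np, pandas as pd and scipy's stats):
#
#     data = pd.read_csv('train.csv')
#     get_dist(data['Fare'].dropna())   # ranks the candidate distributions
#
# The column must be free of NaNs, since np.percentile and the scipy .fit
# calls above do not handle missing values.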
|
Analyze
|
/Analyze-1.0.1-py3-none-any.whl/Analyze.py
|
Analyze.py
|
import re
import math
from qlib.data.sql import SqlObjectEngine, Table
def getwords(doc):
    splitter=re.compile('\\W+')  # '\\W*' also matches empty strings, which breaks re.split on Python 3.7+
print(doc)
# Split the words by non-alpha characters
words=[s.lower() for s in splitter.split(doc)
if len(s)>2 and len(s)<20]
# Return the unique set of words only
return dict([(w,1) for w in words])
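# For example (with '\W+'): getwords('The quick brown Fox!')
# -> {'the': 1, 'quick': 1, 'brown': 1, 'fox': 1}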
class ClassiferCCTable(Table):
category = str
count = int
class ClassiferFCTable(Table):
feature = str
category = str
count = int
class classifier:
def __init__(self,getfeatures,filename=None):
# Counts of feature/category combinations
self.fc={}
# Counts of documents in each category
self.cc={}
self.getfeatures=getfeatures
def setdb(self,dbfile):
self.con=SqlObjectEngine(database=dbfile)
try:
self.con.create(ClassiferCCTable)
self.con.create(ClassiferFCTable)
except Exception as e:
pass
def incf(self,f,cat):
count=self.fcount(f,cat)
if count==0:
m = ClassiferFCTable(feature=f, category=cat, count=1)
self.con.add(m)
self.con.save(m)
else:
m = self.con.find_one(ClassiferFCTable, feature=f, category=cat)
m['count'] = m.count + 1
self.con.save(m)
def fcount(self,f,cat):
res = self.con.find_one(ClassiferFCTable, feature=f, category=cat)
if not res: return 0
else: return float(res.count)
def incc(self,cat):
count=self.catcount(cat)
if count==0:
m = ClassiferCCTable(category=cat, count=1)
self.con.add(m)
self.con.save(m)
else:
m = self.con.find_one(ClassiferCCTable, category=cat)
m['count'] = m.count + 1
self.con.save(m)
def catcount(self,cat):
res = self.con.find_one(ClassiferCCTable, category=cat)
if res==None: return 0
else: return float(res.count)
def categories(self):
        cur = self.con.find(ClassiferCCTable)
return [d.category for d in cur]
def totalcount(self):
res = self.con.sql.select('ClassiferCCTable', 'count',sum_mode=True)
if res==None: return 0
return list(res)[0]
def train(self,item,cat):
features=self.getfeatures(item)
# Increment the count for every feature with this category
for f in features:
self.incf(f,cat)
# Increment the count for this category
self.incc(cat)
def fprob(self,f,cat):
if self.catcount(cat)==0: return 0
# The total number of times this feature appeared in this
# category divided by the total number of items in this category
return self.fcount(f,cat)/self.catcount(cat)
def weightedprob(self,f,cat,prf,weight=1.0,ap=0.5):
# Calculate current probability
basicprob=prf(f,cat)
# Count the number of times this feature has appeared in
# all categories
totals=sum([self.fcount(f,c) for c in self.categories()])
# Calculate the weighted average
bp=((weight*ap)+(totals*basicprob))/(weight+totals)
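        # e.g. with weight=1.0 and ap=0.5: an unseen feature (totals=0) yields
        # the assumed 0.5; after one sighting in this category (totals=1,
        # basicprob=1.0) the estimate moves to (0.5 + 1.0) / 2 = 0.75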
return bp
class naivebayes(classifier):
def __init__(self,getfeatures):
classifier.__init__(self,getfeatures)
self.thresholds={}
def docprob(self,item,cat):
features=self.getfeatures(item)
# Multiply the probabilities of all the features together
p=1
for f in features: p*=self.weightedprob(f,cat,self.fprob)
return p
def prob(self,item,cat):
catprob=self.catcount(cat)/self.totalcount()
docprob=self.docprob(item,cat)
return docprob*catprob
def setthreshold(self,cat,t):
self.thresholds[cat]=t
def getthreshold(self,cat):
if cat not in self.thresholds: return 1.0
return self.thresholds[cat]
def classify(self,item,default=None):
probs={}
# Find the category with the highest probability
        max=0.0
        best=default  # avoid a NameError when no categories are trained yet
for cat in self.categories():
probs[cat]=self.prob(item,cat)
if probs[cat]>max:
max=probs[cat]
best=cat
# Make sure the probability exceeds threshold*next best
for cat in probs:
if cat==best: continue
if probs[cat]*self.getthreshold(best)>probs[best]: return default
return best
class fisherclassifier(classifier):
def cprob(self,f,cat):
# The frequency of this feature in this category
clf=self.fprob(f,cat)
if clf==0: return 0
# The frequency of this feature in all the categories
freqsum=sum([self.fprob(f,c) for c in self.categories()])
# The probability is the frequency in this category divided by
# the overall frequency
p=clf/(freqsum)
return p
def fisherprob(self,item,cat):
# Multiply all the probabilities together
p=1
features=self.getfeatures(item)
for f in features:
p*=(self.weightedprob(f,cat,self.cprob))
# Take the natural log and multiply by -2
fscore=-2*math.log(p)
# Use the inverse chi2 function to get a probability
return self.invchi2(fscore,len(features)*2)
def invchi2(self,chi, df):
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df//2):
term *= m / i
sum += term
return min(sum, 1.0)
def __init__(self,getfeatures):
classifier.__init__(self,getfeatures)
self.minimums={}
def setminimum(self,cat,min):
self.minimums[cat]=min
def getminimum(self,cat):
if cat not in self.minimums: return 0
return self.minimums[cat]
def classify(self,item,default=None):
# Loop through looking for the best result
best=default
max=0.0
for c in self.categories():
p=self.fisherprob(item,c)
# Make sure it exceeds its minimum
if p>self.getminimum(c) and p>max:
best=c
max=p
return best
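# A minimal usage sketch (hypothetical data; setdb must be called before
# training, since the counters are persisted through SqlObjectEngine):
#
#     cl = naivebayes(getwords)
#     cl.setdb('classifier.db')
#     cl.train('the quick brown fox jumps', 'good')
#     cl.train('make quick money now', 'bad')
#     print(cl.classify('quick money', default='unknown'))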
|
Analyzer-zero
|
/Analyzer-zero-0.3.tar.gz/Analyzer-zero-0.3/analyzerlibs/classify/__init__.py
|
__init__.py
|
from qlib.data.sql import SqlObjectEngine, Table
class Doc(Table):
title = 'title'
url = str
content = str
tag = str
class InLink(Table):
fromid = int
toid= int
strength = float
class OutLink(Table):
fromid = int
toid = int
strength = float
class Hidden(Table):
link_type = 'Doc'
linkid = int
    desc = 'description for the hidden node; the hidden node can target any object, default is Doc'
def __init__(self, handler=None, **kargs):
v = kargs['linkid']
if hasattr(v, '_table'):
kargs['linkid'] = v.id
kargs['link_type'] = v.__class__.__name__
kargs['desc'] = v.__class__.__name__
super().__init__(handler=handler, **kargs)
    def __call__(self, sqlhandler, Obj):
        return sqlhandler.find_one(Obj, ID=self.linkid)
class Neural:
layer_map =[InLink, Hidden, OutLink]
def __init__(self, database):
self._db = SqlObjectEngine(database=database)
self.database_path = database
        if ('InLink',) not in self._db.sql.table_list():
            self._db.create(InLink)
        if ('OutLink',) not in self._db.sql.table_list():
            self._db.create(OutLink)
        if ('Hidden',) not in self._db.sql.table_list():
            self._db.create(Hidden)
def __del__(self):
self._db.close()
def _get_from_db(self, table,**kargs):
if hasattr(table, '_table'):
return self._db.find_one(table, **kargs)
else:
return self._db.sql.first(table, **kargs)
def getStrength(self, fromid, toid, layer):
if isinstance(layer, int):
l = self.__class__.layer_map[layer]
elif hasattr(layer, '_table'):
l = layer
else:
raise Exception("not such layer : ",layer)
res = self._get_from_db(l, fromid=fromid, toid=toid)
if not res:
if l is InLink:
return -0.2
else:
return 0.0
return res.strength
def getAllHiddenIds(self, inids, outids):
res_ids = set()
for inid in inids:
rows = self._db.find(InLink, fromid=inid)
for r in rows:
res_ids.add(r.toid)
for outid in outids:
            rows = self._db.find(OutLink, toid=outid)
for r in rows:
res_ids.add(r.fromid)
return res_ids
def setUpNetwork(self, inids, outids):
self.inids = inids
self.outids = outids
self.hiddenids = self.getAllHiddenIds(inids, outids)
# node
self.ai = [1.0] * len(self.inids)
self.ah = [1.0] * len(self.hiddenids)
self.ao = [1.0] * len(self.outids)
# create weights matrix
self.wi = [[ self.getStrength(inid, hiddenid, 0) for hiddenid in self.hiddenids] for inid in self.inids ]
self.wo = [[ self.getStrength(hiddenid, outid, 2) for hiddenid in self.hiddenids] for outid in self.outids ]
def feedforward(self, activation_func):
# the only inputs are the query words
for i in range(len(self.inids)):
self.ai[i] = 1.0
# hidden activations
self.ah = activation_func(self.inids, self.hiddenids, self.ai, self.wi)
        self.ao = activation_func(self.hiddenids, self.outids, self.ah, self.wo)
return self.ao
def getresult(self, inids, outids):
pass
    def setStrength(self, fromid, toid, layer, strength):
        if isinstance(layer, int):
            l = self.__class__.layer_map[layer]
        elif hasattr(layer, '_table'):
            l = layer
        else:
            raise Exception("no such layer: ", layer)
        # query the row directly: getStrength returns a default value for
        # missing links, so it cannot tell us whether the row exists
        res = self._get_from_db(l, fromid=fromid, toid=toid)
        if not res:
            self._db.add(l(fromid=fromid, toid=toid, strength=strength))
        else:
            res['strength'] = strength
            self._db.save(res)
def __repr__(self):
        return '%d-layer-neural | stored in %s' % (len(self.layer_map), self.database_path)
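# A minimal usage sketch (hypothetical ids; the database file is created on
# demand, and setUpNetwork only sees strengths stored via setStrength):
#
#     net = Neural('nerual.db')
#     net.setStrength(1, 101, 0, 0.5)    # InLink layer: input -> hidden
#     net.setStrength(101, 201, 2, 0.5)  # OutLink layer: hidden -> output
#     net.setUpNetwork([1], [201])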
|
Analyzer-zero
|
/Analyzer-zero-0.3.tar.gz/Analyzer-zero-0.3/analyzerlibs/nerual/_nerual.py
|
_nerual.py
|
# AnaviInfraredPhat
## AnaviInfraredPhat as an application
Publishes temperature, pressure, humidity, luminosity and heat index from your Pi over 0MQ.
```
python3 -m AnaviInfraredPhat --verbose
2019-05-12 10:40:19.192 JST [ DEBUG] __init__ : Logging with global level DEBUG
2019-05-12 10:40:19.196 JST [ DEBUG] __init__ : Sysout logging with level DEBUG
2019-05-12 10:40:19.199 JST [ DEBUG] __init__ : Changing sysout format to logfile
2019-05-12 10:40:19.217 JST [ DEBUG] __init__ : Added TimedRotatingFileHandler to /var/logAnaviInfraredPhat.log with level DEBUG.
2019-05-12 10:40:19.228 JST [ DEBUG] __init__ : Log rotates every D and keeps 15 logs.
2019-05-12 10:40:19.233 JST [ INFO] __main__ : Starting
2019-05-12 10:40:20.093 JST [ DEBUG] __main__ : {"hi": null, "l": 63, "hi_cmt": "No concerns", "h": 42, "p": 1015, "t": 25.8}
```
## AnaviInfraredPhat as a module
The module can be used from a different machine than the Raspberry Pi hosting the Anavi Infrared Phat.
It has a class for each sensor provided with the original Anavi Infrared Phat Kit.
* BH1750
* HTU21D
* BMP180
As well as a class to emit IR codes.
* IRSEND (adapted from Joan's script @ http://abyz.co.uk/rpi/pigpio/code/irrp_py.zip; it replays IR code recordings that must first be created with irrp.py)
Some more functions are available as example or boilerplate code:
* report_tphl
* report_tphl_average
* report_tphl_as_text
Please refer to the code.
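Since the application publishes the readings as JSON over 0MQ, a client on
another machine can subscribe to them with pyzmq. A minimal sketch, assuming
a plain PUB/SUB socket; the endpoint `tcp://raspberrypi:5555` is a
placeholder, as the actual address and port are not documented here:
```
import json
import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://raspberrypi:5555")     # assumed endpoint
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # receive every message

while True:
    data = json.loads(sub.recv_string())  # e.g. {"t": 25.8, "p": 1015, ...}
    print(data)
```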
|
AnaviInfraredPhat
|
/AnaviInfraredPhat-0.0.10.tar.gz/AnaviInfraredPhat-0.0.10/README.md
|
README.md
|
Ancestration – Family Inheritance for Python
============================================
This project implements the so-called *family inheritance* for Python 2 and 3.
It is based on the doctoral thesis of Patrick Lay "Entwurf eines Objektmodells
für semistrukturierte Daten im Kontext von XML Content Management Systemen"
(Rheinische Friedrich-Wilhelms Universität Bonn, 2006) and is developed as
part of the diploma thesis of Michael Pohl "Architektur und Implementierung
des Objektmodells für ein Web Application Framework" (Rheinische
Friedrich-Wilhelms Universität Bonn, 2013-2014).
|
Ancestration
|
/Ancestration-0.1.0.tar.gz/Ancestration-0.1.0/README.txt
|
README.txt
|
from __future__ import unicode_literals
from ancestration import FamilyInheritanceError
invalid_adoption_object = lambda obj: FamilyInheritanceError(
'Invalid object to adopt into family, only classes and functions are allowed: {}'.format(obj))
multiple_family_declarations = lambda: FamilyInheritanceError(
'A module may be made a family module only once.')
invalid_family_extends = lambda extends: FamilyInheritanceError(
    'Only family modules can be family-extended, but got: {}.'.format(type(extends)))
outside_family = lambda: FamilyInheritanceError(
'A family class may only be defined in a family module.')
multiple_family_bases = lambda: FamilyInheritanceError(
'A family class may not extend more than one family class.')
different_family_base = lambda: FamilyInheritanceError(
'The redefined family class has a different family base class than the original.')
no_super_family = lambda: FamilyInheritanceError(
'"FAMILY_INHERIT" was given but there is no super family.')
no_super_family_base = lambda cls_name: FamilyInheritanceError(
'"FAMILY_INHERIT" contains "{}", but no equally named class was found in the super family.'.format(cls_name))
missing_attribute = lambda attr_name: FamilyInheritanceError(
'The "FAMILY_INHERIT" attribute "{}" does not exist in super family base.'.format(attr_name))
def adoption_import_error(family_module, module, import_error):
error_module = import_error.__class__.__module__
if error_module is None or error_module == '__builtin__':
error_module = ''
error_qualified_name = error_module + '.' + import_error.__class__.__name__
message = 'Could not adopt the module "{}" into family "{}", because an exception of type "{}" was raised'.format(
module, family_module.__name__, error_qualified_name)
error_message = '{}'.format(import_error)
if len(error_message) == 0:
message += '.'
else:
message += ' with message: {}'.format(error_message)
return FamilyInheritanceError(message)
|
Ancestration
|
/Ancestration-0.1.0.tar.gz/Ancestration-0.1.0/ancestration/_errors.py
|
_errors.py
|
import inspect
import collections
from types import FunctionType
from ancestration import _family_metaclass
from ancestration._errors import (multiple_family_declarations,
invalid_family_extends, multiple_family_bases,
different_family_base, no_super_family, no_super_family_base,
missing_attribute)
families = dict()
is_family = lambda module: module in families
def add_reload_family(module):
    try:
        from imp import reload
    except ImportError:
        from __builtin__ import reload  # Python 2: reload is a builtin
    def reload_family():
        del families[module]
        return reload(module)
    module.reload_family = reload_family
class FamilyInheritanceSet(collections.Set):
__slots__ = ('_set')
def __init__(self):
self._set = set()
def __contains__(self, item):
return item in self._set
def __iter__(self):
return iter(self._set)
def __len__(self):
return len(self._set)
def _add(self, item):
self._set.add(item)
def walk(self):
for child in iter(self._set):
yield child
for descendant in child.walk():
yield descendant
def clone_class(original_cls, module, new_bases=None, new_dct=None):
name = original_cls.__name__
if new_bases is None:
bases = original_cls.__bases__
else:
bases = new_bases
if new_dct is None:
dct = dict(original_cls.__dict__)
else:
dct = new_dct
new_cls = type(name, bases, dct)
new_cls.__module__ = module.__name__
setattr(module, name, new_cls)
return new_cls
def redefine_function_globals(original_func, new_globals):
return FunctionType(original_func.__code__, new_globals,
original_func.__name__)
class FamilyInheritanceItem(FamilyInheritanceSet):
__slots__ = ('_cls', '_parent', '_family_base', '_super_family_base',
'_is_inherited', '_name', '_is_base', '_attributes')
def __init__(self, cls, parent, super_family_base, is_inherited=False):
FamilyInheritanceSet.__init__(self)
self._name = cls.__name__
self._cls = cls
self._parent = parent
self._is_inherited = is_inherited
self._super_family_base = super_family_base
self._is_base = not isinstance(parent, FamilyInheritanceItem)
parent._add(self)
@property
def name(self):
return self._name
@property
def is_base(self):
return self._is_base
@property
def is_inherited(self):
return self._is_inherited
@property
def cls(self):
return self._cls
@property
def parent(self):
return self._parent
@property
def super_family_base(self):
return self._super_family_base
@property
def attributes(self):
return self._attributes
def _replace_class(self, new_cls, module):
for child in self:
child._replace_base_class(self._cls, new_cls, module)
self._is_inherited = False
self._cls = new_cls
def _replace_base_class(self, old_base, new_base, module):
new_bases = tuple(
new_base if base is old_base else base
for base in self._cls.__bases__
)
new_cls = clone_class(self._cls, module, new_bases)
for child in self:
child._replace_base_class(self._cls, new_cls, module)
self._cls = new_cls
def __hash__(self):
return object.__hash__(self)
class FamilyInheritance(collections.Mapping):
def __init__(self, module, extends):
if is_family(module):
raise multiple_family_declarations()
self._module = module
self._mapping = dict()
self._functions = set()
self._base_classes = FamilyInheritanceSet()
if extends is None:
self._super_family = None
else:
if not is_family(extends):
raise invalid_family_extends(extends)
self._super_family = families[extends]
# Copy attributes from super family:
for attr_name in dir(extends):
if not hasattr(module, attr_name):
attr_value = getattr(extends, attr_name)
if (attr_value.__class__ is FunctionType and
attr_value.__name__ in
self._super_family._functions):
# Redefine family functions:
attr_value = redefine_function_globals(attr_value,
module.__dict__)
self._functions.add(attr_value.__name__)
setattr(module, attr_name, attr_value)
# Inherit family classes from super family:
for inherited_item in self._super_family.walk():
new_bases = []
if inherited_item.is_base:
parent = self._base_classes
else:
parent = self._mapping[inherited_item.parent.name]
new_bases.append(parent.cls)
original_cls = inherited_item.cls
new_bases.append(original_cls)
new_bases = tuple(new_bases)
cls = clone_class(original_cls, module, new_bases, {})
item = FamilyInheritanceItem(cls, parent, inherited_item,
True)
self._mapping[item.name] = item
families[module] = self
add_reload_family(module)
@property
def module(self):
return self._module
@property
def super_family(self):
return self._super_family
@property
def base_classes(self):
return self._base_classes
def walk(self):
return self._base_classes.walk()
def __getitem__(self, key):
return self._mapping[key]
def __iter__(self):
return iter(self._mapping)
def __len__(self):
return len(self._mapping)
def _compute_bases(self, name, bases):
# compute super family base:
try:
super_family_base = self._super_family[name]
super_family_base_cls = super_family_base.cls
except (TypeError, KeyError):
super_family_base = None
super_family_base_cls = None
# compute in-family base and create class bases list:
new_bases = []
try:
family_base = self._mapping[name]
except KeyError:
# newly defined family class
family_base_cls = None
for base in bases:
if base.__name__ in self._mapping:
if family_base_cls is None:
family_base_cls = base
else:
raise multiple_family_bases()
elif base is not object and base is not super_family_base_cls:
new_bases.append(base)
if family_base_cls is None:
family_base = None
else:
family_base = self._mapping[family_base_cls.__name__]
else:
# redefined family class
family_base_cls = family_base.cls
for base in bases:
try:
defined_family_base = self._mapping[base.__name__]
except KeyError:
if (base is not object
and base is not super_family_base_cls):
new_bases.append(base)
else:
if (defined_family_base is not family_base
and base.__name__ != name):
raise different_family_base()
# add family bases to class bases:
if super_family_base is not None:
new_bases.insert(0, super_family_base_cls)
if family_base is not None:
new_bases.insert(0, family_base_cls)
if len(new_bases) == 0:
new_bases.append(object)
return family_base, super_family_base, tuple(new_bases)
def _apply_family_inherit(self, name, dct, super_family_base):
try:
family_inherit = dct['FAMILY_INHERIT']
except KeyError:
return
if self._super_family is None:
raise no_super_family()
if super_family_base is None:
raise no_super_family_base(name)
super_family_base_cls = super_family_base.cls
for family_inherit_name in family_inherit:
family_inherit_name = str(family_inherit_name)
try:
family_inherit_value = getattr(super_family_base_cls,
family_inherit_name)
except AttributeError:
raise missing_attribute(family_inherit_name)
dct[family_inherit_name] = family_inherit_value
def _create_family_class(self, name, bases, dct):
family_base, super_family_base, new_bases = self._compute_bases(
name, bases)
self._apply_family_inherit(name, dct, super_family_base)
dct['in_family_base'] = None if family_base is None else family_base.cls
dct['super_family_base'] = (None if super_family_base is None
else super_family_base.cls)
cls = _family_metaclass(name, new_bases, dct)
cls.__module__ = self._module.__name__
try:
item = self._mapping[name]
except KeyError:
if family_base is None:
parent_item = self._base_classes
else:
parent_item = family_base
item = FamilyInheritanceItem(cls, parent_item, super_family_base)
self._mapping[name] = item
else:
item._replace_class(cls, self._module)
return cls
def _register_family_function(self, func):
self._functions.add(func.__name__)
del collections
|
Ancestration
|
/Ancestration-0.1.0.tar.gz/Ancestration-0.1.0/ancestration/_family_inheritance.py
|
_family_inheritance.py
|
def _get_calling_module(stack_number=1):
import inspect
stack = inspect.stack()
frame = stack[stack_number][0]
module = inspect.getmodule(frame)
del stack, frame
return module
class _FamilyExtender(object):
def __init__(self, parent, module_name):
self._parent = parent
self._module_name = module_name
def __getattr__(self, module_name):
return _FamilyExtender(self, module_name)
def __call__(self):
def module_names():
current = self
while current is not None:
yield current._module_name
current = current._parent
extends = '.'.join(reversed(list(module_names())))
return family(extends, 3)
class family(object):
'''\
Callable to make current module a family.
'''
def __call__(self, extends=None, _stack_number=2):
if extends is not None:
import inspect
if not inspect.ismodule(extends):
from importlib import import_module
extends = import_module(str(extends))
module = _get_calling_module(_stack_number)
from ancestration._family_inheritance import FamilyInheritance
FamilyInheritance(module, extends)
return extends
def __getattr__(self, module_name):
return _FamilyExtender(None, module_name)
family = family()
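# A minimal usage sketch (hypothetical module names). In the module that is
# to become a family:
#
#     from ancestration import family
#     family()                    # make the current module a family module
#
# and in a module extending an existing family module:
#
#     family('my.base_family')    # dotted name or module object, or
#     family.my.base_family()     # the same via the attribute chain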
class _family_metaclass_mixin(object):
def mro(self):
import logging; logging.warn("mro()")
bases = type.mro(self)
cls = bases[0]
from collections import deque
pending = deque()
pending.append(cls)
bases = deque()
while len(pending) > 0:
current = pending.popleft()
bases.append(current)
pending.extend(current.__bases__)
import logging; logging.warn(bases)
return tuple(bases)
def __new__(metacls, name, bases, dct):
import logging; logging.warn("__new__()")
cls = type.__new__(metacls, name, bases, dct)
return cls
def __init__(self, name, bases, dct):
import logging; logging.warn("__init__()")
type.__init__(self, name, bases, dct)
import abc
class _family_metaclass(abc.ABCMeta):
_abcmeta = abc.ABCMeta
def mro(self):
bases = type.mro(self)
cls = bases[0]
from collections import deque
visited = set()
pending = deque()
pending.append(cls)
bases = deque()
while len(pending) > 0:
current = pending.popleft()
if current not in visited:
visited.add(current)
bases.append(current)
pending.extend(current.__bases__)
return tuple(bases)
def __new__(metacls, name, bases, dct):
cls = metacls._abcmeta.__new__(metacls, name, bases, dct)
return cls
def __init__(self, name, bases, dct):
self._abcmeta.__init__(self, name, bases, dct)
del abc
class family_class(_family_metaclass):
'''\
To be used as a metaclass or as a class decorator. This includes the class
in the family inheritance. See :ref:`family_inheritance` and
:ref:`define_family_class` for more information.
:param args: Must contain either only the class (if used as a class
decorator) or the name, the tuple of bases and the dictionary of the
class to create (if used as a metaclass).
:raises ancestration.FamilyInheritanceError: if there is an error in the
specification of the family class.
:raises ValueError: if the wrong number of arguments is supplied.
:returns: The family class object.
'''
def __new__(metacls, *args):
if len(args) not in (1, 3):
raise ValueError(
'Invalid number of arguments, only 1 or 3 are allowed, got {}: {}'.format(
len(args), args))
module = _get_calling_module(2)
from ancestration._family_inheritance import families
try:
family = families[module]
except KeyError:
from ancestration._errors import outside_family
raise outside_family()
try:
name, bases, dct = args
except ValueError:
cls = args[0]
name = cls.__name__
bases = cls.__bases__
dct = dict(cls.__dict__)
dct['_family_module'] = module
return family._create_family_class(name, bases, dct)
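# A minimal usage sketch (hypothetical class; the enclosing module must
# already have been made a family with family()):
#
#     @family_class
#     class Node(object):
#         pass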
def family_function(func):
'''\
A function decorator that registers the name of the decorated non-lambda
function as a `family function`. This means in child family modules the
same function is available under the same name, but there it uses the
child family's dictionary for global lookup. Thus it uses overridden
classes, functions and so on.
Note that this only works for functions defined and accessible as
attributes of the family module, not for methods or nested functions.
:param func: The function to be made a family function.
:raises ValueError: if ``func`` is not a function or it is a lambda
function.
:raises ancestration.FamilyInheritanceError: if it is not used in a
family module.
:returns: ``func``
'''
import types
    if (func.__class__ is not types.FunctionType
            or func.__name__ == '<lambda>'):  # a lambda's __name__ is '<lambda>'
        raise ValueError('May only be used on non-lambda functions.')
module = _get_calling_module(2)
from ancestration._family_inheritance import families
try:
family = families[module]
except KeyError:
from ancestration._errors import outside_family
raise outside_family()
family._register_family_function(func)
return func
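# A minimal usage sketch (hypothetical; the function must be defined at
# module level in a family module so child families re-bind its globals):
#
#     @family_function
#     def make_node():
#         return Node()   # Node resolves in the calling family's namespace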
def adopt(*args, **kargs):
'''\
A function which integrates the classes and functions given as arguments
into the family inheritance, thus making them family classes and family
functions. The arguments may also be strings, in this case the object is
looked up in the module which is given in the named argument ``module`` and
defaults to the current (family) module.
A callback function may be supplied which is called with each adopted
object. If the returned value is different from :const:`None` the
class/function is replaced by this value.
:param args: The functions and classes to integrate into family inheritance,
may also be given as strings. If none are given but the ``module``
argument is specified, all classes and functions of that module are
adopted.
:param kargs: The named argument ``callback`` may be given as a function
with one argument, which is called with each adopted object. The named
argument ``module`` may be a module or a string denoting a module. If a
``module`` string starts with a ``.``, it is resolved starting with the
module where :func:`!adopt` is called from. To create custom
adoption-functions using this function, specify the named
``stack_depth`` argument with a number denoting the number of calls from
the module where it is called until :func:`!adopt` is called (a function
to be called instead of :func:`!adopt`, directly calling should specify
a ``1``). If the named argument ``include_attributes`` is given and
either :const:`True` or an iterable of strings, all or the given
non-class and non-function attributes will also be imported.
:raises FamilyInheritanceError: if an argument specifies neither a class nor
function.
:raises ImportError: If the module given in the argument ``module`` as a
string could not be imported.
'''
family_module = _get_calling_module(2 + kargs.get('stack_depth', 0))
from inspect import isclass, isfunction, ismodule
    islambda = lambda obj: obj.__name__ == '<lambda>'  # a lambda's class name is just 'function'
is_family_item = lambda item: ((isclass(item)
or (isfunction(item) and not islambda(item)))
and item.__module__ == module.__name__)
try:
module = kargs['module']
except KeyError:
module = family_module
else:
if not ismodule(module):
if module[0] == '.':
module = family_module.__name__ + module
from importlib import import_module
try:
module = import_module(module)
except Exception as e:
from ancestration._errors import adoption_import_error
raise adoption_import_error(family_module, module, e)
try:
include_attributes = kargs['include_attributes']
if include_attributes is not True and include_attributes is not False:
include_attributes = set(str(attr_name)
for attr_name in include_attributes)
except KeyError:
include_attributes = False
from ancestration._family_inheritance import families
try:
family = families[family_module]
except KeyError:
from ancestration._errors import outside_family
raise outside_family()
callback = kargs.get('callback', None)
attributes = {}
if len(args) == 0 and 'module' in kargs:
items = []
for name in dir(module):
item = getattr(module, name)
if is_family_item(item):
items.append(item)
elif include_attributes is not False:
if include_attributes is True or name in include_attributes:
attributes[name] = item
else:
items = args
if include_attributes is not False:
if include_attributes is True:
attribute_names = dir(module)
else:
attribute_names = include_attributes
for name in attribute_names:
item = getattr(module, name)
attributes[name] = item
for item in items:
try:
if str(item) == item:
try:
item = getattr(module, item)
except AttributeError:
raise FamilyInheritanceError(
'There is no attribute "{}" in module "{}".'.format(item,
module.__name__))
except TypeError:
pass
name = item.__name__
if isclass(item):
bases = item.__bases__
dct = dict(item.__dict__)
family_obj = family._create_family_class(name, bases, dct)
elif isfunction(item) and not islambda(item):
family._register_family_function(item)
family_obj = item
else:
from ancestration._errors import invalid_adoption_object
raise invalid_adoption_object(item)
        if callback is not None:
            callback_result = callback(family_obj)
            if callback_result is not None:
                family_obj = callback_result
        setattr(family_module, name, family_obj)
for attr_name, attr_value in attributes.items():
setattr(family_module, attr_name, attr_value)
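# A minimal usage sketch (hypothetical names):
#
#     adopt(SomeClass, helper_function)    # adopt objects directly
#     adopt('SomeClass', module='.impl')   # adopt by name from a relative module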
class class_module(object):
from importlib import import_module
_import = staticmethod(import_module)
def __call__(self, cls_or_obj):
        if isinstance(cls_or_obj, type):  # also matches classes with a custom metaclass
cls = cls_or_obj
else:
cls = cls_or_obj.__class__
return self.__get__(None, cls)
def __get__(self, obj, cls):
return self._import(cls.__module__)
def __set__(self, obj, value):
raise AttributeError('Setting is not allowed.')
def __delete__(self, obj):
raise AttributeError('Deleting is not allowed.')
class_module = class_module()
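# A minimal usage sketch (hypothetical class): as a descriptor it yields the
# module that defines the class it is accessed on; called directly it does
# the same for a class or instance:
#
#     class Widget(object):
#         module = class_module
#
#     Widget().module        # -> the module object that defines Widget
#     class_module(Widget)   # -> the same module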
class super_descriptor(object):
def __get__(self, obj, cls):
return super(cls, obj)
def __set__(self, obj, value):
raise AttributeError('Setting is not allowed.')
def __delete__(self, obj):
raise AttributeError('Deleting is not allowed.')
super_descriptor = super_descriptor()
class FamilyInheritanceError(Exception):
'''Raised if there is a problem with the family inheritance.'''
pass
class LAZY_CLS_ATTR(object):
__slots__ = '_operation', '_parent', '_value', '_has_value'
__isabstractmethod__ = False # fix for being detected as an abstract method
def __init__(self, operation, parent):
self._operation = operation
self._parent = parent
self._has_value = False
def __getattr__(self, name):
return self.__class__(lambda current: getattr(current, name), self)
def __getitem__(self, key):
if self._parent is None:
raise ValueError(
'First retrieve an attribute from ancestration.LAZY_CLS_ATTR before retrieving items.')
return self.__class__(lambda current: current[key], self)
def __call__(self, *args, **kargs):
if self._parent is None:
raise ValueError(
'First retrieve an attribute from ancestration.LAZY_CLS_ATTR before calling.')
return self.__class__(lambda current: current(*args, **kargs), self)
def __get__(self, obj, cls):
if self._has_value:
return self._value
if self._parent is None:
raise ValueError(
'First retrieve an attribute from ancestration.LAZY_CLS_ATTR before assigning to a class attribute.')
from collections import deque
operations = deque()
lazy_obj = self
while lazy_obj is not None:
operation = lazy_obj._operation
if operation is not None:
operations.appendleft(operation)
lazy_obj = lazy_obj._parent
value = cls
while len(operations) > 0:
operation = operations.popleft()
value = operation(value)
self._value = value
self._has_value = True
del self._operation, self._parent
return value
def __set__(self, obj, value):
raise AttributeError('Setting is not allowed.')
def __delete__(self, obj):
raise AttributeError('Deleting is not allowed.')
LAZY_CLS_ATTR = LAZY_CLS_ATTR(None, None)
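# A minimal usage sketch (hypothetical class): the chained operations are
# recorded and replayed against the class the descriptor is finally
# accessed on, then cached:
#
#     class Config(object):
#         registry = {}
#         default = LAZY_CLS_ATTR.registry.get('default', None)
#
#     Config.default   # evaluates Config.registry.get('default', None) once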
|
Ancestration
|
/Ancestration-0.1.0.tar.gz/Ancestration-0.1.0/ancestration/__init__.py
|
__init__.py
|
***************
Getting started
***************
Installation
------------
.. grid:: 2
.. grid-item-card:: Installing with conda
:text-align: center
:columns: 12
.. code-block:: bash
conda create -n anchor -c conda-forge anchor-annotator
+++
.. button-link:: https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html
:color: primary
:expand:
Install Conda
.. grid-item-card:: Running anchor
:text-align: center
.. code-block:: bash
conda activate anchor
mfa anchor
.. grid-item-card:: First steps
:text-align: center
First time using Anchor? Want a walk-through of a specific use case?
+++
.. button-ref:: first_steps
:expand:
:color: primary
First steps
.. toctree::
:maxdepth: 1
:hidden:
installation
first_steps/index
|
Anchor-annotator
|
/Anchor_annotator-0.0.9.tar.gz/Anchor_annotator-0.0.9/docs/source/getting_started.rst
|
getting_started.rst
|