import os
import pathlib
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, BatchSampler
from torch.utils.data import WeightedRandomSampler
import scipy.sparse as ssp
import sklearn.preprocessing as prep
import sklearn.pipeline as ppln
from sklearn.utils import class_weight
import matplotlib.pyplot as plt
from utils import to_sparse_tensor, bin_data, normalize, split_bucketed_data, \
summarize_feature_matrix
def get_composite_transformer(n_quantiles):
transformer = ppln.Pipeline([
("quantile", prep.QuantileTransformer(output_distribution="normal",
n_quantiles=n_quantiles)),
("normalize", prep.StandardScaler())
])
return transformer
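
# Illustrative sketch (not part of the original module): the composite
# transformer first maps each feature through a quantile transform to an
# approximately normal distribution and then standardizes it. The demo below
# uses synthetic data and is only meant to show the expected round-trip
# behaviour; it is never called in this file.
def _demo_composite_transformer():
    rng = np.random.default_rng(0)
    x = rng.lognormal(mean=0.0, sigma=1.0, size=(500, 1)).astype(np.float32)
    transformer = get_composite_transformer(n_quantiles=100)
    x_scaled = transformer.fit_transform(x)
    x_restored = transformer.inverse_transform(x_scaled)
    # After the transform the data is roughly zero-mean/unit-variance, and
    # inverse_transform approximately recovers the original values.
    print(x_scaled.mean(), x_scaled.std())
    print(np.abs(x_restored - x).max())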
class BinnedTransformer:
def __init__(self, num_bins, create_transformer_f):
self.num_bins = num_bins
self.transformers = [create_transformer_f() for _ in range(num_bins)]
def fit_transform(self, x_reg, x_class):
transformed_x_reg = np.copy(x_reg)
for bin_idx in range(self.num_bins):
sample_idcs = x_class == bin_idx
transformer = self.transformers[bin_idx]
transformed_x_reg[sample_idcs] = transformer.fit_transform(
transformed_x_reg[sample_idcs])
return transformed_x_reg
def inverse_transform(self, x_reg, x_class):
x_reg = x_reg.reshape(-1)
transformed_x_reg = np.copy(x_reg)
for bin_idx in range(self.num_bins):
sample_idcs = x_class == bin_idx
if np.sum(sample_idcs) == 0: continue # no sample of that class
transformer = self.transformers[bin_idx]
transformed_x_reg[sample_idcs] = transformer.inverse_transform(
x_reg[sample_idcs].reshape(-1, 1)).reshape(-1)
return transformed_x_reg
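
# Illustrative sketch (not part of the original module): BinnedTransformer
# fits one composite transformer per label bin, so inverting a prediction
# needs both the regression value and the bin index. The values below are
# hypothetical demo data; the function is never called in this file.
def _demo_binned_transformer():
    labels = np.array([[1.0], [2.0], [150.0], [200.0], [3000.0], [5000.0]], dtype=np.float32)
    bins = np.array([0, 0, 1, 1, 2, 2])
    scaler = BinnedTransformer(num_bins=3,
                               create_transformer_f=lambda: get_composite_transformer(n_quantiles=2))
    scaled = scaler.fit_transform(labels, bins)
    restored = scaler.inverse_transform(scaled, bins)
    # restored is approximately equal to the original labels
    print(scaled.reshape(-1), restored)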
class GraphTopologicalData:
def __init__(self, adj_matrix=None, unweighted_adj_matrix=None,
inc_matrix=None, inc_matrix_dense=None, edge_indices=None,
edge_weights=None):
self.adj_matrix = adj_matrix # NxN sparse matrix
self.unweighted_adj_matrix = unweighted_adj_matrix # NxN sparse matrix
self.inc_matrix = inc_matrix # NxE sparse matrix
self.inc_matrix_dense = inc_matrix_dense # NxE dense matrix
self.edge_indices = edge_indices # Ex2 dense matrix
self.edge_weights = edge_weights # E dense vector
class UrbanPlanningDataset:
def __init__(self, data_base_path="Data/", num_bins=4, batch_size=32,
n_quantiles=1000, resample=False,
excluded_node_feature_columns=tuple(),
excluded_edge_feature_columns=tuple(),
use_binned_transformer=False, include_approx_flows=False,
flow_adj_threshold=0, seed=7):
"""
Loads city data set.
:param data_base_path: Location at which to find the node features,
edge features, and the adjacency matrix.
        :param num_bins: Number of bins for dividing the dataset labels. The
        bin index may be used as a classification target or for computing MAEs
        for each bin separately.
:param batch_size:
:param n_quantiles: Number of quantiles to use for the quantile
transformer that preprocesses features and labels.
:param excluded_node_feature_columns: Tuple of names of the columns
to remove from the node feature data set.
:param excluded_edge_feature_columns: Tuple of names of the columns to
remove from the edge feature data set.
:param resample: If True, we use a weighted random sampler to ensure
that each epoch contains an equal number of samples from each bin.
:param use_binned_transformer: If True, the edge labels are rescaled
using an individual transformer for each bin. Inverting the
transformation then requires both a regression and classification
prediction.
:param include_approx_flows: If True, the edge features include the
approximate flows (normally used just for flow adjacency matrix).
        :param flow_adj_threshold: When constructing the unweighted flow
        adjacency matrix, only include edges with a flow greater than or equal
        to that threshold.
:param seed: Random seed to always obtain the same split into training,
validation, and test set.
        After loading, the instance provides (among other attributes):
- Node features of shape [N, K]
- Sparse adjacency matrix of shape [N, N]
- Loader for the training set of edges
- Loader for the validation set of edges
- Loader for the test set of edges
- Number of node features
- Number of edge features
- Scaler used for edge labels
"""
print("Loading data")
self.num_bins = num_bins
self.batch_size = batch_size
self.n_quantiles = n_quantiles
self.use_binned_transformer = use_binned_transformer
get_composite_transformer_f = lambda: get_composite_transformer(
n_quantiles=n_quantiles)
# Load node data
(self.node_feats, self.num_nodes, self.num_node_feats,
self.node_scaler) = self._load_node_data(data_base_path,
get_composite_transformer_f,
excluded_node_feature_columns)
# Load edge data
(flow_edge_indices, self.edge_feats, self.edge_labels,
self.edge_labels_unscaled, self.label_scaler, self.edge_scaler,
self.num_edges, self.num_edge_feats) = self._load_edge_data(
data_base_path,
get_composite_transformer_f,
include_approx_flows,
excluded_edge_feature_columns)
self.max_label = np.max(self.edge_labels_unscaled)
print(f"\tMax label {self.max_label}")
(train_idcs, val_idcs, test_idcs) = self._load_dataset_split(
data_base_path)
# Load flow graph data
(flow_adj_matrix, flow_inc_matrix, flow_adj_indices,
unweighted_flow_adj_matrix,
flow_adj_values) = self._load_flow_graph_data(
data_base_path, self.num_nodes, self.num_edges, flow_adj_threshold)
self.flow_topology = GraphTopologicalData(
adj_matrix=flow_adj_matrix,
edge_indices=flow_adj_indices,
unweighted_adj_matrix=unweighted_flow_adj_matrix,
inc_matrix=flow_inc_matrix,
edge_weights=flow_adj_values
)
# Load geographical graph data
(geo_adj_matrix, geo_inc_matrix,
geo_edge_indices, geo_adj_values) = self._load_geo_graph_data(
data_base_path, self.num_nodes, self.num_edges, self.flow_topology)
self.geo_topology = GraphTopologicalData(
adj_matrix=geo_adj_matrix,
inc_matrix=geo_inc_matrix,
edge_indices=geo_edge_indices,
edge_weights=geo_adj_values)
# Load bin data
self.bin_bounds = [10.0, 100.0, 1000.0, 10000.0]
(self.edge_buckets, self.train_bin_weights, self.val_bin_weights,
self.test_bin_weights) = self._load_bin_data(self.bin_bounds,
self.edge_labels_unscaled,
num_bins, train_idcs,
val_idcs, test_idcs)
print(f"\tBin counts: {np.array([np.sum(self.edge_buckets == i) for i in range(num_bins)])}")
print(f"\tTraining bin weights: {self.train_bin_weights}")
print(f"\tValidation bin weights: {self.val_bin_weights}")
print(f"\tTest bin weights: {self.test_bin_weights}")
# If specified, use the binned transformer to transform labels
if use_binned_transformer:
self.label_scaler = BinnedTransformer(self.num_bins,
get_composite_transformer_f)
self.edge_labels = self.label_scaler.fit_transform(
self.edge_labels_unscaled.reshape(-1, 1), self.edge_buckets).reshape(-1)
# plt.hist(self.edge_labels, bins=100)
# plt.show()
# Create edge feature matrix
indices = flow_edge_indices.transpose(1, 0)
values = self.edge_feats
        edge_feat_matrix = torch.sparse_coo_tensor(torch.from_numpy(indices),
                                                   torch.from_numpy(values))
self.edge_feat_matrix = edge_feat_matrix.to_dense()
# Convert numpy arrays to tensors
self.node_feats = torch.from_numpy(self.node_feats)
self.edge_feats = torch.from_numpy(self.edge_feats)
flow_edge_indices = torch.from_numpy(flow_edge_indices)
self.flow_topology.edge_indices = torch.from_numpy(self.flow_topology.edge_indices)
self.flow_topology.edge_weights = torch.from_numpy(self.flow_topology.edge_weights)
self.geo_topology.edge_indices = torch.from_numpy(self.geo_topology.edge_indices)
self.geo_topology.edge_weights = torch.from_numpy(self.geo_topology.edge_weights)
self.edge_labels = torch.from_numpy(self.edge_labels)
self.edge_labels_unscaled = torch.from_numpy(self.edge_labels_unscaled)
self.edge_buckets = torch.from_numpy(self.edge_buckets)
self.train_bin_weights = torch.from_numpy(self.train_bin_weights)
self.val_bin_weights = torch.from_numpy(self.val_bin_weights)
self.test_bin_weights = torch.from_numpy(self.test_bin_weights)
# Matrices
self.geo_topology.adj_matrix = to_sparse_tensor(normalize(self.geo_topology.adj_matrix))
self.geo_topology.inc_matrix = to_sparse_tensor(self.geo_topology.inc_matrix)
self.flow_topology.adj_matrix = to_sparse_tensor(self.flow_topology.adj_matrix) # Sparse tensor of shape [N, N] containing the flow values between nodes.
self.flow_topology.unweighted_adj_matrix = to_sparse_tensor(self.flow_topology.unweighted_adj_matrix)
self.flow_topology.inc_matrix = to_sparse_tensor(self.flow_topology.inc_matrix)
self._check_data_consistency()
# Create data loaders
(self.train_loader, self.val_loader,
self.test_loader) = self._create_data_loaders(train_idcs, val_idcs,
test_idcs,
self.train_bin_weights,
flow_edge_indices, # different from flow_graph_topology.edge_indices because of additional 0-flows
self.edge_feats,
self.edge_labels,
self.edge_buckets,
batch_size, resample,
seed)
print("Finished loading data")
def _check_data_consistency(self):
tensors = [self.node_feats, self.edge_feats,
self.flow_topology.edge_indices,
self.geo_topology.edge_indices, self.edge_labels,
self.edge_labels_unscaled, self.edge_buckets,
self.train_bin_weights, self.val_bin_weights,
self.test_bin_weights, self.geo_topology.adj_matrix,
self.geo_topology.inc_matrix, self.flow_topology.adj_matrix,
self.flow_topology.unweighted_adj_matrix,
self.flow_topology.inc_matrix, self.edge_feat_matrix]
print("Checking ", end="")
for idx, tensor in enumerate(tensors):
print(f"{idx}, ", end="")
            if tensor.is_sparse:
assert not torch.isnan(tensor.coalesce().indices()).any()
assert not torch.isnan(tensor.coalesce().values()).any()
else:
assert not torch.isnan(tensor).any()
print("done")
def to(self, device):
"""
        Moves all tensors of the dataset that will not be iterated over in
        minibatches to the specified device.
:param device: Device specifier.
"""
self.node_feats = self.node_feats.to(device=device)
self.edge_feats = self.edge_feats.to(device=device)
self.flow_topology.edge_indices = self.flow_topology.edge_indices.to(device=device)
self.geo_topology.edge_indices = self.geo_topology.edge_indices.to(device=device)
self.train_bin_weights = self.train_bin_weights.to(device=device)
self.geo_topology.adj_matrix = self.geo_topology.adj_matrix.to(device=device)
self.geo_topology.inc_matrix = self.geo_topology.inc_matrix.to(device=device)
self.geo_topology.edge_weights = self.geo_topology.edge_weights.to(device=device)
self.flow_topology.adj_matrix = self.flow_topology.adj_matrix.to(device=device)
self.flow_topology.unweighted_adj_matrix = self.flow_topology.unweighted_adj_matrix.to(
device=device)
self.flow_topology.inc_matrix = self.flow_topology.inc_matrix.to(device=device)
self.flow_topology.edge_weights = self.flow_topology.edge_weights.to(device=device)
self.edge_feat_matrix = self.edge_feat_matrix.to(device=device)
@staticmethod
def _load_node_data(data_base_path, get_composite_transformer_f,
excluded_columns):
# Node features
node_data = pd.read_pickle(os.path.join(data_base_path, "node_data.pk"))
if len(excluded_columns) > 0:
node_data.drop(list(excluded_columns), axis=1, inplace=True)
node_feats = node_data.values
# Rescale continuous features
node_scaler = get_composite_transformer_f()
cont_feature_idcs = UrbanPlanningDataset._get_continuous_feature_idcs(node_data)
node_feats[:, cont_feature_idcs] = node_scaler.fit_transform(node_feats[:, cont_feature_idcs])
node_feats = node_feats.astype(np.float32)
num_nodes = node_feats.shape[0]
num_node_feats = node_feats.shape[1]
return node_feats, num_nodes, num_node_feats, node_scaler
@staticmethod
def _load_edge_data(data_base_path, get_composite_transformer_f,
include_approx_flows, excluded_columns):
# Edge data
edge_data = pd.read_pickle(os.path.join(data_base_path, "edge_data.pk"))
if len(excluded_columns) > 0:
edge_data.drop(list(excluded_columns), axis=1, inplace=True)
edge_feats = edge_data.values
        edge_indices = edge_feats[:, :2].astype(np.int64)
edge_feats = edge_feats[:, 2:]
# Load approximate flows and potentially concatenate to edge features
# approx_flows = np.load(os.path.join(data_base_path,
# "approx_flows.npy"))
if include_approx_flows:
raise NotImplementedError
# edge_feats = np.concatenate((edge_feats, approx_flows.reshape(-1, 1)),
# axis=-1)
num_edges = edge_feats.shape[0]
edge_labels = np.load(os.path.join(data_base_path, "flows.npy"))
edge_labels_unscaled = np.copy(edge_labels).astype(np.float32)
# Transform edge features
edge_scaler = get_composite_transformer_f()
cont_feature_idcs = UrbanPlanningDataset._get_continuous_feature_idcs(edge_data.iloc[:, 2:])
edge_feats[:, cont_feature_idcs] = edge_scaler.fit_transform(edge_feats)[:, cont_feature_idcs]
edge_feats = edge_feats.astype(np.float32)
# Transform edge labels
edge_labels = edge_labels.astype(np.float32)
label_scaler = get_composite_transformer_f()
edge_labels = label_scaler.fit_transform(
edge_labels.reshape(-1, 1)).reshape(-1)
num_edge_feats = edge_feats.shape[1]
return (edge_indices, edge_feats, edge_labels, edge_labels_unscaled,
label_scaler, edge_scaler, num_edges, num_edge_feats)
@staticmethod
def _load_dataset_split(data_base_path):
data_base_path = pathlib.Path(data_base_path)
train_idcs = np.load(data_base_path / "train_edge_indices.npy")
val_idcs = np.load(data_base_path / "val_edge_indices.npy")
test_idcs = np.load(data_base_path / "test_edge_indices.npy")
return train_idcs, val_idcs, test_idcs
@staticmethod
def _load_bin_data(bin_bounds, edge_labels_unscaled, num_bins,
train_idcs, val_idcs, test_idcs):
# Get edge buckets (assign each edge to a bucket based on magnitude of
# flow)
edge_buckets = bin_data(edge_labels_unscaled, num_bins,
scale="custom", bin_bounds=bin_bounds)
# Compute weights for each bucket to counterbalance the imbalanced
# class/bin distribution
        train_bin_weights = class_weight.compute_class_weight(
            class_weight='balanced', classes=np.unique(edge_buckets),
            y=edge_buckets[train_idcs])
        val_bin_weights = class_weight.compute_class_weight(
            class_weight='balanced', classes=np.unique(edge_buckets),
            y=edge_buckets[val_idcs])
        test_bin_weights = class_weight.compute_class_weight(
            class_weight='balanced', classes=np.unique(edge_buckets),
            y=edge_buckets[test_idcs])
train_bin_weights = train_bin_weights.astype(np.float32)
val_bin_weights = val_bin_weights.astype(np.float32)
test_bin_weights = test_bin_weights.astype(np.float32)
return edge_buckets, train_bin_weights, val_bin_weights, test_bin_weights
@staticmethod
def _load_flow_graph_data(data_base_path, num_nodes, num_edges,
flow_adj_threshold):
# Flow adjacency matrix
flow_adj_indices = np.load(os.path.join(data_base_path,
"flow_adj_indices.npy")).T
flow_adj_values = np.load(os.path.join(data_base_path,
"flow_adj_values.npy"))
flow_adj_matrix = ssp.coo_matrix((flow_adj_values,
(flow_adj_indices[0],
flow_adj_indices[1])),
shape=(num_nodes, num_nodes))
flow_adj_matrix = flow_adj_matrix.tocsr()
unweighted_flow_adj_indices = flow_adj_indices[:,
flow_adj_values >= flow_adj_threshold]
flow_adj_values = flow_adj_values[flow_adj_values >= flow_adj_threshold]
        unweighted_flow_adj_matrix = ssp.coo_matrix(
            (flow_adj_values,
             (unweighted_flow_adj_indices[0], unweighted_flow_adj_indices[1])),
            shape=(num_nodes, num_nodes)).tolil()
        # setdiag requires a format that supports item assignment, so add the
        # self-loops on a LIL matrix and convert back to COO to read out the
        # indices and values
        unweighted_flow_adj_matrix.setdiag(np.ones(num_nodes))
        unweighted_flow_adj_matrix = unweighted_flow_adj_matrix.tocoo()
        flow_adj_values = unweighted_flow_adj_matrix.data
        flow_adj_indices = np.stack((unweighted_flow_adj_matrix.row,
                                     unweighted_flow_adj_matrix.col), axis=-1)
flow_adj_indices = flow_adj_indices.astype(np.int64)
flow_adj_values = flow_adj_values.astype(np.float32)
        unweighted_flow_adj_matrix = (unweighted_flow_adj_matrix.tocsr() > 0.0).astype(float)
# Flow incidence matrix for all edges
flow_inc_indices = np.load(os.path.join(data_base_path,
"flow_inc_indices.npy"))
flow_inc_matrix = ssp.coo_matrix(
(np.ones(flow_inc_indices.shape[1]),
(flow_inc_indices[0],
flow_inc_indices[1])),
shape=(num_nodes, num_edges))
flow_inc_matrix = flow_inc_matrix.tocsr()
return (flow_adj_matrix, flow_inc_matrix, flow_adj_indices,
unweighted_flow_adj_matrix, flow_adj_values)
@staticmethod
def _load_geo_graph_data(data_base_path, num_nodes, num_edges,
flow_topology):
# Geographical adjacency matrix
geo_adj_indices = np.load(os.path.join(data_base_path,
"geo_adj_indices.npy"))
geo_adj_matrix = ssp.coo_matrix((np.ones(geo_adj_indices.shape[1]),
(geo_adj_indices[0],
geo_adj_indices[1])),
shape=(num_nodes, num_nodes))
geo_adj_matrix = geo_adj_matrix.tocsr()
# Geographical incidence matrix for all edges
geo_inc_indices = np.load(os.path.join(data_base_path,
"geo_inc_indices.npy"))
geo_inc_matrix = ssp.coo_matrix(
(np.ones(geo_inc_indices.shape[1]),
(geo_inc_indices[0],
geo_inc_indices[1])),
shape=(num_nodes, num_edges))
geo_inc_matrix = geo_inc_matrix.tocsr()
# Get flows for the geographical edges
        all_edges = np.array(flow_topology.adj_matrix.todense()).reshape(-1)  # flattened vector of length N^2
        geo_indices_of_edges = np.array(geo_adj_matrix.todense()).reshape(-1).nonzero()  # positions of geographical edges in the flattened matrix
        geo_flows = all_edges[geo_indices_of_edges]
        # Free the dense N^2-sized intermediates as early as possible
        del all_edges, geo_indices_of_edges
        geo_flows = (geo_flows + 1e-5).astype(np.float32)
return geo_adj_matrix, geo_inc_matrix, geo_adj_indices.T, geo_flows
@staticmethod
def _create_data_loaders(train_idcs, val_idcs, test_idcs,
train_bin_weights, edge_indices, edge_feats,
edge_labels, edge_buckets, batch_size, resample,
seed):
"""
:param train_idcs:
:param val_idcs:
:param test_idcs:
:param train_bin_weights:
:param edge_indices:
:param edge_feats:
:param edge_labels:
:param edge_buckets:
:param batch_size:
:param resample:
:param seed:
:return:
"""
assert (len(edge_indices) == len(edge_feats) == len(edge_labels)
== len(edge_buckets))
train_idcs = torch.from_numpy(train_idcs)
val_idcs = torch.from_numpy(val_idcs)
test_idcs = torch.from_numpy(test_idcs)
# Sample weights
train_sample_weights = train_bin_weights[edge_buckets[train_idcs]]
        # Fix the numpy random seed for reproducibility; the train/val/test
        # split itself is loaded from disk
        np.random.seed(seed)
if resample:
train_sampler = BatchSampler(
WeightedRandomSampler(train_sample_weights,
train_idcs.shape[0]),
batch_size=batch_size, drop_last=False)
train_loader = DataLoader(TensorDataset(edge_indices[train_idcs],
edge_feats[train_idcs],
edge_labels[train_idcs],
edge_buckets[train_idcs]),
batch_sampler=train_sampler)
else:
train_loader = DataLoader(TensorDataset(edge_indices[train_idcs],
edge_feats[train_idcs],
edge_labels[train_idcs],
edge_buckets[train_idcs]),
batch_size=batch_size, shuffle=False)
val_loader = DataLoader(TensorDataset(edge_indices[val_idcs],
edge_feats[val_idcs],
edge_labels[val_idcs],
edge_buckets[val_idcs]),
batch_size=batch_size, shuffle=False)
test_loader = DataLoader(TensorDataset(edge_indices[test_idcs],
edge_feats[test_idcs],
edge_labels[test_idcs],
edge_buckets[test_idcs]),
batch_size=batch_size, shuffle=False)
return train_loader, val_loader, test_loader
@staticmethod
def _get_continuous_feature_idcs(df):
continuous_feature_idcs = []
for idx, col in enumerate(df.columns):
if len(df[col].unique()) > 2:
continuous_feature_idcs.append(idx)
return continuous_feature_idcs
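
# Illustrative sketch (not part of the original module): the dataset treats a
# column as continuous when it has more than two unique values, so binary
# indicator columns are left untouched by the scalers. The column names below
# are made up for the demo; the function is never called in this file.
def _demo_continuous_feature_detection():
    df = pd.DataFrame({
        "population": [120.0, 95.5, 300.2],   # > 2 unique values -> continuous
        "is_commercial": [0, 1, 0],           # binary indicator -> not scaled
    })
    print(UrbanPlanningDataset._get_continuous_feature_idcs(df))  # [0]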
if __name__ == '__main__':
ds = UrbanPlanningDataset(data_base_path="Data/London_high/",
use_binned_transformer=True,
excluded_node_feature_columns=tuple())
print("\n\nNode features")
summarize_feature_matrix(ds.node_feats.numpy())
print("\n\nEdge features")
summarize_feature_matrix(ds.edge_feats.numpy())
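
# Illustrative sketch (not part of the original script): each batch yielded by
# the data loaders is an (edge_indices, edge_features, scaled_labels, bin_index)
# tuple built from the TensorDataset above, so a training loop would unpack it
# as below. Only an indicative snippet; it is never called here.
def _demo_iterate_loader(dataset):
    for edge_idx, edge_feats, labels, buckets in dataset.train_loader:
        print(edge_idx.shape, edge_feats.shape, labels.shape, buckets.shape)
        break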
# repo_path: dataset.py
import numpy as np
from gensim import corpora
from keras.preprocessing import sequence
from nltk.tokenize import TreebankWordTokenizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted, column_or_1d
__author__ = "<NAME>"
__credits__ = "https://github.com/octo-technology/bdacore"
__license__ = "Apache 2.0"
class Text2Sequence(BaseEstimator, TransformerMixin):
"""
Class used to transform text into integers sequence.
Parameters
----------
pad_string : string, optional (default='')
string value used for padding sequence.
    seq_len : int, optional (default=None)
        fixed sequence length used to build the output sequences. If None, this length is computed during the
        first transform operation as the mean length of the original sequences plus 2 standard deviations.
    tokenizer : object, optional (default=TreebankWordTokenizer())
        object used to tokenize text. The class used must implement a tokenize method.
    stemmer : object, optional (default=None)
        object used to stem text. The class used must implement a stem method.
Attributes
----------
dictionary_ : gensim.corpora.Dictionary
        maps every token to a unique id.
dictionary_size_ : int
size of the dictionary_ attribute.
Examples
--------
>>> import numpy as np
>>> from nltk.stem.snowball import EnglishStemmer
>>> from dsbox.ml.neural_networks.processing import Text2Sequence
>>> X = np.array(['this is really awesome !', \
'this is really crap !!'])
>>> text2seq = Text2Sequence(stemmer=EnglishStemmer())
>>> text2seq.fit_transform(X)
array([[0, 5, 3, 4, 2, 1],
[5, 3, 4, 6, 1, 1]], dtype=int32)
"""
    def __init__(self, pad_string='', tokenizer=TreebankWordTokenizer(), stemmer=None, seq_len=None):
        self.pad_string = pad_string
        self.seq_len = seq_len
self.tokenizer = tokenizer
self.stemmer = stemmer
self.attr_to_check = ["dictionary_",
"dictionary_size_",
]
def tokenize(self, text_list):
"""
        Takes a list of texts and returns a list of tokens for each text (stemmed or not).
Parameters
----------
text_list : array-like, iterable
list of strings
Returns
-------
list of token lists
"""
all_tokens = []
for text in text_list:
tokens = self.tokenizer.tokenize(text)
if self.stemmer is not None:
tokens = [self.stemmer.stem(word) for word in tokens]
all_tokens.append(tokens)
return all_tokens
def fit(self, X, y=None):
"""
Fit the transformer by building the word corpus.
Parameters
----------
X : array-like, iterable
Collection of str or an iterable which yields str
y : array-like, shape (n_samples,)
            Not used, present only for API compatibility.
Returns
-------
self : object
Returns an instance of self.
"""
x = column_or_1d(X)
all_tokens = self.tokenize(x)
        # add the padding string as its own document so that it is assigned an id in the dictionary
self.dictionary_ = corpora.Dictionary([[self.pad_string]] + all_tokens)
self.dictionary_size_ = len(self.dictionary_.keys())
return self
def transform(self, X):
"""
Transform a list of texts into sequences of integers (ids), based on a known corpus. If a word is not
found in the corpus built during fit operation, it will be replaced by the padding string value.
Parameters
----------
X : array-like, iterable
Collection of str or an iterable which yields str
Returns
-------
numpy array of ids sequences.
"""
check_is_fitted(self, self.attr_to_check)
x = column_or_1d(X)
new_tokens = self.tokenize(x)
pad_value = self.dictionary_.token2id[self.pad_string]
# transform all tokens into unique ids
word_ids, word_ids_len = [], []
for doc in new_tokens:
word_id = []
for word in doc:
if word in self.dictionary_.token2id:
word_id.append(self.dictionary_.token2id[word])
else:
word_id.append(pad_value)
word_ids.append(word_id)
word_ids_len.append(len(word_id))
        # compute the sequence length if it has not been set yet
if self.seq_len is None:
self.seq_len = np.round((np.mean(word_ids_len) + 2 * np.std(word_ids_len))).astype(int)
return sequence.pad_sequences(np.array(word_ids), maxlen=self.seq_len, value=pad_value)
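
# Illustrative sketch (not part of the original module): words that were not
# seen during fit are mapped to the padding id, and shorter documents are
# left-padded with the same id by pad_sequences. The exact ids depend on
# gensim's dictionary ordering and are only indicative; never called here.
def _demo_text2sequence_oov():
    corpus = np.array(['the model is great', 'the model is slow'])
    text2seq = Text2Sequence()
    text2seq.fit(corpus)
    pad_id = text2seq.dictionary_.token2id[text2seq.pad_string]
    # 'unknown' was never seen during fit, so it falls back to pad_id
    print(pad_id, text2seq.transform(np.array(['the model is unknown'])))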
# repo_path: dsbox/ml/neural_networks/processing/text_classification.py
from .parser import TokenGroup, TokenElement, TokenAttribute, is_quote, is_bracket
from .tokenizer import tokens
from .stringify import stringify
class ConvertState:
__slots__ = ('inserted', 'text', 'repeat_guard', 'repeaters', 'variables',
'_text_inserted')
def __init__(self, text: str = None, variables={}, max_repeat=None):
self.inserted = False
self.text = text
self.repeat_guard = max_repeat if max_repeat is not None else 1000000
self.variables = variables
self.repeaters = []
self._text_inserted = False
def get_text(self, pos: int):
self._text_inserted = True
if isinstance(self.text, list):
value = self.text[pos] if pos is not None else '\n'.join(self.text)
else:
value = self.text or ''
return value
def get_variable(self, name: str):
return self.variables.get(name) if self.variables else name
class Abbreviation:
__slots__ = ('type', 'children')
def __init__(self):
self.type = 'Abbreviation'
self.children = []
class AbbreviationNode:
__slots__ = ('type', 'name', 'value', 'repeat', 'attributes', 'children', 'self_closing')
def __init__(self, node: TokenElement, state: ConvertState):
self.type = 'AbbreviationNode'
self.name = stringify_name(node.name, state) if node.name else None
self.value = stringify_value(node.value, state) if node.value else None
self.attributes = None
self.children = []
self.repeat = clone_repeater(node.repeat) if node.repeat else None
self.self_closing = node.self_close
"Indicates current element is self-closing, e.g. should not contain closing pair"
class AbbreviationAttribute:
__slots__ = ('name', 'value', 'value_type', 'boolean', 'implied')
def __init__(self, name: str, value: list, value_type: str, boolean=False, implied=False):
self.name = name
self.value = value
self.value_type = value_type
"Indicates type of value stored in `.value` property"
        self.boolean = boolean
        "Attribute is boolean (e.g. name equals value)"
        self.implied = implied
        "Attribute is implied (e.g. must be output only if it contains a non-null value)"
def copy(self):
return AbbreviationAttribute(self.name, self.value, self.value_type, self.boolean, self.implied)
def convert(abbr: TokenGroup, options={}):
"Converts given token-based abbreviation into simplified and unrolled node-based abbreviation"
text = options.get('text')
state = ConvertState(text, options.get('variables'), options.get('max_repeat'))
result = Abbreviation()
result.children = convert_group(abbr, state)
if text is not None and not state._text_inserted:
# Text given but no implicitly repeated elements: insert it into deepest child
deepest = deepest_node(result.children[-1])
if deepest:
tx = '\n'.join(text) if isinstance(text, list) else text or ''
insert_text(deepest, tx)
return result
def convert_statement(node: TokenElement, state: ConvertState):
result = []
if node.repeat:
# Node is repeated: we should create copies of given node
# and supply context token with actual repeater state
original = node.repeat
repeat = clone_repeater(node.repeat)
if repeat.implicit and isinstance(state.text, list):
repeat.count = len(state.text)
else:
repeat.count = repeat.count or 1
state.repeaters.append(repeat)
i = 0
while i < repeat.count:
repeat.value = i
node.repeat = repeat
items = convert_group(node, state) if is_group(node) else convert_element(node, state)
if repeat.implicit and not state.inserted:
# It’s an implicit repeater but no repeater placeholders found inside,
# we should insert text into deepest node
target = items[-1]
deepest = deepest_node(target) if target else None
if deepest:
insert_text(deepest, state.get_text(repeat.value))
result += items
            # Output at least one repeated item even if the repeat limit has
            # been reached
state.repeat_guard -= 1
if state.repeat_guard <= 0: break
i += 1
state.repeaters.pop()
node.repeat = original
if repeat.implicit: state.inserted = True
else:
result += convert_group(node, state) if is_group(node) else convert_element(node, state)
return result
def convert_element(node: TokenElement, state: ConvertState):
elem = AbbreviationNode(node, state)
result = [elem]
for child in node.elements:
elem.children += convert_statement(child, state)
if node.attributes:
elem.attributes = [convert_attribute(attr, state) for attr in node.attributes]
    # If the current node is a text-only snippet without fields, promote all
    # of its children to siblings
if not elem.name and elem.attributes is None and elem.value and not some(elem.value, is_field):
result += elem.children
elem.children = []
return result
def convert_group(node: TokenGroup, state: ConvertState):
result = []
for child in node.elements:
result += convert_statement(child, state)
if node.repeat:
result = attach_repeater(result, node.repeat)
return result
def convert_attribute(node: TokenAttribute, state: ConvertState):
attr = create_attribute(node, state)
if node.value:
tokens = node.value[:]
if is_quote(tokens[0]):
# It’s a quoted value: remove quotes from output but mark attribute
# value as quoted
quote = tokens.pop(0)
if len(tokens) and tokens[-1].type == quote.type:
tokens.pop()
attr.value_type = 'singleQuote' if quote.single else 'doubleQuote'
elif is_bracket(tokens[0], 'expression', True):
# Value is expression: remove brackets but mark value type
attr.value_type = 'expression'
tokens.pop(0)
if tokens and is_bracket(tokens[-1], 'expression', False):
tokens.pop()
attr.value = stringify_value(tokens, state)
return attr
def create_attribute(node: TokenAttribute, state: ConvertState):
name = stringify_name(node.name, state) if node.name else None
value_type = 'expression' if node.expression else 'raw'
boolean = False
implied = False
if name:
if name[-1] == '.':
boolean = True
name = name[0:-1]
if name[0] == '!':
implied = True
name = name[1:]
return AbbreviationAttribute(name, None, value_type, boolean, implied)
def stringify_name(tokens: list, state: ConvertState):
"Converts given token list to string"
return ''.join([stringify(token, state) for token in tokens])
def stringify_value(token_list: list, state: ConvertState):
"Converts given token list to value list"
result = []
accum = []
for token in token_list:
if is_field(token):
            # Keep original fields in the output since some editors have their
            # own syntax for fields or don't support fields at all, so we should
            # capture the actual field location in the output stream
if accum:
result.append(''.join(accum))
accum = []
result.append(token)
else:
accum.append(stringify(token, state))
    if accum:
result.append(''.join(accum))
return result
def is_group(node):
return isinstance(node, TokenGroup)
def is_field(token):
return isinstance(token, tokens.Field) and token.index is not None
def deepest_node(node: AbbreviationNode):
return deepest_node(node.children[-1]) if node.children else node
def insert_text(node: AbbreviationNode, text: str):
if node.value:
last_token = node.value[-1]
if isinstance(last_token, str):
node.value[-1] += text
else:
node.value.append(text)
else:
node.value = [text]
def attach_repeater(items: list, repeater: tokens.Repeater):
for item in items:
if not item.repeat:
item.repeat = clone_repeater(repeater)
return items
def clone_repeater(repeater: tokens.Repeater):
return tokens.Repeater(repeater.count, repeater.value, repeater.implicit)
def some(items: list, fn: callable):
for item in items:
if fn(item): return True
return False
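
# Illustrative sketch (not part of the original module): clone_repeater copies
# a repeater token so that unrolling one repetition does not mutate the shared
# original, and attach_repeater stamps that copy onto items that do not carry
# a repeater yet. _FakeItem is a hypothetical stand-in for an AbbreviationNode;
# the function is never called here.
def _demo_repeater_helpers():
    class _FakeItem:
        def __init__(self):
            self.repeat = None
    original = tokens.Repeater(3, 0, False)
    items = attach_repeater([_FakeItem(), _FakeItem()], original)
    items[0].repeat.value = 2
    # The original repeater is untouched because each item received its own clone
    print(original.value, items[0].repeat.count, items[1].repeat.value)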
|
emmet/abbreviation/convert.py
|
from .parser import TokenGroup, TokenElement, TokenAttribute, is_quote, is_bracket
from .tokenizer import tokens
from .stringify import stringify
class ConvertState:
__slots__ = ('inserted', 'text', 'repeat_guard', 'repeaters', 'variables',
'_text_inserted')
def __init__(self, text: str = None, variables={}, max_repeat=None):
self.inserted = False
self.text = text
self.repeat_guard = max_repeat if max_repeat is not None else 1000000
self.variables = variables
self.repeaters = []
self._text_inserted = False
def get_text(self, pos: int):
self._text_inserted = True
if isinstance(self.text, list):
value = self.text[pos] if pos is not None else '\n'.join(self.text)
else:
value = self.text or ''
return value
def get_variable(self, name: str):
return self.variables.get(name) if self.variables else name
class Abbreviation:
__slots__ = ('type', 'children')
def __init__(self):
self.type = 'Abbreviation'
self.children = []
class AbbreviationNode:
__slots__ = ('type', 'name', 'value', 'repeat', 'attributes', 'children', 'self_closing')
def __init__(self, node: TokenElement, state: ConvertState):
self.type = 'AbbreviationNode'
self.name = stringify_name(node.name, state) if node.name else None
self.value = stringify_value(node.value, state) if node.value else None
self.attributes = None
self.children = []
self.repeat = clone_repeater(node.repeat) if node.repeat else None
self.self_closing = node.self_close
"Indicates current element is self-closing, e.g. should not contain closing pair"
class AbbreviationAttribute:
__slots__ = ('name', 'value', 'value_type', 'boolean', 'implied')
def __init__(self, name: str, value: list, value_type: str, boolean=False, implied=False):
self.name = name
self.value = value
self.value_type = value_type
"Indicates type of value stored in `.value` property"
self.boolean = boolean
"Attribute is boolean (e.g.name equals value)"
self.implied = implied
"Attribute is implied (e.g.must be outputted only if contains non-null value)"
def copy(self):
return AbbreviationAttribute(self.name, self.value, self.value_type, self.boolean, self.implied)
def convert(abbr: TokenGroup, options={}):
"Converts given token-based abbreviation into simplified and unrolled node-based abbreviation"
text = options.get('text')
state = ConvertState(text, options.get('variables'), options.get('max_repeat'))
result = Abbreviation()
result.children = convert_group(abbr, state)
if text is not None and not state._text_inserted:
# Text given but no implicitly repeated elements: insert it into deepest child
deepest = deepest_node(result.children[-1])
if deepest:
tx = '\n'.join(text) if isinstance(text, list) else text or ''
insert_text(deepest, tx)
return result
def convert_statement(node: TokenElement, state: ConvertState):
result = []
if node.repeat:
# Node is repeated: we should create copies of given node
# and supply context token with actual repeater state
original = node.repeat
repeat = clone_repeater(node.repeat)
if repeat.implicit and isinstance(state.text, list):
repeat.count = len(state.text)
else:
repeat.count = repeat.count or 1
state.repeaters.append(repeat)
i = 0
while i < repeat.count:
repeat.value = i
node.repeat = repeat
items = convert_group(node, state) if is_group(node) else convert_element(node, state)
if repeat.implicit and not state.inserted:
# It’s an implicit repeater but no repeater placeholders found inside,
# we should insert text into deepest node
target = items[-1]
deepest = deepest_node(target) if target else None
if deepest:
insert_text(deepest, state.get_text(repeat.value))
result += items
# We should output at least one repeated item even if it’s reached
# repeat limit
state.repeat_guard -= 1
if state.repeat_guard <= 0: break
i += 1
state.repeaters.pop()
node.repeat = original
if repeat.implicit: state.inserted = True
else:
result += convert_group(node, state) if is_group(node) else convert_element(node, state)
return result
def convert_element(node: TokenElement, state: ConvertState):
elem = AbbreviationNode(node, state)
result = [elem]
for child in node.elements:
elem.children += convert_statement(child, state)
if node.attributes:
elem.attributes = [convert_attribute(attr, state) for attr in node.attributes]
# In case if current node is a text-only snippet without fields, we should
# put all children as siblings
if not elem.name and elem.attributes is None and elem.value and not some(elem.value, is_field):
result += elem.children
elem.children = []
return result
def convert_group(node: TokenGroup, state: ConvertState):
result = []
for child in node.elements:
result += convert_statement(child, state)
if node.repeat:
result = attach_repeater(result, node.repeat)
return result
def convert_attribute(node: TokenAttribute, state: ConvertState):
attr = create_attribute(node, state)
if node.value:
tokens = node.value[:]
if is_quote(tokens[0]):
# It’s a quoted value: remove quotes from output but mark attribute
# value as quoted
quote = tokens.pop(0)
if len(tokens) and tokens[-1].type == quote.type:
tokens.pop()
attr.value_type = 'singleQuote' if quote.single else 'doubleQuote'
elif is_bracket(tokens[0], 'expression', True):
# Value is expression: remove brackets but mark value type
attr.value_type = 'expression'
tokens.pop(0)
if tokens and is_bracket(tokens[-1], 'expression', False):
tokens.pop()
attr.value = stringify_value(tokens, state)
return attr
def create_attribute(node: TokenAttribute, state: ConvertState):
name = stringify_name(node.name, state) if node.name else None
value_type = 'expression' if node.expression else 'raw'
boolean = False
implied = False
if name:
if name[-1] == '.':
boolean = True
name = name[0:-1]
if name[0] == '!':
implied = True
name = name[1:]
return AbbreviationAttribute(name, None, value_type, boolean, implied)
def stringify_name(tokens: list, state: ConvertState):
"Converts given token list to string"
return ''.join([stringify(token, state) for token in tokens])
def stringify_value(token_list: list, state: ConvertState):
"Converts given token list to value list"
result = []
accum = []
for token in token_list:
if is_field(token):
# We should keep original fields in the output since some editors have their
# own syntax for fields or don't support fields at all, so we should
# capture the actual field location in the output stream
if accum:
result.append(''.join(accum))
accum = []
result.append(token)
else:
accum.append(stringify(token, state))
if (accum):
result.append(''.join(accum))
return result
def is_group(node):
return isinstance(node, TokenGroup)
def is_field(token):
return isinstance(token, tokens.Field) and token.index is not None
def deepest_node(node: AbbreviationNode):
return deepest_node(node.children[-1]) if node.children else node
def insert_text(node: AbbreviationNode, text: str):
if node.value:
last_token = node.value[-1]
if isinstance(last_token, str):
node.value[-1] += text
else:
node.value.append(text)
else:
node.value = [text]
def attach_repeater(items: list, repeater: tokens.Repeater):
for item in items:
if not item.repeat:
item.repeat = clone_repeater(repeater)
return items
def clone_repeater(repeater: tokens.Repeater):
return tokens.Repeater(repeater.count, repeater.value, repeater.implicit)
def some(items: list, fn: callable):
for item in items:
if fn(item): return True
return False
| 0.674908 | 0.231386 |
from rdkit import Chem
import numpy as np
class SmilesVectorizer(object):
"""SMILES vectorizer and devectorizer, with support for SMILES enumeration (atom order randomization)
as data augmentation
:parameter charset: string containing the characters for the vectorization
can also be generated via the .fit() method
:parameter pad: Length of the vectorization
:parameter leftpad: Add spaces to the left of the SMILES
:parameter isomericSmiles: Generate SMILES containing information about stereogenic centers
:parameter augment: Enumerate the SMILES during transform
:parameter canonical: use canonical SMILES during transform (overrides enum)
:parameter binary: Use RDKit binary strings instead of molecule objects
"""
def __init__(self, charset = '@C)(=cOn1S2/H[N]\\', pad=5, maxlength=120, leftpad=True, isomericSmiles=True, augment=True, canonical=False, startchar = '^', endchar = '$', unknownchar = '?', binary=False):
#Special Characters
self.startchar = startchar
self.endchar = endchar
self.unknownchar = unknownchar
#Vectorization and SMILES options
self.binary = binary
self.leftpad = leftpad
self.isomericSmiles = isomericSmiles
self.augment = augment
self.canonical = canonical
self._pad = pad
self._maxlength = maxlength
#The characterset
self._charset = None
self.charset = charset
#Calculate the dimensions
self.setdims()
@property
def charset(self):
return self._charset
@charset.setter
def charset(self, charset):
#Ensure start and endchars are in the charset
for char in [self.startchar, self.endchar, self.unknownchar]:
if char not in charset:
charset = charset + char
#Set the hidden properties
self._charset = charset
self._charlen = len(charset)
self._char_to_int = dict((c,i) for i,c in enumerate(charset))
self._int_to_char = dict((i,c) for i,c in enumerate(charset))
self.setdims()
@property
def maxlength(self):
return self._maxlength
@maxlength.setter
def maxlength(self, maxlength):
self._maxlength = maxlength
self.setdims()
@property
def pad(self):
return self._pad
@pad.setter
def pad(self, pad):
self._pad = pad
self.setdims()
def setdims(self):
"""Calculates and sets the output dimensions of the vectorized molecules from the current settings"""
self.dims = (self.maxlength + self.pad, self._charlen)
def fit(self, mols, extra_chars=[]):
"""Performs extraction of the charset and length of a SMILES datasets and sets self.maxlength and self.charset
:parameter smiles: Numpy array or Pandas series containing smiles as strings
:parameter extra_chars: List of extra chars to add to the charset (e.g. "\\\\" when "/" is present)
"""
smiles = [Chem.MolToSmiles(mol) for mol in mols]
charset = set("".join(list(smiles))) #Is there a smarter way when the list of SMILES is HUGE!
self.charset = "".join(charset.union(set(extra_chars)))
self.maxlength = max([len(smile) for smile in smiles])
def randomize_smiles(self, smiles):
"""Perform a randomization of a SMILES string
must be RDKit sanitizable"""
mol = Chem.MolFromSmiles(smiles)
nmol = self.randomize_mol(mol)
return Chem.MolToSmiles(nmol, canonical=self.canonical, isomericSmiles=self.isomericSmiles)
def randomize_mol(self, mol):
"""Performs a randomization of the atom order of an RDKit molecule"""
ans = list(range(mol.GetNumAtoms()))
np.random.shuffle(ans)
return Chem.RenumberAtoms(mol,ans)
def transform(self, mols, augment=None, canonical=None):
"""Perform an enumeration (atom order randomization) and vectorization of a Numpy array of RDkit molecules
:parameter mols: The RDKit molecules to transform in a list or array
:parameter augment: Override the objects .augment setting
:parameter canonical: Override the objects .canonical setting
:output: Numpy array with the vectorized molecules with shape [batch, maxlength+pad, charset]
"""
#TODO make it possible to use both SMILES, RDKit mols and RDKit binary strings in input
one_hot = np.zeros([len(mols)] + list(self.dims), dtype=np.int8)
#Possibly override object settings
if augment is None:
augment = self.augment
if canonical is None:
canonical = self.canonical
for i,mol in enumerate(mols):
#Fast convert from RDKit binary
if self.binary: mol = Chem.Mol(mol)
if augment:
mol = self.randomize_mol(mol)
ss = Chem.MolToSmiles(mol, canonical=canonical, isomericSmiles=self.isomericSmiles)
#TODO, Improvement make it robust to too long SMILES strings
#TODO, Improvement make a "jitter", with random offset within the possible frame
#TODO, Improvement: report when too many "?" (unknown character) tokens are produced
l = len(ss)
if self.leftpad:
offset = self.dims[0]-l-1
else:
offset = 1
for j,c in enumerate(ss):
charidx = self._char_to_int.get(c, self._char_to_int[self.unknownchar])
if j+offset >= one_hot.shape[1]:
print("Warning: SMILES is longer than maxlength + pad; increase maxlength or pad")
one_hot[i,j+offset,charidx] = 1
#Pad the start
one_hot[i,offset-1,self._char_to_int[self.startchar]] = 1
#Pad the end
one_hot[i,offset+l:,self._char_to_int[self.endchar]] = 1
#Pad the space in front of start (Could this lead to funky effects during sampling?)
#one_hot[i,:offset-1,self._char_to_int[self.endchar]] = 1
return one_hot
def reverse_transform(self, vect, strip=True):
""" Performs a conversion of a vectorized SMILES to a SMILES strings
charset must be the same as used for vectorization.
:parameter vect: Numpy array of vectorized SMILES.
:parameter strip: Strip start and end tokens from the SMILES string
"""
#TODO make it possible to take a single vectorized molecule, not a list
smiles = []
for v in vect:
#mask v
v=v[v.sum(axis=1)==1]
#Find one hot encoded index with argmax, translate to char and join to string
smile = "".join(self._int_to_char[i] for i in v.argmax(axis=1))
if strip:
smile = smile.strip(self.startchar + self.endchar)
smiles.append(smile)
return np.array(smiles)
from rdkit import DataStructs
from rdkit.Chem import AllChem
class HashedMorganVectorizer(object):
def __init__(self, radius=2, bits=2048, augment=None):
self.bits = bits
self.radius = radius
self.augment = augment #Not used
self.dims = (bits,)
self.keys = None
def transform_mol(self, mol):
""" transforms the molecule into a numpy bit array with the morgan bits
:parameter mol: the RDKit molecule to be transformed
"""
fp = AllChem.GetMorganFingerprintAsBitVect(mol,self.radius,nBits=self.bits)
arr = np.zeros((self.bits,))
DataStructs.ConvertToNumpyArray(fp, arr)
return arr
def transform(self, mols):
"""Transforms a list or array of RDKit molecules into an array with the Morgan bits
:parameter mols: list or array of RDKit molecules
"""
arr = np.zeros((len(mols), self.bits))
for i, mol in enumerate(mols):
arr[i,:] = self.transform_mol(mol)
return arr
class MorganDictVectorizer(object):
def __init__(self, radius=2, augment=None):
self.radius = radius
self.augment = augment #Not used
self.dims = None
def fit(self, mols):
"""Analyses the molecules and creates the key index for the creation of the dense array"""
keys=set()
for mol in mols:
fp = AllChem.GetMorganFingerprint(mol,self.radius)
keys.update(fp.GetNonzeroElements().keys())
keys = list(keys)
keys.sort()
self.keys= np.array(keys)
self.dims = len(self.keys)
def transform_mol(self, mol, misses=False):
""" transforms the mol into a dense array using the fitted keys as index
:parameter mol: the RDKit molecule to be transformed
:parameter misses: whether to return the number of key misses for the molecule
"""
assert type(self.keys) is np.ndarray, "keys are not defined or are not an np.ndarray; has the .fit(mols) method been called?"
#Get fingerprint as a dictionary
fp = AllChem.GetMorganFingerprint(mol,self.radius)
fp_d = fp.GetNonzeroElements()
#Prepare the array, and set the values
#TODO is there a way to vectorize and speed up this?
arr = np.zeros((self.dims,))
_misses = 0
for key, value in fp_d.items():
if key in self.keys:
arr[self.keys == key] = value
else:
_misses = _misses + 1
if misses:
return arr, _misses
else:
return arr
def transform(self, mols, misses=False):
"""Transforms a list or array of RDKit molecules into a dense array using the key dictionary (see .fit())
:parameter mols: list or array of RDKit molecules
:parameter misses: Whether to return the number of key misses for each molecule
"""
arr = np.zeros((len(mols), self.dims))
if misses:
_misses = np.zeros((len(mols),1))
for i, mol in enumerate(mols):
arr[i,:], _misses[i] = self.transform_mol(mol, misses=misses)
return arr, _misses
else:
for i, mol in enumerate(mols):
arr[i,:] = self.transform_mol(mol, misses=False)
return arr
|
ddc_pub/vectorizers.py
|
from rdkit import Chem
import numpy as np
class SmilesVectorizer(object):
"""SMILES vectorizer and devectorizer, with support for SMILES enumeration (atom order randomization)
as data augmentation
:parameter charset: string containing the characters for the vectorization
can also be generated via the .fit() method
:parameter pad: Length of the vectorization
:parameter leftpad: Add spaces to the left of the SMILES
:parameter isomericSmiles: Generate SMILES containing information about stereogenic centers
:parameter augment: Enumerate the SMILES during transform
:parameter canonical: use canonical SMILES during transform (overrides enum)
:parameter binary: Use RDKit binary strings instead of molecule objects
"""
def __init__(self, charset = '@C)(=cOn1S2/H[N]\\', pad=5, maxlength=120, leftpad=True, isomericSmiles=True, augment=True, canonical=False, startchar = '^', endchar = '$', unknownchar = '?', binary=False):
#Special Characters
self.startchar = startchar
self.endchar = endchar
self.unknownchar = unknownchar
#Vectorization and SMILES options
self.binary = binary
self.leftpad = leftpad
self.isomericSmiles = isomericSmiles
self.augment = augment
self.canonical = canonical
self._pad = pad
self._maxlength = maxlength
#The characterset
self._charset = None
self.charset = charset
#Calculate the dimensions
self.setdims()
@property
def charset(self):
return self._charset
@charset.setter
def charset(self, charset):
#Ensure start and endchars are in the charset
for char in [self.startchar, self.endchar, self.unknownchar]:
if char not in charset:
charset = charset + char
#Set the hidden properties
self._charset = charset
self._charlen = len(charset)
self._char_to_int = dict((c,i) for i,c in enumerate(charset))
self._int_to_char = dict((i,c) for i,c in enumerate(charset))
self.setdims()
@property
def maxlength(self):
return self._maxlength
@maxlength.setter
def maxlength(self, maxlength):
self._maxlength = maxlength
self.setdims()
@property
def pad(self):
return self._pad
@pad.setter
def pad(self, pad):
self._pad = pad
self.setdims()
def setdims(self):
"""Calculates and sets the output dimensions of the vectorized molecules from the current settings"""
self.dims = (self.maxlength + self.pad, self._charlen)
def fit(self, mols, extra_chars=[]):
"""Performs extraction of the charset and length of a SMILES datasets and sets self.maxlength and self.charset
:parameter smiles: Numpy array or Pandas series containing smiles as strings
:parameter extra_chars: List of extra chars to add to the charset (e.g. "\\\\" when "/" is present)
"""
smiles = [Chem.MolToSmiles(mol) for mol in mols]
charset = set("".join(list(smiles))) #Is there a smarter way when the list of SMILES is HUGE!
self.charset = "".join(charset.union(set(extra_chars)))
self.maxlength = max([len(smile) for smile in smiles])
def randomize_smiles(self, smiles):
"""Perform a randomization of a SMILES string
must be RDKit sanitizable"""
mol = Chem.MolFromSmiles(smiles)
nmol = self.randomize_mol(mol)
return Chem.MolToSmiles(nmol, canonical=self.canonical, isomericSmiles=self.isomericSmiles)
def randomize_mol(self, mol):
"""Performs a randomization of the atom order of an RDKit molecule"""
ans = list(range(mol.GetNumAtoms()))
np.random.shuffle(ans)
return Chem.RenumberAtoms(mol,ans)
def transform(self, mols, augment=None, canonical=None):
"""Perform an enumeration (atom order randomization) and vectorization of a Numpy array of RDkit molecules
:parameter mols: The RDKit molecules to transform in a list or array
:parameter augment: Override the objects .augment setting
:parameter canonical: Override the objects .canonical setting
:output: Numpy array with the vectorized molecules with shape [batch, maxlength+pad, charset]
"""
#TODO make it possible to use both SMILES, RDKit mols and RDKit binary strings in input
one_hot = np.zeros([len(mols)] + list(self.dims), dtype=np.int8)
#Possibly override object settings
if augment is None:
augment = self.augment
if canonical is None:
canonical = self.canonical
for i,mol in enumerate(mols):
#Fast convert from RDKit binary
if self.binary: mol = Chem.Mol(mol)
if augment:
mol = self.randomize_mol(mol)
ss = Chem.MolToSmiles(mol, canonical=canonical, isomericSmiles=self.isomericSmiles)
#TODO, Improvement make it robust to too long SMILES strings
#TODO, Improvement make a "jitter", with random offset within the possible frame
#TODO, Improvement: report when too many "?" (unknown character) tokens are produced
l = len(ss)
if self.leftpad:
offset = self.dims[0]-l-1
else:
offset = 1
for j,c in enumerate(ss):
charidx = self._char_to_int.get(c, self._char_to_int[self.unknownchar])
if j+offset >= one_hot.shape[1]:
print("Warning: SMILES is longer than maxlength + pad; increase maxlength or pad")
one_hot[i,j+offset,charidx] = 1
#Pad the start
one_hot[i,offset-1,self._char_to_int[self.startchar]] = 1
#Pad the end
one_hot[i,offset+l:,self._char_to_int[self.endchar]] = 1
#Pad the space in front of start (Could this lead to funky effects during sampling?)
#one_hot[i,:offset-1,self._char_to_int[self.endchar]] = 1
return one_hot
def reverse_transform(self, vect, strip=True):
""" Performs a conversion of a vectorized SMILES to a SMILES strings
charset must be the same as used for vectorization.
:parameter vect: Numpy array of vectorized SMILES.
:parameter strip: Strip start and end tokens from the SMILES string
"""
#TODO make it possible to take a single vectorized molecule, not a list
smiles = []
for v in vect:
#mask v
v=v[v.sum(axis=1)==1]
#Find one hot encoded index with argmax, translate to char and join to string
smile = "".join(self._int_to_char[i] for i in v.argmax(axis=1))
if strip:
smile = smile.strip(self.startchar + self.endchar)
smiles.append(smile)
return np.array(smiles)
from rdkit import DataStructs
from rdkit.Chem import AllChem
class HashedMorganVectorizer(object):
def __init__(self, radius=2, bits=2048, augment=None):
self.bits = bits
self.radius = radius
self.augment = augment #Not used
self.dims = (bits,)
self.keys = None
def transform_mol(self, mol):
""" transforms the molecule into a numpy bit array with the morgan bits
:parameter mol: the RDKit molecule to be transformed
"""
fp = AllChem.GetMorganFingerprintAsBitVect(mol,self.radius,nBits=self.bits)
arr = np.zeros((self.bits,))
DataStructs.ConvertToNumpyArray(fp, arr)
return arr
def transform(self, mols):
"""Transforms a list or array of RDKit molecules into an array with the Morgan bits
:parameter mols: list or array of RDKit molecules
"""
arr = np.zeros((len(mols), self.bits))
for i, mol in enumerate(mols):
arr[i,:] = self.transform_mol(mol)
return arr
class MorganDictVectorizer(object):
def __init__(self, radius=2, augment=None):
self.radius = radius
self.augment = augment #Not used
self.dims = None
def fit(self, mols):
"""Analyses the molecules and creates the key index for the creation of the dense array"""
keys=set()
for mol in mols:
fp = AllChem.GetMorganFingerprint(mol,self.radius)
keys.update(fp.GetNonzeroElements().keys())
keys = list(keys)
keys.sort()
self.keys= np.array(keys)
self.dims = len(self.keys)
def transform_mol(self, mol, misses=False):
""" transforms the mol into a dense array using the fitted keys as index
:parameter mol: the RDKit molecule to be transformed
:parameter misses: whether to return the number of key misses for the molecule
"""
assert type(self.keys) is np.ndarray, "keys are not defined or are not an np.ndarray; has the .fit(mols) method been called?"
#Get fingerprint as a dictionary
fp = AllChem.GetMorganFingerprint(mol,self.radius)
fp_d = fp.GetNonzeroElements()
#Prepare the array, and set the values
#TODO is there a way to vectorize and speed up this?
arr = np.zeros((self.dims,))
_misses = 0
for key, value in fp_d.items():
if key in self.keys:
arr[self.keys == key] = value
else:
_misses = _misses + 1
if misses:
return arr, _misses
else:
return arr
def transform(self, mols, misses=False):
"""Transforms a list or array of RDKit molecules into a dense array using the key dictionary (see .fit())
:parameter mols: list or array of RDKit molecules
:parameter misses: Whether to return the number of key misses for each molecule
"""
arr = np.zeros((len(mols), self.dims))
if misses:
_misses = np.zeros((len(mols),1))
for i, mol in enumerate(mols):
arr[i,:], _misses[i] = self.transform_mol(mol, misses=misses)
return arr, _misses
else:
for i, mol in enumerate(mols):
arr[i,:] = self.transform_mol(mol, misses=False)
return arr
| 0.71602 | 0.571049 |
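A minimal usage sketch of the SmilesVectorizer above; the SMILES strings and the import line (derived from the repo path ddc_pub/vectorizers.py) are illustrative assumptions, not taken from the source:

from rdkit import Chem
from ddc_pub.vectorizers import SmilesVectorizer  # assumed import, following the repo path above

smiles = ["CCO", "c1ccccc1", "CC(=O)Oc1ccccc1C(=O)O"]  # illustrative molecules
mols = [Chem.MolFromSmiles(s) for s in smiles]

sv = SmilesVectorizer(augment=False)        # set augment=True to enable SMILES enumeration
sv.fit(mols)                                # derives charset and maxlength from the data
one_hot = sv.transform(mols)                # shape: (3, maxlength + pad, charset length)
decoded = sv.reverse_transform(one_hot)     # decodes back to SMILES strings
print(one_hot.shape, decoded)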
import re
from os import path
from setuptools import setup
from codecs import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def read(*parts):
return open(path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^version = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='jira-tracker',
version=find_version('jira_tracker','__init__.py'),
description='Simple Jira story point tracking tool',
long_description=long_description,
long_description_content_type="text/markdown",
# The project's main homepage.
url='https://github.com/josephbmanley/jira-tracker',
# Author details
author='<NAME>',
author_email='<EMAIL>',
# Choose your license
license='MIT',
# See https://pypi.org/classifiers/
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords='Jira',
packages=['jira_tracker'],
install_requires=['argparse','jira','pyyaml','wheel'],
package_data={},
entry_points={
'console_scripts' : [
'jira-tracker=jira_tracker:main'
]
}
)
|
setup.py
|
import re
from os import path
from setuptools import setup
from codecs import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def read(*parts):
return open(path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^version = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='jira-tracker',
version=find_version('jira_tracker','__init__.py'),
description='Simple Jira story point tracking tool',
long_description=long_description,
long_description_content_type="text/markdown",
# The project's main homepage.
url='https://github.com/josephbmanley/jira-tracker',
# Author details
author='<NAME>',
author_email='<EMAIL>',
# Choose your license
license='MIT',
# See https://pypi.org/classifiers/
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords='Jira',
packages=['jira_tracker'],
install_requires=['argparse','jira','pyyaml','wheel'],
package_data={},
entry_points={
'console_scripts' : [
'jira-tracker=jira_tracker:main'
]
}
)
| 0.47025 | 0.160562 |
from __future__ import unicode_literals
from .imapclient_test import IMAPClientTest
from .util import Mock
class TestFolderStatus(IMAPClientTest):
def test_basic(self):
self.client._imap.status.return_value = (
"OK",
[b"foo (MESSAGES 3 RECENT 0 UIDNEXT 4 UIDVALIDITY 1435636895 UNSEEN 0)"],
)
out = self.client.folder_status("foo")
self.client._imap.status.assert_called_once_with(
b'"foo"', "(MESSAGES RECENT UIDNEXT UIDVALIDITY UNSEEN)"
)
self.assertDictEqual(
out,
{
b"MESSAGES": 3,
b"RECENT": 0,
b"UIDNEXT": 4,
b"UIDVALIDITY": 1435636895,
b"UNSEEN": 0,
},
)
def test_literal(self):
self.client._imap.status.return_value = (
"OK",
[(b"{3}", b"foo"), b" (UIDNEXT 4)"],
)
out = self.client.folder_status("foo", ["UIDNEXT"])
self.client._imap.status.assert_called_once_with(b'"foo"', "(UIDNEXT)")
self.assertDictEqual(out, {b"UIDNEXT": 4})
def test_extra_response(self):
# In production, we've seen folder names containing spaces come back
# like this and be broken into two components in the tuple.
server_response = [b"My files (UIDNEXT 24369)"]
mock = Mock(return_value=server_response)
self.client._command_and_check = mock
resp = self.client.folder_status("My files", ["UIDNEXT"])
self.assertEqual(resp, {b"UIDNEXT": 24369})
# We've also seen the response contain mailboxes we didn't
# ask for. In all known cases, the desired mailbox is last.
server_response = [b"sent (UIDNEXT 123)\nINBOX (UIDNEXT 24369)"]
mock = Mock(return_value=server_response)
self.client._command_and_check = mock
resp = self.client.folder_status("INBOX", ["UIDNEXT"])
self.assertEqual(resp, {b"UIDNEXT": 24369})
|
tests/test_folder_status.py
|
from __future__ import unicode_literals
from .imapclient_test import IMAPClientTest
from .util import Mock
class TestFolderStatus(IMAPClientTest):
def test_basic(self):
self.client._imap.status.return_value = (
"OK",
[b"foo (MESSAGES 3 RECENT 0 UIDNEXT 4 UIDVALIDITY 1435636895 UNSEEN 0)"],
)
out = self.client.folder_status("foo")
self.client._imap.status.assert_called_once_with(
b'"foo"', "(MESSAGES RECENT UIDNEXT UIDVALIDITY UNSEEN)"
)
self.assertDictEqual(
out,
{
b"MESSAGES": 3,
b"RECENT": 0,
b"UIDNEXT": 4,
b"UIDVALIDITY": 1435636895,
b"UNSEEN": 0,
},
)
def test_literal(self):
self.client._imap.status.return_value = (
"OK",
[(b"{3}", b"foo"), b" (UIDNEXT 4)"],
)
out = self.client.folder_status("foo", ["UIDNEXT"])
self.client._imap.status.assert_called_once_with(b'"foo"', "(UIDNEXT)")
self.assertDictEqual(out, {b"UIDNEXT": 4})
def test_extra_response(self):
# In production, we've seen folder names containing spaces come back
# like this and be broken into two components in the tuple.
server_response = [b"My files (UIDNEXT 24369)"]
mock = Mock(return_value=server_response)
self.client._command_and_check = mock
resp = self.client.folder_status("My files", ["UIDNEXT"])
self.assertEqual(resp, {b"UIDNEXT": 24369})
# We've also seen the response contain mailboxes we didn't
# ask for. In all known cases, the desired mailbox is last.
server_response = [b"sent (UIDNEXT 123)\nINBOX (UIDNEXT 24369)"]
mock = Mock(return_value=server_response)
self.client._command_and_check = mock
resp = self.client.folder_status("INBOX", ["UIDNEXT"])
self.assertEqual(resp, {b"UIDNEXT": 24369})
| 0.670177 | 0.291428 |
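The folder_status behaviour exercised by these tests corresponds to a plain IMAPClient call; a minimal sketch, with placeholder host and credentials:

from imapclient import IMAPClient

client = IMAPClient("imap.example.com")
client.login("user@example.com", "password")
status = client.folder_status("INBOX", ["UIDNEXT", "MESSAGES"])  # keys are returned as bytes
print(status[b"UIDNEXT"], status[b"MESSAGES"])
client.logout()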
from datetime import datetime
from math import floor
import requests
from requests.exceptions import HTTPError
import json
import schedule # https://schedule.readthedocs.io/en/stable/
import time
def stahniOdjezdy(limit=1000):
# https://golemioapi.docs.apiary.io/#reference/public-transport/departure-boards/get-departure-board?console=1
# https://api.golemio.cz/api-keys/dashboard
with open('APIkey.secret','r') as f:
accessToken=f.read()
headers = {
'Content-Type': 'application/json; charset=utf-8',
'x-access-token': accessToken
}
minutPred = 0
minutPo = 60*24*7
id1 = 'U861Z1P' # VosmikovychA
id2 = 'U861Z2P' # VosmikovychB
url1 = 'https://api.golemio.cz/v2/departureboards/'
url1+= '?ids=' + id1 + '&limit=' + str(limit)
url1 += '&minutesBefore='+str(minutPred)+'&minutesAfter='+str(minutPo)
url2 = 'https://api.golemio.cz/v2/departureboards/'
url2+= '?ids=' + id2 + '&limit='+str(limit)
url2 += '&minutesBefore='+str(minutPred)+'&minutesAfter='+str(minutPo)
try:
# API returns at most 100 entries, even though the stated limit is 1000
response1 = requests.get(url1, headers=headers)
response1.raise_for_status()
# access JSON content
response2 = requests.get(url2, headers=headers)
response2.raise_for_status()
vystup = response2.json()+response1.json()
return(vystup)
except HTTPError as http_err:
print('HTTP error occurred:',http_err)
except Exception as err:
print('Other error occurred:',err)
def ulozOdjezdy():
try:
odjezdy = stahniOdjezdy()
zapis = json.dumps(odjezdy)
with open("odjezdyCache.txt",'w',encoding = 'utf-8') as f:
f.write(zapis)
except Exception as err:
print('ulozOdjezd error occurred:', err)
def vypisOdjezdy():
odjezdNejdrive = 0 # min
pocetOdjezduMax = 20
dolu = []
nahoru = []
doluUq = []
nahoruUq = []
try:
vystup = stahniOdjezdy(limit=pocetOdjezduMax*2)
except Exception:
print("Error: stazeni odjezdu pro vypis")
try:
with open("odjezdyCache.txt",'r',encoding = 'utf-8') as fCt:
vystup = json.load(fCt)
except Exception:
print("Error: nacteni odjezdu ze souboru")
pocetOdjezdu = 0
for polozka in vystup:
# deleting ':' in timezone
polozkaCas = datetime.strptime(polozka['departure_timestamp']['predicted'].replace("+01:00","+0100"),"%Y-%m-%dT%H:%M:%S.%f%z")
aktualniCas = datetime.now(polozkaCas.tzinfo)
casDoOdjezdu = polozkaCas-aktualniCas
linka = polozka["route"]["short_name"]
if len(linka) < 2:
linka=" "+linka
if abs(casDoOdjezdu.total_seconds())<60:
doOdjezdu = str(floor(casDoOdjezdu.total_seconds()))+"s"
doOdjezduVal = floor(casDoOdjezdu.total_seconds())
else:
doOdjezdu = str(floor(casDoOdjezdu.total_seconds()/60))+"m"
doOdjezduVal = floor(casDoOdjezdu.total_seconds()/60)
if doOdjezduVal>0:
doOdjezdu = " "+doOdjezdu
if abs(doOdjezduVal)<10:
doOdjezdu = " "+doOdjezdu
odjezd = linka+" "+doOdjezdu+" |"
if casDoOdjezdu.total_seconds()/60<odjezdNejdrive:
continue
if polozka['stop']['platform_code']=="A":
dolu.append(odjezd)
doluUq.append({linka:odjezd})
else:
nahoru.append(odjezd)
nahoruUq.append({linka:odjezd})
pocetOdjezdu=pocetOdjezdu+1
print("nahoru:",len(nahoru))
print("dolu:",len(dolu))
# print("\u25BC |",*dolu)
# print("\u25B2 |",*nahoru)
txt = "\u25BC |"+dolu[0]+dolu[1]+dolu[2]+"\n\u25BC |"+dolu[3]+dolu[4]+dolu[5]
txt = txt+"\n\u25B2 |"+nahoru[0]+nahoru[1]+nahoru[2]+"\n\u25B2 |"+nahoru[3]+nahoru[4]+nahoru[5]
print(txt)
try:
ulozOdjezdy()
schedule.every(10).seconds.do(vypisOdjezdy)
schedule.every(1).hours.do(ulozOdjezdy)
while 1:
schedule.run_pending()
time.sleep(1)
finally:
schedule.clear()
|
PyPragueDepartures.py
|
from datetime import datetime
from math import floor
import requests
from requests.exceptions import HTTPError
import json
import schedule # https://schedule.readthedocs.io/en/stable/
import time
def stahniOdjezdy(limit=1000):
# https://golemioapi.docs.apiary.io/#reference/public-transport/departure-boards/get-departure-board?console=1
# https://api.golemio.cz/api-keys/dashboard
with open('APIkey.secret','r') as f:
accessToken=f.read()
headers = {
'Content-Type': 'application/json; charset=utf-8',
'x-access-token': accessToken
}
minutPred = 0
minutPo = 60*24*7
id1 = 'U861Z1P' # VosmikovychA
id2 = 'U861Z2P' # VosmikovychB
url1 = 'https://api.golemio.cz/v2/departureboards/'
url1+= '?ids=' + id1 + '&limit=' + str(limit)
url1 += '&minutesBefore='+str(minutPred)+'&minutesAfter='+str(minutPo)
url2 = 'https://api.golemio.cz/v2/departureboards/'
url2+= '?ids=' + id2 + '&limit='+str(limit)
url2 += '&minutesBefore='+str(minutPred)+'&minutesAfter='+str(minutPo)
try:
# API returns at most 100 entries, even though the stated limit is 1000
response1 = requests.get(url1, headers=headers)
response1.raise_for_status()
# access JSON content
response2 = requests.get(url2, headers=headers)
response2.raise_for_status()
vystup = response2.json()+response1.json()
return(vystup)
except HTTPError as http_err:
print('HTTP error occurred:',http_err)
except Exception as err:
print('Other error occurred:',err)
def ulozOdjezdy():
try:
odjezdy = stahniOdjezdy()
zapis = json.dumps(odjezdy)
with open("odjezdyCache.txt",'w',encoding = 'utf-8') as f:
f.write(zapis)
except Exception as err:
print('ulozOdjezd error occurred:', err)
def vypisOdjezdy():
odjezdNejdrive = 0 # min
pocetOdjezduMax = 20
dolu = []
nahoru = []
doluUq = []
nahoruUq = []
try:
vystup = stahniOdjezdy(limit=pocetOdjezduMax*2)
except Exception:
print("Error: stazeni odjezdu pro vypis")
try:
with open("odjezdyCache.txt",'r',encoding = 'utf-8') as fCt:
vystup = json.load(fCt)
except Exception:
print("Error: nacteni odjezdu ze souboru")
pocetOdjezdu = 0
for polozka in vystup:
# deleting ':' in timezone
polozkaCas = datetime.strptime(polozka['departure_timestamp']['predicted'].replace("+01:00","+0100"),"%Y-%m-%dT%H:%M:%S.%f%z")
aktualniCas = datetime.now(polozkaCas.tzinfo)
casDoOdjezdu = polozkaCas-aktualniCas
linka = polozka["route"]["short_name"]
if len(linka) < 2:
linka=" "+linka
if abs(casDoOdjezdu.total_seconds())<60:
doOdjezdu = str(floor(casDoOdjezdu.total_seconds()))+"s"
doOdjezduVal = floor(casDoOdjezdu.total_seconds())
else:
doOdjezdu = str(floor(casDoOdjezdu.total_seconds()/60))+"m"
doOdjezduVal = floor(casDoOdjezdu.total_seconds()/60)
if doOdjezduVal>0:
doOdjezdu = " "+doOdjezdu
if abs(doOdjezduVal)<10:
doOdjezdu = " "+doOdjezdu
odjezd = linka+" "+doOdjezdu+" |"
if casDoOdjezdu.total_seconds()/60<odjezdNejdrive:
continue
if polozka['stop']['platform_code']=="A":
dolu.append(odjezd)
doluUq.append({linka:odjezd})
else:
nahoru.append(odjezd)
nahoruUq.append({linka:odjezd})
pocetOdjezdu=pocetOdjezdu+1
print("nahoru:",len(nahoru))
print("dolu:",len(dolu))
# print("\u25BC |",*dolu)
# print("\u25B2 |",*nahoru)
txt = "\u25BC |"+dolu[0]+dolu[1]+dolu[2]+"\n\u25BC |"+dolu[3]+dolu[4]+dolu[5]
txt = txt+"\n\u25B2 |"+nahoru[0]+nahoru[1]+nahoru[2]+"\n\u25B2 |"+nahoru[3]+nahoru[4]+nahoru[5]
print(txt)
try:
ulozOdjezdy()
schedule.every(10).seconds.do(vypisOdjezdy)
schedule.every(1).hours.do(ulozOdjezdy)
while 1:
schedule.run_pending()
time.sleep(1)
finally:
schedule.clear()
| 0.200362 | 0.207938 |
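The same Golemio departure-board request built in stahniOdjezdy, sketched with a params dict instead of manual URL concatenation; the API key value is a placeholder:

import requests

headers = {"Content-Type": "application/json; charset=utf-8",
           "x-access-token": "<your Golemio API key>"}
params = {"ids": "U861Z1P", "limit": 20, "minutesBefore": 0, "minutesAfter": 60}
resp = requests.get("https://api.golemio.cz/v2/departureboards/", headers=headers, params=params)
resp.raise_for_status()
print(len(resp.json()), "departures returned")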
from finviz.async_connector import Connector
from lxml import html
from lxml import etree
import requests
import urllib3
import os
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
TABLE = {
'Overview': '110',
'Valuation': '120',
'Ownership': '130',
'Performance': '140',
'Custom': '150',
'Financial': '160',
'Technical': '170'
}
def http_request(url, payload=None):
if payload is None:
payload = {}
content = requests.get(url, params=payload, verify=False)
content.raise_for_status() # Raise HTTPError for bad requests (4xx or 5xx)
return content, content.url
class Screener(object):
def __init__(self, tickers=None, filters=None, rows=None, order='', signal='', table='Overview'):
if tickers is None:
self.tickers = []
else:
self.tickers = tickers
if filters is None:
self.filters = []
else:
self.filters = filters
self.rows = rows
self.order = order
self.signal = signal
self.table = table
self.page_content = None
self.url = None
self.headers = None
self.page_urls = None
self.data = None
self.__search_screener()
def to_csv(self, directory=None):
from save_data import export_to_csv
if directory is None:
directory = os.getcwd()
export_to_csv(self.headers, self.data, directory)
def __get_total_rows(self):
total_element = self.page_content.cssselect('td[width="140"]')
self.rows = int(etree.tostring(total_element[0]).decode("utf-8").split('</b>')[1].split(' ')[0])
def __get_page_urls(self):
try:
total_pages = int([i.text.split('/')[1] for i in self.page_content.cssselect('option[value="1"]')][0])
except IndexError: # No results found
return None
urls = []
for page_number in range(1, total_pages + 1):
sequence = 1 + (page_number - 1) * 20
if sequence - 20 <= self.rows < sequence:
break
else:
urls.append(self.url + '&r={}'.format(str(sequence)))
self.page_urls = urls
def __get_table_headers(self):
first_row = self.page_content.cssselect('tr[valign="middle"]')
headers = []
for table_content in first_row[0]:
if table_content.text is None:
sorted_text_list = etree.tostring(table_content.cssselect('img')[0]).decode("utf-8").split('/>')
headers.append(sorted_text_list[1])
else:
headers.append(table_content.text)
self.headers = headers
def __get_table_data(self, page=None):
def parse_row(line):
row_data = []
for tags in line:
if tags.text is not None:
row_data.append(tags.text)
else:
row_data.append([span.text for span in tags.cssselect('span')][0])
return row_data
data_sets = []
page = html.fromstring(page)
all_rows = [i.cssselect('a') for i in page.cssselect('tr[valign="top"]')[1:]]
for row in all_rows:
if int(row[0].text) == self.rows:  # compare by value, not identity
values = dict(zip(self.headers, parse_row(row)))
data_sets.append(values)
break
else:
values = dict(zip(self.headers, parse_row(row)))
data_sets.append(values)
return data_sets
def __search_screener(self):
payload = {
'v': TABLE[self.table],
't': ','.join(self.tickers),
'f': ','.join(self.filters),
'o': self.order,
's': self.signal
}
self.page_content, self.url = http_request('https://finviz.com/screener.ashx', payload)
self.page_content = html.fromstring(self.page_content.text) # Parses the page with the default lxml parser
self.__get_table_headers()
if self.rows is None:
self.__get_total_rows()
self.__get_page_urls()
if self.page_urls is None:
raise Exception("No results matching the criteria: {}"
.format(self.url.split('?', 1)[1]))
async_connector = Connector(self.__get_table_data, self.page_urls)
self.data = async_connector.run_connector()
|
finviz/screener.py
|
from finviz.async_connector import Connector
from lxml import html
from lxml import etree
import requests
import urllib3
import os
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
TABLE = {
'Overview': '110',
'Valuation': '120',
'Ownership': '130',
'Performance': '140',
'Custom': '150',
'Financial': '160',
'Technical': '170'
}
def http_request(url, payload=None):
if payload is None:
payload = {}
content = requests.get(url, params=payload, verify=False)
content.raise_for_status() # Raise HTTPError for bad requests (4xx or 5xx)
return content, content.url
class Screener(object):
def __init__(self, tickers=None, filters=None, rows=None, order='', signal='', table='Overview'):
if tickers is None:
self.tickers = []
else:
self.tickers = tickers
if filters is None:
self.filters = []
else:
self.filters = filters
self.rows = rows
self.order = order
self.signal = signal
self.table = table
self.page_content = None
self.url = None
self.headers = None
self.page_urls = None
self.data = None
self.__search_screener()
def to_csv(self, directory=None):
from save_data import export_to_csv
if directory is None:
directory = os.getcwd()
export_to_csv(self.headers, self.data, directory)
def __get_total_rows(self):
total_element = self.page_content.cssselect('td[width="140"]')
self.rows = int(etree.tostring(total_element[0]).decode("utf-8").split('</b>')[1].split(' ')[0])
def __get_page_urls(self):
try:
total_pages = int([i.text.split('/')[1] for i in self.page_content.cssselect('option[value="1"]')][0])
except IndexError: # No results found
return None
urls = []
for page_number in range(1, total_pages + 1):
sequence = 1 + (page_number - 1) * 20
if sequence - 20 <= self.rows < sequence:
break
else:
urls.append(self.url + '&r={}'.format(str(sequence)))
self.page_urls = urls
def __get_table_headers(self):
first_row = self.page_content.cssselect('tr[valign="middle"]')
headers = []
for table_content in first_row[0]:
if table_content.text is None:
sorted_text_list = etree.tostring(table_content.cssselect('img')[0]).decode("utf-8").split('/>')
headers.append(sorted_text_list[1])
else:
headers.append(table_content.text)
self.headers = headers
def __get_table_data(self, page=None):
def parse_row(line):
row_data = []
for tags in line:
if tags.text is not None:
row_data.append(tags.text)
else:
row_data.append([span.text for span in tags.cssselect('span')][0])
return row_data
data_sets = []
page = html.fromstring(page)
all_rows = [i.cssselect('a') for i in page.cssselect('tr[valign="top"]')[1:]]
for row in all_rows:
if int(row[0].text) == self.rows:  # compare by value, not identity
values = dict(zip(self.headers, parse_row(row)))
data_sets.append(values)
break
else:
values = dict(zip(self.headers, parse_row(row)))
data_sets.append(values)
return data_sets
def __search_screener(self):
payload = {
'v': TABLE[self.table],
't': ','.join(self.tickers),
'f': ','.join(self.filters),
'o': self.order,
's': self.signal
}
self.page_content, self.url = http_request('https://finviz.com/screener.ashx', payload)
self.page_content = html.fromstring(self.page_content.text) # Parses the page with the default lxml parser
self.__get_table_headers()
if self.rows is None:
self.__get_total_rows()
self.__get_page_urls()
if self.page_urls is None:
raise Exception("No results matching the criteria: {}"
.format(self.url.split('?', 1)[1]))
async_connector = Connector(self.__get_table_data, self.page_urls)
self.data = async_connector.run_connector()
| 0.394784 | 0.089455 |
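Illustrative use of the Screener above; the filter and order codes follow finviz's URL parameters and are assumptions, not taken from the source:

from finviz.screener import Screener

screener = Screener(filters=['exch_nasd', 'idx_sp500'], order='price', table='Performance')
print(len(screener.data), "rows scraped")
screener.to_csv()   # writes the results to the current working directory by default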
from django.utils.translation import ugettext_noop as _
from api import status
from api.api_views import APIView
from api.exceptions import ObjectNotFound, PreconditionRequired, ObjectAlreadyExists
from api.task.response import SuccessTaskResponse
from api.utils.db import get_object
from api.dc.storage.serializers import DcNodeStorageSerializer, ExtendedDcNodeStorageSerializer
from api.dc.messages import LOG_STORAGE_ATTACH, LOG_STORAGE_DETACH
from vms.models import NodeStorage, DcNode
class DcStorageView(APIView):
serializer = DcNodeStorageSerializer
order_by_default = ('node__hostname', 'zpool')
order_by_field_map = {'hostname': 'node__hostname', 'zpool': 'zpool'}
def __init__(self, request, name, data):
super(DcStorageView, self).__init__(request)
self.data = data
self.name = name
dc = request.dc
if name:
try:
zpool, hostname = name.split('@')
if not (zpool and hostname):
raise ValueError
except ValueError:
raise ObjectNotFound(model=NodeStorage)
attrs = {'node__hostname': hostname, 'zpool': zpool}
if request.method != 'POST':
attrs['dc'] = dc
ns = get_object(request, NodeStorage, attrs, sr=('node', 'storage', 'storage__owner',),
exists_ok=True, noexists_fail=True)
ns.set_dc(dc)
try: # Bug #chili-525 + checks if node is attached to Dc (must be!)
ns.set_dc_node(DcNode.objects.get(node=ns.node, dc=dc))
except DcNode.DoesNotExist:
raise PreconditionRequired(_('Compute node is not available'))
else: # many
ns = NodeStorage.objects.filter(dc=dc).order_by(*self.order_by)
if self.full or self.extended:
dc_nodes = {dn.node.hostname: dn for dn in DcNode.objects.select_related('node').filter(dc=request.dc)}
ns = ns.select_related('node', 'storage', 'storage__owner')
for i in ns: # Bug #chili-525
i.set_dc_node(dc_nodes.get(i.node.hostname, None))
i.set_dc(dc)
self.ns = ns
def get(self, many=False):
if self.extended:
serializer = ExtendedDcNodeStorageSerializer
else:
serializer = self.serializer
if many or not self.name:
if self.full or self.extended:
if self.ns:
# noinspection PyUnresolvedReferences
res = serializer(self.request, self.ns, many=True).data
else:
res = []
else:
res = ['@'.join(i) for i in self.ns.values_list('zpool', 'node__hostname')]
else:
# noinspection PyUnresolvedReferences
res = serializer(self.request, self.ns).data
return SuccessTaskResponse(self.request, res)
def post(self):
ns, dc = self.ns, self.request.dc
if ns.dc.filter(id=dc.id).exists():
raise ObjectAlreadyExists(model=NodeStorage)
ser = self.serializer(self.request, ns)
ns.dc.add(dc)
return SuccessTaskResponse(self.request, ser.data, obj=ns, status=status.HTTP_201_CREATED,
detail_dict=ser.detail_dict(), msg=LOG_STORAGE_ATTACH)
def delete(self):
ns, dc = self.ns, self.request.dc
for vm in dc.vm_set.filter(node=ns.node):
if ns.zpool in vm.get_used_disk_pools(): # active + current
raise PreconditionRequired(_('Storage is used by some VMs'))
if dc.backup_set.filter(zpool=ns).exists():
raise PreconditionRequired(_('Storage is used by some VM backups'))
ser = self.serializer(self.request, ns)
ns.dc.remove(dc)
return SuccessTaskResponse(self.request, None, obj=ns, detail_dict=ser.detail_dict(), msg=LOG_STORAGE_DETACH)
|
api/dc/storage/api_views.py
|
from django.utils.translation import ugettext_noop as _
from api import status
from api.api_views import APIView
from api.exceptions import ObjectNotFound, PreconditionRequired, ObjectAlreadyExists
from api.task.response import SuccessTaskResponse
from api.utils.db import get_object
from api.dc.storage.serializers import DcNodeStorageSerializer, ExtendedDcNodeStorageSerializer
from api.dc.messages import LOG_STORAGE_ATTACH, LOG_STORAGE_DETACH
from vms.models import NodeStorage, DcNode
class DcStorageView(APIView):
serializer = DcNodeStorageSerializer
order_by_default = ('node__hostname', 'zpool')
order_by_field_map = {'hostname': 'node__hostname', 'zpool': 'zpool'}
def __init__(self, request, name, data):
super(DcStorageView, self).__init__(request)
self.data = data
self.name = name
dc = request.dc
if name:
try:
zpool, hostname = name.split('@')
if not (zpool and hostname):
raise ValueError
except ValueError:
raise ObjectNotFound(model=NodeStorage)
attrs = {'node__hostname': hostname, 'zpool': zpool}
if request.method != 'POST':
attrs['dc'] = dc
ns = get_object(request, NodeStorage, attrs, sr=('node', 'storage', 'storage__owner',),
exists_ok=True, noexists_fail=True)
ns.set_dc(dc)
try: # Bug #chili-525 + checks if node is attached to Dc (must be!)
ns.set_dc_node(DcNode.objects.get(node=ns.node, dc=dc))
except DcNode.DoesNotExist:
raise PreconditionRequired(_('Compute node is not available'))
else: # many
ns = NodeStorage.objects.filter(dc=dc).order_by(*self.order_by)
if self.full or self.extended:
dc_nodes = {dn.node.hostname: dn for dn in DcNode.objects.select_related('node').filter(dc=request.dc)}
ns = ns.select_related('node', 'storage', 'storage__owner')
for i in ns: # Bug #chili-525
i.set_dc_node(dc_nodes.get(i.node.hostname, None))
i.set_dc(dc)
self.ns = ns
def get(self, many=False):
if self.extended:
serializer = ExtendedDcNodeStorageSerializer
else:
serializer = self.serializer
if many or not self.name:
if self.full or self.extended:
if self.ns:
# noinspection PyUnresolvedReferences
res = serializer(self.request, self.ns, many=True).data
else:
res = []
else:
res = ['@'.join(i) for i in self.ns.values_list('zpool', 'node__hostname')]
else:
# noinspection PyUnresolvedReferences
res = serializer(self.request, self.ns).data
return SuccessTaskResponse(self.request, res)
def post(self):
ns, dc = self.ns, self.request.dc
if ns.dc.filter(id=dc.id).exists():
raise ObjectAlreadyExists(model=NodeStorage)
ser = self.serializer(self.request, ns)
ns.dc.add(dc)
return SuccessTaskResponse(self.request, ser.data, obj=ns, status=status.HTTP_201_CREATED,
detail_dict=ser.detail_dict(), msg=LOG_STORAGE_ATTACH)
def delete(self):
ns, dc = self.ns, self.request.dc
for vm in dc.vm_set.filter(node=ns.node):
if ns.zpool in vm.get_used_disk_pools(): # active + current
raise PreconditionRequired(_('Storage is used by some VMs'))
if dc.backup_set.filter(zpool=ns).exists():
raise PreconditionRequired(_('Storage is used by some VM backups'))
ser = self.serializer(self.request, ns)
ns.dc.remove(dc)
return SuccessTaskResponse(self.request, None, obj=ns, detail_dict=ser.detail_dict(), msg=LOG_STORAGE_DETACH)
| 0.390243 | 0.096025 |
from astropy import units as u, constants as const
import numpy as np
from ..nuclear import nuclear_binding_energy, nuclear_reaction_energy, mass_energy
from plasmapy.utils.pytest_helpers import run_test, run_test_equivalent_calls
from plasmapy.atomic.exceptions import AtomicError, InvalidParticleError
import pytest
test_nuclear_table = [
[nuclear_binding_energy, 'p', {}, 0 * u.J],
[nuclear_binding_energy, 'n', {}, 0 * u.J],
[nuclear_binding_energy, 'p', {}, 0 * u.J],
[nuclear_binding_energy, "H", {}, AtomicError],
[nuclear_binding_energy, 'He-99', {}, InvalidParticleError],
[nuclear_binding_energy, "He", {"mass_numb": 99}, InvalidParticleError],
[nuclear_binding_energy, 3.1415926535j, {}, TypeError],
[mass_energy, 'e-', {}, (const.m_e * const.c ** 2).to(u.J)],
[mass_energy, 'p+', {}, (const.m_p * const.c ** 2).to(u.J)],
[mass_energy, 'H-1', {}, (const.m_p * const.c ** 2).to(u.J)],
[mass_energy, 'H-1 0+', {}, (const.m_p * const.c ** 2).to(u.J)],
[mass_energy, 'n', {}, (const.m_n * const.c ** 2).to(u.J)],
[nuclear_reaction_energy, (), {'reactants': ['n'], 'products': 3}, TypeError],
[nuclear_reaction_energy, (), {'reactants': ['n'], 'products': ['He-4']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['h'], 'products': ['H-1']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['e-', 'n'], 'products': ['p+']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['e+', 'n'], 'products': ['p-']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['ksdf'], 'products': ['H-3']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['H'], 'products': ['H-1']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['p'], 'products': ['n', 'n', 'e-']}, AtomicError],
[nuclear_reaction_energy, 'H + H --> H', {}, AtomicError],
[nuclear_reaction_energy, 'H + H', {}, AtomicError],
[nuclear_reaction_energy, 1, {}, TypeError],
[nuclear_reaction_energy, 'H-1 + H-1 --> H-1', {}, AtomicError],
[nuclear_reaction_energy, 'p --> n', {}, AtomicError],
[nuclear_reaction_energy, 'p --> p', {'reactants': 'p', 'products': 'p'}, AtomicError],
]
@pytest.mark.parametrize('test_inputs', test_nuclear_table)
def test_nuclear(test_inputs):
run_test(*test_inputs, rtol=1e-3)
test_nuclear_equivalent_calls_table = [
[nuclear_binding_energy, ['He-4', {}], ['alpha', {}], ['He', {'mass_numb': 4}]],
]
@pytest.mark.parametrize('test_inputs', test_nuclear_equivalent_calls_table)
def test_nuclear_equivalent_calls(test_inputs):
run_test_equivalent_calls(test_inputs)
def test_nuclear_binding_energy_D_T():
before = nuclear_binding_energy("D") + nuclear_binding_energy("T")
after = nuclear_binding_energy("alpha")
E_in_MeV = (after - before).to(u.MeV).value # D + T --> alpha + n + E
assert np.isclose(E_in_MeV, 17.58, rtol=0.01)
def test_nuclear_reaction_energy():
reaction1 = 'D + T --> alpha + n'
reaction2 = 'T + D -> n + alpha'
released_energy1 = nuclear_reaction_energy(reaction1)
released_energy2 = nuclear_reaction_energy(reaction2)
assert np.isclose((released_energy1.to(u.MeV)).value, 17.58, rtol=0.01)
assert released_energy1 == released_energy2
assert nuclear_reaction_energy('n + p+ --> n + p+ + p- + p+') == \
nuclear_reaction_energy('n + p+ --> n + 2*p+ + p-')
nuclear_reaction_energy('neutron + antineutron --> neutron + antineutron')
def test_nuclear_reaction_energy_triple_alpha():
triple_alpha1 = 'alpha + He-4 --> Be-8'
triple_alpha2 = 'Be-8 + alpha --> carbon-12'
energy_triplealpha1 = nuclear_reaction_energy(triple_alpha1)
energy_triplealpha2 = nuclear_reaction_energy(triple_alpha2)
assert np.isclose(energy_triplealpha1.to(u.keV).value, -91.8, atol=0.1)
assert np.isclose(energy_triplealpha2.to(u.MeV).value, 7.367, atol=0.1)
reactants = ['He-4', 'alpha']
products = ['Be-8']
energy = nuclear_reaction_energy(reactants=reactants, products=products)
assert np.isclose(energy.to(u.keV).value, -91.8, atol=0.1)
def test_nuclear_reaction_energy_alpha_decay():
alpha_decay_example = 'U-238 --> Th-234 + alpha'
energy_alpha_decay = nuclear_reaction_energy(alpha_decay_example)
assert np.isclose(energy_alpha_decay.to(u.MeV).value, 4.26975, atol=1e-5)
def test_nuclear_reaction_energy_triple_alpha_r():
triple_alpha1_r = '4*He-4 --> 2*Be-8'
energy_triplealpha1_r = nuclear_reaction_energy(triple_alpha1_r)
assert np.isclose(energy_triplealpha1_r.to(u.keV).value,
-91.8 * 2, atol=0.1)
def test_nuclear_reaction_energy_beta():
energy1 = nuclear_reaction_energy(reactants=['n'], products=['p', 'e-'])
assert np.isclose(energy1.to(u.MeV).value, 0.78, atol=0.01)
energy2 = nuclear_reaction_energy(
reactants=['Mg-23'], products=['Na-23', 'e+'])
assert np.isclose(energy2.to(u.MeV).value, 3.034591, atol=1e-5)
# (reactants, products, expectedMeV, tol)
nuclear_reaction_energy_kwargs_table = [
('H-1', 'p', 0.0, 0.0),
(['B-10', 'n'], ['Li-7', 'He-4'], 2.8, 0.06),
(['Li-6', 'D'], ['2*alpha'], 22.2, 0.06),
(['C-12', 'p'], 'N-13', 1.95, 0.006),
(['N-13'], ['C-13', 'e+'], 1.20, 0.006),
(['C-13', 'hydrogen-1'], ['Nitrogen-14'], 7.54, 0.006),
(['N-14', 'H-1'], ['O-15'], 7.35, 0.006),
(['O-15'], ['N-15', 'e+'], 1.73, 0.006),
(('N-15', 'H-1'), ('C-12', 'He-4'), 4.96, 0.006),
]
@pytest.mark.parametrize(
"reactants, products, expectedMeV, tol",
nuclear_reaction_energy_kwargs_table)
def test_nuclear_reaction_energy_kwargs(reactants, products, expectedMeV, tol):
energy = nuclear_reaction_energy(reactants=reactants, products=products).si
expected = (expectedMeV * u.MeV).si
assert np.isclose(expected.value, energy.value, atol=tol)
|
plasmapy/atomic/tests/test_nuclear.py
|
from astropy import units as u, constants as const
import numpy as np
from ..nuclear import nuclear_binding_energy, nuclear_reaction_energy, mass_energy
from plasmapy.utils.pytest_helpers import run_test, run_test_equivalent_calls
from plasmapy.atomic.exceptions import AtomicError, InvalidParticleError
import pytest
test_nuclear_table = [
[nuclear_binding_energy, 'p', {}, 0 * u.J],
[nuclear_binding_energy, 'n', {}, 0 * u.J],
[nuclear_binding_energy, 'p', {}, 0 * u.J],
[nuclear_binding_energy, "H", {}, AtomicError],
[nuclear_binding_energy, 'He-99', {}, InvalidParticleError],
[nuclear_binding_energy, "He", {"mass_numb": 99}, InvalidParticleError],
[nuclear_binding_energy, 3.1415926535j, {}, TypeError],
[mass_energy, 'e-', {}, (const.m_e * const.c ** 2).to(u.J)],
[mass_energy, 'p+', {}, (const.m_p * const.c ** 2).to(u.J)],
[mass_energy, 'H-1', {}, (const.m_p * const.c ** 2).to(u.J)],
[mass_energy, 'H-1 0+', {}, (const.m_p * const.c ** 2).to(u.J)],
[mass_energy, 'n', {}, (const.m_n * const.c ** 2).to(u.J)],
[nuclear_reaction_energy, (), {'reactants': ['n'], 'products': 3}, TypeError],
[nuclear_reaction_energy, (), {'reactants': ['n'], 'products': ['He-4']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['h'], 'products': ['H-1']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['e-', 'n'], 'products': ['p+']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['e+', 'n'], 'products': ['p-']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['ksdf'], 'products': ['H-3']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['H'], 'products': ['H-1']}, AtomicError],
[nuclear_reaction_energy, (), {'reactants': ['p'], 'products': ['n', 'n', 'e-']}, AtomicError],
[nuclear_reaction_energy, 'H + H --> H', {}, AtomicError],
[nuclear_reaction_energy, 'H + H', {}, AtomicError],
[nuclear_reaction_energy, 1, {}, TypeError],
[nuclear_reaction_energy, 'H-1 + H-1 --> H-1', {}, AtomicError],
[nuclear_reaction_energy, 'p --> n', {}, AtomicError],
[nuclear_reaction_energy, 'p --> p', {'reactants': 'p', 'products': 'p'}, AtomicError],
]
@pytest.mark.parametrize('test_inputs', test_nuclear_table)
def test_nuclear(test_inputs):
run_test(*test_inputs, rtol=1e-3)
test_nuclear_equivalent_calls_table = [
[nuclear_binding_energy, ['He-4', {}], ['alpha', {}], ['He', {'mass_numb': 4}]],
]
@pytest.mark.parametrize('test_inputs', test_nuclear_equivalent_calls_table)
def test_nuclear_equivalent_calls(test_inputs):
run_test_equivalent_calls(test_inputs)
def test_nuclear_binding_energy_D_T():
before = nuclear_binding_energy("D") + nuclear_binding_energy("T")
after = nuclear_binding_energy("alpha")
E_in_MeV = (after - before).to(u.MeV).value # D + T --> alpha + n + E
assert np.isclose(E_in_MeV, 17.58, rtol=0.01)
def test_nuclear_reaction_energy():
reaction1 = 'D + T --> alpha + n'
reaction2 = 'T + D -> n + alpha'
released_energy1 = nuclear_reaction_energy(reaction1)
released_energy2 = nuclear_reaction_energy(reaction2)
assert np.isclose((released_energy1.to(u.MeV)).value, 17.58, rtol=0.01)
assert released_energy1 == released_energy2
assert nuclear_reaction_energy('n + p+ --> n + p+ + p- + p+') == \
nuclear_reaction_energy('n + p+ --> n + 2*p+ + p-')
nuclear_reaction_energy('neutron + antineutron --> neutron + antineutron')
def test_nuclear_reaction_energy_triple_alpha():
triple_alpha1 = 'alpha + He-4 --> Be-8'
triple_alpha2 = 'Be-8 + alpha --> carbon-12'
energy_triplealpha1 = nuclear_reaction_energy(triple_alpha1)
energy_triplealpha2 = nuclear_reaction_energy(triple_alpha2)
assert np.isclose(energy_triplealpha1.to(u.keV).value, -91.8, atol=0.1)
assert np.isclose(energy_triplealpha2.to(u.MeV).value, 7.367, atol=0.1)
reactants = ['He-4', 'alpha']
products = ['Be-8']
energy = nuclear_reaction_energy(reactants=reactants, products=products)
assert np.isclose(energy.to(u.keV).value, -91.8, atol=0.1)
def test_nuclear_reaction_energy_alpha_decay():
alpha_decay_example = 'U-238 --> Th-234 + alpha'
energy_alpha_decay = nuclear_reaction_energy(alpha_decay_example)
assert np.isclose(energy_alpha_decay.to(u.MeV).value, 4.26975, atol=1e-5)
def test_nuclear_reaction_energy_triple_alpha_r():
triple_alpha1_r = '4*He-4 --> 2*Be-8'
energy_triplealpha1_r = nuclear_reaction_energy(triple_alpha1_r)
assert np.isclose(energy_triplealpha1_r.to(u.keV).value,
-91.8 * 2, atol=0.1)
def test_nuclear_reaction_energy_beta():
energy1 = nuclear_reaction_energy(reactants=['n'], products=['p', 'e-'])
assert np.isclose(energy1.to(u.MeV).value, 0.78, atol=0.01)
energy2 = nuclear_reaction_energy(
reactants=['Mg-23'], products=['Na-23', 'e+'])
assert np.isclose(energy2.to(u.MeV).value, 3.034591, atol=1e-5)
# (reactants, products, expectedMeV, tol)
nuclear_reaction_energy_kwargs_table = [
('H-1', 'p', 0.0, 0.0),
(['B-10', 'n'], ['Li-7', 'He-4'], 2.8, 0.06),
(['Li-6', 'D'], ['2*alpha'], 22.2, 0.06),
(['C-12', 'p'], 'N-13', 1.95, 0.006),
(['N-13'], ['C-13', 'e+'], 1.20, 0.006),
(['C-13', 'hydrogen-1'], ['Nitrogen-14'], 7.54, 0.006),
(['N-14', 'H-1'], ['O-15'], 7.35, 0.006),
(['O-15'], ['N-15', 'e+'], 1.73, 0.006),
(('N-15', 'H-1'), ('C-12', 'He-4'), 4.96, 0.006),
]
@pytest.mark.parametrize(
"reactants, products, expectedMeV, tol",
nuclear_reaction_energy_kwargs_table)
def test_nuclear_reaction_energy_kwargs(reactants, products, expectedMeV, tol):
energy = nuclear_reaction_energy(reactants=reactants, products=products).si
expected = (expectedMeV * u.MeV).si
assert np.isclose(expected.value, energy.value, atol=tol)
| 0.846483 | 0.585012 |
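The test tables above exercise nuclear_binding_energy and nuclear_reaction_energy through both the reaction-string and the reactants/products keyword interfaces. A minimal usage sketch of those same calls outside the test harness, assuming the module layout matches the test's relative import (newer plasmapy releases may expose these functions under a different package):
from astropy import units as u
from plasmapy.atomic.nuclear import nuclear_binding_energy, nuclear_reaction_energy

# Energy released by D-T fusion, written as a reaction string (as in the tests above).
dt_energy = nuclear_reaction_energy('D + T --> alpha + n')
print(dt_energy.to(u.MeV))  # roughly 17.6 MeV

# The same reaction via the reactants/products keyword interface.
dt_energy_kw = nuclear_reaction_energy(reactants=['D', 'T'], products=['alpha', 'n'])

# Binding energy of an alpha particle, also covered by the tests above.
print(nuclear_binding_energy('alpha').to(u.MeV))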
import os
# Third party imports
from fastapi import Depends, APIRouter, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from dotenv import load_dotenv
# Local imports
from .auth_db import get_user
from .encryption import encrypt_text, decrypt_token, encrypt_token
from .classes import User
load_dotenv()
oauth = APIRouter()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
@oauth.post("/token")
async def login(form_data: OAuth2PasswordRequestForm = Depends()):
"""
Token endpoint - Login here and get your token
    Username and Password are required and must be valid; if they are correct you will get your token
Returns:
Dict[str, str]
"""
user = await get_user(form_data.username)
if not user:
raise HTTPException(status_code=400, detail="Incorrect username or password")
hashed_password = await encrypt_text(form_data.password)
if hashed_password != user.hashed_password:
raise HTTPException(status_code=400, detail="Incorrect username or password")
key = os.environ["KEY"]
token = await encrypt_token(key, user.username)
return {"access_token": token, "token_type": "bearer"}
async def get_current_user(token: str = Depends(oauth2_scheme)):
"""
    Decrypts a token to extract the user info from it, then fetches the matching user from the db
    If the user doesn't exist or the token is invalid, an HTTPException is raised
    :param token (str) : The user's token
    Return:
        User : The authenticated user; otherwise an HTTPException is raised
"""
key = os.environ["KEY"]
decrypted_token = await decrypt_token(key, token)
user = await get_user(decrypted_token)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
"""
    Gets a user and checks whether they are disabled
    Returns:
        the User if they are not disabled
        otherwise an HTTPException is raised
"""
if current_user.disabled == 1:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
|
api/auth/authenticate.py
|
import os
# Third party imports
from fastapi import Depends, APIRouter, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from dotenv import load_dotenv
# Local imports
from .auth_db import get_user
from .encryption import encrypt_text, decrypt_token, encrypt_token
from .classes import User
load_dotenv()
oauth = APIRouter()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
@oauth.post("/token")
async def login(form_data: OAuth2PasswordRequestForm = Depends()):
"""
Token endpoint - Login here and get your token
    Username and Password are required and must be valid; if they are correct you will get your token
Returns:
Dict[str, str]
"""
user = await get_user(form_data.username)
if not user:
raise HTTPException(status_code=400, detail="Incorrect username or password")
hashed_password = await encrypt_text(form_data.password)
if hashed_password != user.hashed_password:
raise HTTPException(status_code=400, detail="Incorrect username or password")
key = os.environ["KEY"]
token = await encrypt_token(key, user.username)
return {"access_token": token, "token_type": "bearer"}
async def get_current_user(token: str = Depends(oauth2_scheme)):
"""
    Decrypts a token to extract the user info from it, then fetches the matching user from the db
    If the user doesn't exist or the token is invalid, an HTTPException is raised
    :param token (str) : The user's token
    Return:
        User : The authenticated user; otherwise an HTTPException is raised
"""
key = os.environ["KEY"]
decrypted_token = await decrypt_token(key, token)
user = await get_user(decrypted_token)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
"""
    Gets a user and checks whether they are disabled
    Returns:
        the User if they are not disabled
        otherwise an HTTPException is raised
"""
if current_user.disabled == 1:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
| 0.695441 | 0.096663 |
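The /token route above accepts standard OAuth2 password-grant form data and returns a bearer token that get_current_user later decrypts. A hypothetical client-side sketch using the requests library; the base URL and the protected route are assumptions, only the form fields and response keys come from the code above:
import requests

BASE_URL = "http://localhost:8000"  # assumed host/port, not defined in the code above

# OAuth2PasswordRequestForm expects form-encoded username/password fields.
resp = requests.post(f"{BASE_URL}/token", data={"username": "alice", "password": "secret"})
resp.raise_for_status()
token = resp.json()["access_token"]

# Subsequent requests send the bearer token, which get_current_user() decrypts
# and resolves back to a user record before the route handler runs.
headers = {"Authorization": f"Bearer {token}"}
requests.get(f"{BASE_URL}/protected", headers=headers)  # hypothetical protected route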
import math
import matplotlib.pyplot as plt
import numpy as np
import gtsam
import gtsam.utils.plot as gtsam_plot
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
key: int):
"""Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
# Print the current estimates computed using iSAM2.
print("*"*50 + f"\nInference after State {key+1}:\n")
print(current_estimate)
# Compute the marginals for all states in the graph.
marginals = gtsam.Marginals(graph, current_estimate)
# Plot the newly updated iSAM2 inference.
fig = plt.figure(0)
axes = fig.gca()
plt.cla()
i = 1
while current_estimate.exists(i):
gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i))
i += 1
plt.axis('equal')
axes.set_xlim(-1, 5)
axes.set_ylim(-1, 3)
plt.pause(1)
def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values,
key: int, xy_tol=0.6, theta_tol=17) -> int:
"""Simple brute force approach which iterates through previous states
and checks for loop closure.
Args:
odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame.
current_estimate: The current estimates computed by iSAM2.
key: Key corresponding to the current state estimate of the robot.
xy_tol: Optional argument for the x-y measurement tolerance, in meters.
theta_tol: Optional argument for the theta measurement tolerance, in degrees.
Returns:
k: The key of the state which is helping add the loop closure constraint.
If loop closure is not found, then None is returned.
"""
if current_estimate:
prev_est = current_estimate.atPose2(key+1)
rotated_odom = prev_est.rotation().matrix() @ odom[:2]
curr_xy = np.array([prev_est.x() + rotated_odom[0],
prev_est.y() + rotated_odom[1]])
curr_theta = prev_est.theta() + odom[2]
for k in range(1, key+1):
pose_xy = np.array([current_estimate.atPose2(k).x(),
current_estimate.atPose2(k).y()])
pose_theta = current_estimate.atPose2(k).theta()
if (abs(pose_xy - curr_xy) <= xy_tol).all() and \
(abs(pose_theta - curr_theta) <= theta_tol*np.pi/180):
return k
def Pose2SLAM_ISAM2_example():
"""Perform 2D SLAM given the ground truth changes in pose as well as
simple loop closure detection."""
plt.ion()
# Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
prior_xy_sigma = 0.3
# Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
prior_theta_sigma = 5
# Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
odometry_xy_sigma = 0.2
# Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
odometry_theta_sigma = 5
# Although this example only uses linear measurements and Gaussian noise models, it is important
# to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
# simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
prior_xy_sigma,
prior_theta_sigma*np.pi/180]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
odometry_xy_sigma,
odometry_theta_sigma*np.pi/180]))
# Create a Nonlinear factor graph as well as the data structure to hold state estimates.
graph = gtsam.NonlinearFactorGraph()
initial_estimate = gtsam.Values()
# Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
# update calls are required to perform the relinearization.
parameters = gtsam.ISAM2Params()
parameters.setRelinearizeThreshold(0.1)
parameters.setRelinearizeSkip(1)
isam = gtsam.ISAM2(parameters)
# Create the ground truth odometry measurements of the robot during the trajectory.
true_odometry = [(2, 0, 0),
(2, 0, math.pi/2),
(2, 0, math.pi/2),
(2, 0, math.pi/2),
(2, 0, math.pi/2)]
# Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
for true_odom in true_odometry]
# Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
# iSAM2 incremental optimization.
graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
# Initialize the current estimate which is used during the incremental inference loop.
current_estimate = initial_estimate
for i in range(len(true_odometry)):
# Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]
# Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)
# Add a binary factor in between two existing states if loop closure is detected.
# Otherwise, add a binary factor between a newly observed state and the previous state.
if loop:
graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
else:
graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
# Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
noisy_odom_y,
noisy_odom_theta))
initial_estimate.insert(i + 2, computed_estimate)
# Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
isam.update(graph, initial_estimate)
current_estimate = isam.calculateEstimate()
        # Report all current state estimates from the iSAM2 optimization.
report_on_progress(graph, current_estimate, i)
initial_estimate.clear()
# Print the final covariance matrix for each pose after completing inference on the trajectory.
marginals = gtsam.Marginals(graph, current_estimate)
i = 1
for i in range(1, len(true_odometry)+1):
print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
plt.ioff()
plt.show()
if __name__ == "__main__":
Pose2SLAM_ISAM2_example()
|
python/gtsam/examples/Pose2ISAM2Example.py
|
import math
import matplotlib.pyplot as plt
import numpy as np
import gtsam
import gtsam.utils.plot as gtsam_plot
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
key: int):
"""Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
# Print the current estimates computed using iSAM2.
print("*"*50 + f"\nInference after State {key+1}:\n")
print(current_estimate)
# Compute the marginals for all states in the graph.
marginals = gtsam.Marginals(graph, current_estimate)
# Plot the newly updated iSAM2 inference.
fig = plt.figure(0)
axes = fig.gca()
plt.cla()
i = 1
while current_estimate.exists(i):
gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i))
i += 1
plt.axis('equal')
axes.set_xlim(-1, 5)
axes.set_ylim(-1, 3)
plt.pause(1)
def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values,
key: int, xy_tol=0.6, theta_tol=17) -> int:
"""Simple brute force approach which iterates through previous states
and checks for loop closure.
Args:
odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame.
current_estimate: The current estimates computed by iSAM2.
key: Key corresponding to the current state estimate of the robot.
xy_tol: Optional argument for the x-y measurement tolerance, in meters.
theta_tol: Optional argument for the theta measurement tolerance, in degrees.
Returns:
k: The key of the state which is helping add the loop closure constraint.
If loop closure is not found, then None is returned.
"""
if current_estimate:
prev_est = current_estimate.atPose2(key+1)
rotated_odom = prev_est.rotation().matrix() @ odom[:2]
curr_xy = np.array([prev_est.x() + rotated_odom[0],
prev_est.y() + rotated_odom[1]])
curr_theta = prev_est.theta() + odom[2]
for k in range(1, key+1):
pose_xy = np.array([current_estimate.atPose2(k).x(),
current_estimate.atPose2(k).y()])
pose_theta = current_estimate.atPose2(k).theta()
if (abs(pose_xy - curr_xy) <= xy_tol).all() and \
(abs(pose_theta - curr_theta) <= theta_tol*np.pi/180):
return k
def Pose2SLAM_ISAM2_example():
"""Perform 2D SLAM given the ground truth changes in pose as well as
simple loop closure detection."""
plt.ion()
# Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
prior_xy_sigma = 0.3
# Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
prior_theta_sigma = 5
# Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
odometry_xy_sigma = 0.2
# Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
odometry_theta_sigma = 5
# Although this example only uses linear measurements and Gaussian noise models, it is important
# to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
# simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
prior_xy_sigma,
prior_theta_sigma*np.pi/180]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
odometry_xy_sigma,
odometry_theta_sigma*np.pi/180]))
# Create a Nonlinear factor graph as well as the data structure to hold state estimates.
graph = gtsam.NonlinearFactorGraph()
initial_estimate = gtsam.Values()
# Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
# update calls are required to perform the relinearization.
parameters = gtsam.ISAM2Params()
parameters.setRelinearizeThreshold(0.1)
parameters.setRelinearizeSkip(1)
isam = gtsam.ISAM2(parameters)
# Create the ground truth odometry measurements of the robot during the trajectory.
true_odometry = [(2, 0, 0),
(2, 0, math.pi/2),
(2, 0, math.pi/2),
(2, 0, math.pi/2),
(2, 0, math.pi/2)]
# Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
for true_odom in true_odometry]
# Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
# iSAM2 incremental optimization.
graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
# Initialize the current estimate which is used during the incremental inference loop.
current_estimate = initial_estimate
for i in range(len(true_odometry)):
# Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]
# Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)
# Add a binary factor in between two existing states if loop closure is detected.
# Otherwise, add a binary factor between a newly observed state and the previous state.
if loop:
graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
else:
graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
# Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
noisy_odom_y,
noisy_odom_theta))
initial_estimate.insert(i + 2, computed_estimate)
# Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
isam.update(graph, initial_estimate)
current_estimate = isam.calculateEstimate()
        # Report all current state estimates from the iSAM2 optimization.
report_on_progress(graph, current_estimate, i)
initial_estimate.clear()
# Print the final covariance matrix for each pose after completing inference on the trajectory.
marginals = gtsam.Marginals(graph, current_estimate)
i = 1
for i in range(1, len(true_odometry)+1):
print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
plt.ioff()
plt.show()
if __name__ == "__main__":
Pose2SLAM_ISAM2_example()
| 0.869977 | 0.750256 |
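The initialization step inside the loop above chains a noisy body-frame odometry increment onto the previous estimate with Pose2.compose. A standalone sketch of just that operation, with illustrative numbers:
import math
import gtsam

prev_pose = gtsam.Pose2(2.0, 0.0, math.pi / 2)       # previous pose estimate
odom_increment = gtsam.Pose2(2.0, 0.0, math.pi / 2)  # body-frame odometry measurement
predicted = prev_pose.compose(odom_increment)        # initial guess for the next pose
print(predicted.x(), predicted.y(), predicted.theta())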
from modules.extractors.doc import DocExtractor
from modules.metadata import Metadata
import rtf2xml.ParseRtf
class RtfExtractor(DocExtractor):
"""Class for handling RTF file data extraction.
We will mostly be using the same logic as for MSDoc files, but we need
another way to extract metadata, as wVSummary doesn't work on RTF
"""
NS_RTF = "http://rtf2xml.sourceforge.net/"
def _namespace(self, element):
import re
m = re.match('\{.*\}', element.tag)
return m.group(0) if m else ''
def _tag_name(self, element):
import re
m = re.match('\{.*\}(.*)', element.tag)
return m.group(1) if m else ''
def get_metadata(self):
"""Returns a metadata.Metadata object
See http://www.biblioscape.com/rtf15_spec.htm
for RTF metadata specification
"""
import os
temp_filename = os.path.join("temp", "tmp.rtf.xml")
parse_obj = rtf2xml.ParseRtf.ParseRtf(in_file=self.path,
out_file=temp_filename)
parse_obj.parse_rtf()
metadata = Metadata()
import xml.etree.ElementTree as ET
tree = ET.parse(temp_filename)
root = tree.getroot()
section = root.find(".//{%s}doc-information" % self.NS_RTF)
if len(section) > 0:
for tag in section.iterfind(".//*"):
tag_name = self._tag_name(tag)
if tag.text is not None:
metadata.add({tag_name: tag.text})
elif tag.get("year") is not None and tag.get("year") != "0":
date_parts = []
date_parts.append(tag.get("year"))
date_parts.append(tag.get("month").zfill(2) or "01")
date_parts.append(tag.get("day").zfill(2) or "01")
date_str = "-".join(date_parts)
metadata.add({tag_name: date_str})
os.unlink(temp_filename)
return metadata
if __name__ == "__main__":
print "This module is only intended to be called from other scripts."
import sys
sys.exit()
|
modules/extractors/rtf.py
|
from modules.extractors.doc import DocExtractor
from modules.metadata import Metadata
import rtf2xml.ParseRtf
class RtfExtractor(DocExtractor):
"""Class for handling RTF file data extraction.
We will mostly be using the same logic as for MSDoc files, but we need
another way to extract metadata, as wVSummary doesn't work on RTF
"""
NS_RTF = "http://rtf2xml.sourceforge.net/"
def _namespace(self, element):
import re
m = re.match('\{.*\}', element.tag)
return m.group(0) if m else ''
def _tag_name(self, element):
import re
m = re.match('\{.*\}(.*)', element.tag)
return m.group(1) if m else ''
def get_metadata(self):
"""Returns a metadata.Metadata object
See http://www.biblioscape.com/rtf15_spec.htm
for RTF metadata specification
"""
import os
temp_filename = os.path.join("temp", "tmp.rtf.xml")
parse_obj = rtf2xml.ParseRtf.ParseRtf(in_file=self.path,
out_file=temp_filename)
parse_obj.parse_rtf()
metadata = Metadata()
import xml.etree.ElementTree as ET
tree = ET.parse(temp_filename)
root = tree.getroot()
section = root.find(".//{%s}doc-information" % self.NS_RTF)
if len(section) > 0:
for tag in section.iterfind(".//*"):
tag_name = self._tag_name(tag)
if tag.text is not None:
metadata.add({tag_name: tag.text})
elif tag.get("year") is not None and tag.get("year") != "0":
date_parts = []
date_parts.append(tag.get("year"))
date_parts.append(tag.get("month").zfill(2) or "01")
date_parts.append(tag.get("day").zfill(2) or "01")
date_str = "-".join(date_parts)
metadata.add({tag_name: date_str})
os.unlink(temp_filename)
return metadata
if __name__ == "__main__":
print "This module is only intended to be called from other scripts."
import sys
sys.exit()
| 0.555918 | 0.302404 |
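The _namespace/_tag_name helpers above strip the {namespace} prefix that ElementTree prepends to element tags. A small illustration of that pattern on a synthetic element; the XML snippet is made up and is not actual rtf2xml output:
import re
import xml.etree.ElementTree as ET

root = ET.fromstring(
    '<doc-information xmlns="http://rtf2xml.sourceforge.net/">'
    '<title>Example</title></doc-information>')
title = root.find('{http://rtf2xml.sourceforge.net/}title')

match = re.match(r'\{.*\}(.*)', title.tag)
print(match.group(1) if match else title.tag)  # -> "title"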
__author__ = '<NAME>'
import argparse
from RouToolPa.Routines import MatplotlibRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
type=lambda s : s.split(","),
help="Comma separated list of two input files with data")
parser.add_argument("-d", "--index", action="store", dest="index",
type=lambda s: map(int, s.split(",")),
help="Zero based indexes of file columns to use. Default: all ")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix",
help="Prefix of output files")
parser.add_argument("-s", "--separator", action="store", dest="separator", default=None,
help="Separator between values in input file. Default - any space symbol")
parser.add_argument("-l", "--xlabel", action="store", dest="xlabel", type=lambda s: s.split(","),
help="Comma-separated list of X labels")
parser.add_argument("-y", "--ylabel", action="store", dest="ylabel", type=lambda s: s.split(","),
help="Comma-separated list of Y labels")
parser.add_argument("-b", "--number_of_bins", action="store", dest="number_of_bins",
type=lambda s: map(int, s.split(",")),
help="Comma-separated list of bin numbers in histograms. "
"Incompatible with -w/--width_of_bins option. Default - 30")
parser.add_argument("-w", "--width_of_bins", action="store", dest="width_of_bins",
type=lambda s: map(float, s.split(",")),
help="Comma-separated list of bin widths in histograms. "
"Incompatible with -b/--number_of_bins option. Not set by default")
parser.add_argument("-n", "--min_value", action="store", dest="min_value", type=lambda s: map(float, s.split(",")),
default=0,
                    help="Comma-separated list of minimum values to show. Default - 0")
parser.add_argument("-x", "--max_value", action="store", dest="max_value", type=lambda s: map(float, s.split(",")),
help="Comma-separated list of maximum value to show. Default - length of longest sequence")
parser.add_argument("-g", "--logbase", action="store", dest="logbase", type=int, default=10,
help="Logbase to use for log-scaled histograms")
parser.add_argument("-e", "--extensions", action="store", dest="extensions", type=lambda x: x.split(","),
default=["png"],
help="Comma-separated list of extensions for histogram files. Default: png only")
parser.add_argument("-t", "--title_list", action="store", dest="title_list", type=lambda s: s.split(","),
                    help="Comma-separated list of two titles for histograms")
parser.add_argument("-v", "--share_x_axis", action="store_true", dest="share_x_axis", default=False,
help="Share X axis in all histograms. Default: False")
parser.add_argument("-z", "--share_y_axis", action="store_true", dest="share_y_axis", default=False,
help="Share Y axis in all histograms. Default: False")
args = parser.parse_args()
if args.index is None:
args.index = [None for i in range(0, len(args.input))]
if args.max_value is None:
args.max_value = [None for i in range(0, len(args.input))]
MatplotlibRoutines.draw_tetra_histogram_with_two_logscaled_from_file(args.input, args.index, args.output_prefix,
figsize=(10, 10),
number_of_bins_list=args.number_of_bins,
width_of_bins_list=args.width_of_bins,
max_threshold_list=args.max_value,
min_threshold_list=args.min_value,
xlabel=args.xlabel, ylabel=args.ylabel,
title_list=args.title_list,
logbase=args.logbase, label_list=None,
extensions=args.extensions, suptitle=None,
separator=args.separator,
share_y_axis=args.share_y_axis,
share_x_axis=args.share_x_axis)
"""
Example:
~/Dropbox/MAVR/scripts/draw/draw_tetra_histogram_with_two_logscaled.py -i kirill.dn.ds.w.tab,solenodon.raw_alns.all.tab -d 3,3 -o dnds.ratio.log -l 'dN/dS' -y "Number of genes" -w 20 -n 0 -x 999 -t "11 species,4 species"
"""
"""
if (args.number_of_bins is not None) and (args.width_of_bins is not None):
raise AttributeError("Options -w/--width_of_bins and -b/--number_of_bins mustn't be set simultaneously")
lengths = np.fromfile(args.input_file, sep=args.separator)
max_len = max(lengths)
if args.max_length is None:
args.max_length = max_len
if (args.max_length != max_len) and (args.min_length != 1):
filtered = []
for entry in lengths:
if args.min_length <= entry <= args.max_length:
filtered.append(entry)
else:
filtered = lengths
figure = plt.figure(1, figsize=(6, 6))
subplot = plt.subplot(1, 1, 1)
if args.number_of_bins:
bins = args.number_of_bins
elif args.width_of_bins:
bins = np.arange(args.min_length, args.max_length, args.width_of_bins)
#print bins
#bins[0] += 1
bins = np.append(bins, [args.max_length])
else:
bins = 30
n, bins, patches = plt.hist(lengths, bins=bins)
bin_centers = (bins + ((bins[1] - bins[0])/2))[:-1]
#print bin_centers
#print len(n)
#print len(bin_centers)
plt.xlim(xmin=args.min_length, xmax=args.max_length)
if args.xlabel:
plt.xlabel(args.xlabel)
if args.ylabel:
plt.ylabel(args.ylabel)
if args.title:
plt.title(args.title)
for ext in args.extensions:
plt.savefig("%s.%s" % (args.output_prefix, ext))
subplot.set_yscale('log', basey=args.logbase)
#subplot.set_xscale('log', basex=args.logbase)
for ext in args.extensions:
plt.savefig("%s.logscale.%s" % (args.output_prefix, ext))
# save histo values
np.savetxt("%s.histo" % args.output_prefix, zip(bin_centers, n), fmt="%i\t%i")
"""
|
scripts/draw/draw_tetra_histogram_with_two_logscaled.py
|
__author__ = '<NAME>'
import argparse
from RouToolPa.Routines import MatplotlibRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
type=lambda s : s.split(","),
help="Comma separated list of two input files with data")
parser.add_argument("-d", "--index", action="store", dest="index",
type=lambda s: map(int, s.split(",")),
help="Zero based indexes of file columns to use. Default: all ")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix",
help="Prefix of output files")
parser.add_argument("-s", "--separator", action="store", dest="separator", default=None,
help="Separator between values in input file. Default - any space symbol")
parser.add_argument("-l", "--xlabel", action="store", dest="xlabel", type=lambda s: s.split(","),
help="Comma-separated list of X labels")
parser.add_argument("-y", "--ylabel", action="store", dest="ylabel", type=lambda s: s.split(","),
help="Comma-separated list of Y labels")
parser.add_argument("-b", "--number_of_bins", action="store", dest="number_of_bins",
type=lambda s: map(int, s.split(",")),
help="Comma-separated list of bin numbers in histograms. "
"Incompatible with -w/--width_of_bins option. Default - 30")
parser.add_argument("-w", "--width_of_bins", action="store", dest="width_of_bins",
type=lambda s: map(float, s.split(",")),
help="Comma-separated list of bin widths in histograms. "
"Incompatible with -b/--number_of_bins option. Not set by default")
parser.add_argument("-n", "--min_value", action="store", dest="min_value", type=lambda s: map(float, s.split(",")),
default=0,
                    help="Comma-separated list of minimum values to show. Default - 0")
parser.add_argument("-x", "--max_value", action="store", dest="max_value", type=lambda s: map(float, s.split(",")),
help="Comma-separated list of maximum value to show. Default - length of longest sequence")
parser.add_argument("-g", "--logbase", action="store", dest="logbase", type=int, default=10,
help="Logbase to use for log-scaled histograms")
parser.add_argument("-e", "--extensions", action="store", dest="extensions", type=lambda x: x.split(","),
default=["png"],
help="Comma-separated list of extensions for histogram files. Default: png only")
parser.add_argument("-t", "--title_list", action="store", dest="title_list", type=lambda s: s.split(","),
                    help="Comma-separated list of two titles for histograms")
parser.add_argument("-v", "--share_x_axis", action="store_true", dest="share_x_axis", default=False,
help="Share X axis in all histograms. Default: False")
parser.add_argument("-z", "--share_y_axis", action="store_true", dest="share_y_axis", default=False,
help="Share Y axis in all histograms. Default: False")
args = parser.parse_args()
if args.index is None:
args.index = [None for i in range(0, len(args.input))]
if args.max_value is None:
args.max_value = [None for i in range(0, len(args.input))]
MatplotlibRoutines.draw_tetra_histogram_with_two_logscaled_from_file(args.input, args.index, args.output_prefix,
figsize=(10, 10),
number_of_bins_list=args.number_of_bins,
width_of_bins_list=args.width_of_bins,
max_threshold_list=args.max_value,
min_threshold_list=args.min_value,
xlabel=args.xlabel, ylabel=args.ylabel,
title_list=args.title_list,
logbase=args.logbase, label_list=None,
extensions=args.extensions, suptitle=None,
separator=args.separator,
share_y_axis=args.share_y_axis,
share_x_axis=args.share_x_axis)
"""
Example:
~/Dropbox/MAVR/scripts/draw/draw_tetra_histogram_with_two_logscaled.py -i kirill.dn.ds.w.tab,solenodon.raw_alns.all.tab -d 3,3 -o dnds.ratio.log -l 'dN/dS' -y "Number of genes" -w 20 -n 0 -x 999 -t "11 species,4 species"
"""
"""
if (args.number_of_bins is not None) and (args.width_of_bins is not None):
raise AttributeError("Options -w/--width_of_bins and -b/--number_of_bins mustn't be set simultaneously")
lengths = np.fromfile(args.input_file, sep=args.separator)
max_len = max(lengths)
if args.max_length is None:
args.max_length = max_len
if (args.max_length != max_len) and (args.min_length != 1):
filtered = []
for entry in lengths:
if args.min_length <= entry <= args.max_length:
filtered.append(entry)
else:
filtered = lengths
figure = plt.figure(1, figsize=(6, 6))
subplot = plt.subplot(1, 1, 1)
if args.number_of_bins:
bins = args.number_of_bins
elif args.width_of_bins:
bins = np.arange(args.min_length, args.max_length, args.width_of_bins)
#print bins
#bins[0] += 1
bins = np.append(bins, [args.max_length])
else:
bins = 30
n, bins, patches = plt.hist(lengths, bins=bins)
bin_centers = (bins + ((bins[1] - bins[0])/2))[:-1]
#print bin_centers
#print len(n)
#print len(bin_centers)
plt.xlim(xmin=args.min_length, xmax=args.max_length)
if args.xlabel:
plt.xlabel(args.xlabel)
if args.ylabel:
plt.ylabel(args.ylabel)
if args.title:
plt.title(args.title)
for ext in args.extensions:
plt.savefig("%s.%s" % (args.output_prefix, ext))
subplot.set_yscale('log', basey=args.logbase)
#subplot.set_xscale('log', basex=args.logbase)
for ext in args.extensions:
plt.savefig("%s.logscale.%s" % (args.output_prefix, ext))
# save histo values
np.savetxt("%s.histo" % args.output_prefix, zip(bin_centers, n), fmt="%i\t%i")
"""
| 0.328206 | 0.171755 |
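Most options above parse comma-separated values through type= lambdas; under Python 3 the map(...) variants yield iterators rather than lists, so an explicit list conversion is safer. A standalone sketch of the same pattern, independent of the script:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-d", "--index",
                    type=lambda s: [int(v) for v in s.split(",")],
                    help="Zero-based comma-separated column indexes")
args = parser.parse_args(["-d", "3,3"])
print(args.index)  # -> [3, 3]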
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class reverse_metric(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isis-operational - based on the path /isis-state/router-isis-config/reverse-metric. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IS-IS system level reverse-metric configuration
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__reverse_metric_value','__rev_metric_whole_lan','__rev_metric_te_def_metric','__rev_metric_tlv_type',)
_yang_name = 'reverse-metric'
_rest_name = 'reverse-metric'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rev_metric_whole_lan = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
self.__rev_metric_te_def_metric = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
self.__rev_metric_tlv_type = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__reverse_metric_value = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'isis-state', u'router-isis-config', u'reverse-metric']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'isis-state', u'router-isis-config', u'reverse-metric']
def _get_reverse_metric_value(self):
"""
Getter method for reverse_metric_value, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/reverse_metric_value (uint32)
YANG Description: IS-IS reverse metric value
"""
return self.__reverse_metric_value
def _set_reverse_metric_value(self, v, load=False):
"""
Setter method for reverse_metric_value, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/reverse_metric_value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_value() directly.
YANG Description: IS-IS reverse metric value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reverse_metric_value must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__reverse_metric_value = t
if hasattr(self, '_set'):
self._set()
def _unset_reverse_metric_value(self):
self.__reverse_metric_value = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_rev_metric_whole_lan(self):
"""
Getter method for rev_metric_whole_lan, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_whole_lan (isis-status)
YANG Description: If IS-IS metric to be changed across whole LAN
"""
return self.__rev_metric_whole_lan
def _set_rev_metric_whole_lan(self, v, load=False):
"""
Setter method for rev_metric_whole_lan, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_whole_lan (isis-status)
If this variable is read-only (config: false) in the
source YANG file, then _set_rev_metric_whole_lan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rev_metric_whole_lan() directly.
YANG Description: If IS-IS metric to be changed across whole LAN
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rev_metric_whole_lan must be of a type compatible with isis-status""",
'defined-type': "brocade-isis-operational:isis-status",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""",
})
self.__rev_metric_whole_lan = t
if hasattr(self, '_set'):
self._set()
def _unset_rev_metric_whole_lan(self):
self.__rev_metric_whole_lan = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
def _get_rev_metric_te_def_metric(self):
"""
Getter method for rev_metric_te_def_metric, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_te_def_metric (isis-status)
YANG Description: If TE default-metric subtlv has to be updated
"""
return self.__rev_metric_te_def_metric
def _set_rev_metric_te_def_metric(self, v, load=False):
"""
Setter method for rev_metric_te_def_metric, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_te_def_metric (isis-status)
If this variable is read-only (config: false) in the
source YANG file, then _set_rev_metric_te_def_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rev_metric_te_def_metric() directly.
YANG Description: If TE default-metric subtlv has to be updated
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rev_metric_te_def_metric must be of a type compatible with isis-status""",
'defined-type': "brocade-isis-operational:isis-status",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""",
})
self.__rev_metric_te_def_metric = t
if hasattr(self, '_set'):
self._set()
def _unset_rev_metric_te_def_metric(self):
self.__rev_metric_te_def_metric = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
def _get_rev_metric_tlv_type(self):
"""
Getter method for rev_metric_tlv_type, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_tlv_type (uint32)
YANG Description: IS-IS reverse metric TLV type
"""
return self.__rev_metric_tlv_type
def _set_rev_metric_tlv_type(self, v, load=False):
"""
Setter method for rev_metric_tlv_type, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_tlv_type (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_rev_metric_tlv_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rev_metric_tlv_type() directly.
YANG Description: IS-IS reverse metric TLV type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rev_metric_tlv_type must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__rev_metric_tlv_type = t
if hasattr(self, '_set'):
self._set()
def _unset_rev_metric_tlv_type(self):
self.__rev_metric_tlv_type = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
reverse_metric_value = __builtin__.property(_get_reverse_metric_value)
rev_metric_whole_lan = __builtin__.property(_get_rev_metric_whole_lan)
rev_metric_te_def_metric = __builtin__.property(_get_rev_metric_te_def_metric)
rev_metric_tlv_type = __builtin__.property(_get_rev_metric_tlv_type)
_pyangbind_elements = {'reverse_metric_value': reverse_metric_value, 'rev_metric_whole_lan': rev_metric_whole_lan, 'rev_metric_te_def_metric': rev_metric_te_def_metric, 'rev_metric_tlv_type': rev_metric_tlv_type, }
|
pybind/slxos/v16r_1_00b/isis_state/router_isis_config/reverse_metric/__init__.py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class reverse_metric(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isis-operational - based on the path /isis-state/router-isis-config/reverse-metric. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IS-IS system level reverse-metric configuration
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__reverse_metric_value','__rev_metric_whole_lan','__rev_metric_te_def_metric','__rev_metric_tlv_type',)
_yang_name = 'reverse-metric'
_rest_name = 'reverse-metric'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rev_metric_whole_lan = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
self.__rev_metric_te_def_metric = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
self.__rev_metric_tlv_type = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__reverse_metric_value = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'isis-state', u'router-isis-config', u'reverse-metric']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'isis-state', u'router-isis-config', u'reverse-metric']
def _get_reverse_metric_value(self):
"""
Getter method for reverse_metric_value, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/reverse_metric_value (uint32)
YANG Description: IS-IS reverse metric value
"""
return self.__reverse_metric_value
def _set_reverse_metric_value(self, v, load=False):
"""
Setter method for reverse_metric_value, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/reverse_metric_value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_reverse_metric_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reverse_metric_value() directly.
YANG Description: IS-IS reverse metric value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """reverse_metric_value must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__reverse_metric_value = t
if hasattr(self, '_set'):
self._set()
def _unset_reverse_metric_value(self):
self.__reverse_metric_value = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="reverse-metric-value", rest_name="reverse-metric-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_rev_metric_whole_lan(self):
"""
Getter method for rev_metric_whole_lan, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_whole_lan (isis-status)
YANG Description: If IS-IS metric to be changed across whole LAN
"""
return self.__rev_metric_whole_lan
def _set_rev_metric_whole_lan(self, v, load=False):
"""
Setter method for rev_metric_whole_lan, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_whole_lan (isis-status)
If this variable is read-only (config: false) in the
source YANG file, then _set_rev_metric_whole_lan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rev_metric_whole_lan() directly.
YANG Description: If IS-IS metric to be changed across whole LAN
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rev_metric_whole_lan must be of a type compatible with isis-status""",
'defined-type': "brocade-isis-operational:isis-status",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""",
})
self.__rev_metric_whole_lan = t
if hasattr(self, '_set'):
self._set()
def _unset_rev_metric_whole_lan(self):
self.__rev_metric_whole_lan = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-whole-lan", rest_name="rev-metric-whole-lan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
def _get_rev_metric_te_def_metric(self):
"""
Getter method for rev_metric_te_def_metric, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_te_def_metric (isis-status)
YANG Description: If TE default-metric subtlv has to be updated
"""
return self.__rev_metric_te_def_metric
def _set_rev_metric_te_def_metric(self, v, load=False):
"""
Setter method for rev_metric_te_def_metric, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_te_def_metric (isis-status)
If this variable is read-only (config: false) in the
source YANG file, then _set_rev_metric_te_def_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rev_metric_te_def_metric() directly.
YANG Description: If TE default-metric subtlv has to be updated
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rev_metric_te_def_metric must be of a type compatible with isis-status""",
'defined-type': "brocade-isis-operational:isis-status",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""",
})
self.__rev_metric_te_def_metric = t
if hasattr(self, '_set'):
self._set()
def _unset_rev_metric_te_def_metric(self):
self.__rev_metric_te_def_metric = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="rev-metric-te-def-metric", rest_name="rev-metric-te-def-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
def _get_rev_metric_tlv_type(self):
"""
Getter method for rev_metric_tlv_type, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_tlv_type (uint32)
YANG Description: IS-IS reverse metric TLV type
"""
return self.__rev_metric_tlv_type
def _set_rev_metric_tlv_type(self, v, load=False):
"""
Setter method for rev_metric_tlv_type, mapped from YANG variable /isis_state/router_isis_config/reverse_metric/rev_metric_tlv_type (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_rev_metric_tlv_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rev_metric_tlv_type() directly.
YANG Description: IS-IS reverse metric TLV type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rev_metric_tlv_type must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__rev_metric_tlv_type = t
if hasattr(self, '_set'):
self._set()
def _unset_rev_metric_tlv_type(self):
self.__rev_metric_tlv_type = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rev-metric-tlv-type", rest_name="rev-metric-tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
reverse_metric_value = __builtin__.property(_get_reverse_metric_value)
rev_metric_whole_lan = __builtin__.property(_get_rev_metric_whole_lan)
rev_metric_te_def_metric = __builtin__.property(_get_rev_metric_te_def_metric)
rev_metric_tlv_type = __builtin__.property(_get_rev_metric_tlv_type)
_pyangbind_elements = {'reverse_metric_value': reverse_metric_value, 'rev_metric_whole_lan': rev_metric_whole_lan, 'rev_metric_te_def_metric': rev_metric_te_def_metric, 'rev_metric_tlv_type': rev_metric_tlv_type, }
from typing import Dict, List, Optional, Tuple, Union
import re
SYNTAX_ERROR = "SyntaxError"
RUNTIME_ERROR = "RuntimeError"
class Number:
def __init__(self, value: int) -> None:
self.value = value
def run(self, variables: Dict[str, int]) -> int:
return self.value
class Operator:
def __init__(self, left, right) -> None:
self.left = left
self.right = right
def add_num(self, variables: Dict[str, int]) -> None:
if self.left not in variables:
variables[self.left] = 0
if self.right not in variables:
variables[self.right] = 0
class And(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if int(variables[self.left]) != 0 and int(variables[self.right]) != 0 else 0
class Or(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if int(variables[self.left]) != 0 or int(variables[self.right]) != 0 else 0
class Nand(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if not (int(variables[self.left]) != 0 and int(variables[self.right]) != 0) else 0
class Add(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return int(variables[self.left]) + int(variables[self.right])
class Sub(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return int(variables[self.left]) - int(variables[self.right])
class Mul(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return int(variables[self.left]) * int(variables[self.right])
class Div(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return int(int(variables[self.left]) / int(variables[self.right]))
class Lt(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if int(variables[self.left]) < int(variables[self.right]) else 0
class Gt(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
        return 1 if int(variables[self.left]) > int(variables[self.right]) else 0
class Eq(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if int(variables[self.left]) == int(variables[self.right]) else 0
class Leq(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if int(variables[self.left]) <= int(variables[self.right]) else 0
class Geq(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int]) -> int:
self.add_num(variables)
return 1 if int(variables[self.left]) >= int(variables[self.right]) else 0
class Call(Operator):
def __init__(self, left, right) -> None:
super().__init__(left, right)
def run(self, variables: Dict[str, int], functions: List['Function']) -> Union[int, str]:
func_to_run = [fun for fun in functions if fun.function_name == self.left]
if len(func_to_run) == 0:
return "NameError"
if len(func_to_run[0].arguments_names) != len(self.right):
return "TypeError"
arguments = []
for var in self.right:
if var not in variables:
variables[var] = 0
arguments.append(variables[var])
rt_val = func_to_run[0].run(arguments, functions)
return rt_val
built_in_func = {
"add": Add,
"sub": Sub,
"mul": Mul,
"div": Div,
"lt" : Lt,
"gt" : Gt,
"eq" : Eq,
"leq": Leq,
"geq": Geq,
"and": And,
"or" : Or,
"nand": Nand,
}
class Expression:
def __init__(self) -> None:
        self.left: Optional[str] = None
self.right: Optional[Operator] = None
self.code_line_num: Optional[int] = None
def check(self, functions: List['Function']) -> Optional[int]:
if isinstance(self.right, Call):
fun = [fun for fun in functions if fun.function_name == self.right.left and len(fun.arguments_names) == len(self.right.right)]
if len(fun) != 1:
return self.code_line_num
return None
def build(self, index: int, line: str, functions: List['Function']) -> Union[int, Tuple[int, str]]:
self.code_line_num = index + 1
left, right = line.split("=")
self.left = left.split()[0]
if not check_variable(self.left):
return (self.code_line_num, SYNTAX_ERROR)
right_parts = right.split()
if len(right_parts) == 0:
return (self.code_line_num, SYNTAX_ERROR)
elif len(right_parts) == 1 and right_parts[0].lstrip("-").isdigit():
self.right = Number(int(right_parts[0]))
elif len(right_parts) == 3 and right_parts[0] in built_in_func:
if not (check_variable(right_parts[1]) and check_variable(right_parts[2])):
return (self.code_line_num, SYNTAX_ERROR)
self.right = built_in_func[right_parts[0]](right_parts[1], right_parts[2])
else:
if right_parts[0] in built_in_func:
return (self.code_line_num, SYNTAX_ERROR)
for var in right_parts[1:]:
if not check_variable(var):
return (self.code_line_num, SYNTAX_ERROR)
self.right = Call(right_parts[0], right_parts[1:])
return index + 1
def run(self, variables: Dict[str, int], functions: List['Function']) -> Optional[Tuple[int, str]]:
try:
if isinstance(self.right, Call):
rt_val = self.right.run(variables, functions)
else:
rt_val = self.right.run(variables)
        except Exception:
return (self.code_line_num, RUNTIME_ERROR)
if not isinstance(rt_val, int):
return (self.code_line_num, rt_val)
variables[self.left] = rt_val
class Scope:
def __init__(self) -> None:
        self.objects_to_run: List[Union["Expression", "PredicateScope"]] = []
def check_valid_line(self, line: str, pattern: str) -> bool:
return re.search(pattern, line) is not None or line.isspace() or line == ""
def check(self, functions: List['Function']) -> Optional[int]:
for object_to_run in self.objects_to_run:
rt_val = object_to_run.check(functions)
if rt_val is not None:
return rt_val
return None
def build_scope(self, index: int, code_lines: List[str],
functions: List['Function'], indent: str) \
-> Union[int, Tuple[int, str]]:
pattern = "^" + indent + "[a-zA-Z].*$"
while index < len(code_lines) and self.check_valid_line(code_lines[index], pattern):
line = code_lines[index]
if line.isspace() or line == "":
index += 1
continue
line_parts = line.split()
if len(line_parts) < 2:
return (index + 1, SYNTAX_ERROR)
elif len(line_parts) == 2:
                if line_parts[0] not in ("if", "while"):
return (index + 1, SYNTAX_ERROR)
new_object = PredicateScope()
rt_val = new_object.build(index, code_lines, functions, indent + " ")
if isinstance(rt_val, int):
index = rt_val
else:
return rt_val
self.objects_to_run.append(new_object)
elif line_parts[1] == "=":
new_object = Expression()
rt_val = new_object.build(index, line, functions)
if isinstance(rt_val, int):
index = rt_val
else:
return rt_val
self.objects_to_run.append(new_object)
else:
return (index + 1, SYNTAX_ERROR)
return index
class PredicateScope(Scope):
def __init__(self) -> None:
super().__init__()
self.predicate_var: Optional[str] = None
self.code_line_num: Optional[int] = None
self.type_scp: Optional[str] = None
def build(self, index: int, code_lines: List[str], functions: List['Function'], indent: str) -> Union[int, Tuple[int, str]]:
self.code_line_num = index + 1
line = code_lines[index]
line_parts = line.split()
self.type_scp = "if" if line_parts[0] == "if" else "while"
if not check_variable(line_parts[1]):
return (index + 1, SYNTAX_ERROR)
self.predicate_var = line_parts[1]
return self.build_scope(index + 1, code_lines, functions, indent)
def run(self, variables: Dict[str, int], functions: List['Function']) -> Optional[Tuple[int, str]]:
if self.predicate_var not in variables:
variables[self.predicate_var] = 0
return None
if self.type_scp == "if" and variables[self.predicate_var] != 0:
for line in self.objects_to_run:
rt_val = line.run(variables, functions)
if rt_val is not None:
return rt_val
while self.type_scp == "while" and variables[self.predicate_var] != 0:
for line in self.objects_to_run:
rt_val = line.run(variables, functions)
if rt_val is not None:
return rt_val
class Function(Scope):
def __init__(self) -> None:
super().__init__()
self.function_name: Optional[str] = None
self.arguments_names: List[str] = []
self.line_num: Optional[int] = None
def check_function_name(self, functions: List['Function']) -> bool:
if re.search("^[a-zA-Z][a-zA-Z0-9_]*$", self.function_name) is None:
return False
if self.function_name in built_in_func:
return False
if any(map(lambda func: func.function_name == self.function_name, functions)):
return False
return True
def build(self, index: int, code_lines: List[str], functions: List['Function']) -> Union[int, Tuple[int, str]]:
self.line_num = index + 1
line = code_lines[index]
header = line.split()
if len(header) < 2:
return (self.line_num, SYNTAX_ERROR)
_, self.function_name, *arguments = header
self.arguments_names = arguments
for var in self.arguments_names:
if not check_variable(var):
return (self.line_num, SYNTAX_ERROR)
if not self.check_function_name(functions):
return (self.line_num, SYNTAX_ERROR)
functions.append(self)
return self.build_scope(index + 1, code_lines, functions, " ")
def run(self, variables: List[str], functions: List['Function']) -> Union[int, Tuple[int, str]]:
if len(self.arguments_names) != len(variables):
return (self.line_num, RUNTIME_ERROR)
variables_dict = {}
variables_dict[self.function_name] = 0
for var, val in zip(self.arguments_names, variables):
variables_dict[var] = val
for line in self.objects_to_run:
rt_val = line.run(variables_dict, functions)
if rt_val is not None:
return rt_val
return variables_dict[self.function_name]
class Interpreter:
def __init__(self, code_str: str) -> None:
self.code_lines: List[str] = code_str.split("\n")
self.functions: List["Function"] = []
def func_check(self) -> Optional[int]:
for fun in self.functions:
rt_val = fun.check(self.functions)
if rt_val is not None:
return rt_val
return None
def build(self) -> Optional[Tuple[int, str]]:
index = 0
while index < len(self.code_lines):
line = self.code_lines[index]
if line.isspace() or line == "":
index += 1
elif line.startswith("def"):
new_function = Function()
rt_val = new_function.build(index, self.code_lines, self.functions)
if isinstance(rt_val, int):
index = rt_val
else:
return rt_val
else:
return (index + 1, SYNTAX_ERROR)
rt_val = self.func_check()
if rt_val is not None:
return (rt_val, SYNTAX_ERROR)
def run(self, func_name: str, variables: List[str]) -> Union[int, Tuple[int, str]]:
function_to_run = [fun for fun in self.functions if fun.function_name == func_name]
if len(function_to_run) != 1:
return (0, RUNTIME_ERROR)
return function_to_run[0].run(variables, self.functions)
def check_variable(name: str) -> bool:
return re.search("^[a-zA-Z][a-zA-Z0-9_]*$", name) is not None
def do_rec(code: str, func_name: str, *args) -> Union[int, Tuple[int, str]]:
interpreter = Interpreter(code)
rt_val = interpreter.build()
if rt_val is not None:
return rt_val
return interpreter.run(func_name, args)
rec.py
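A minimal usage sketch for the interpreter above. The toy program is an invented example that follows the syntax the parser accepts: a "def <name> <args>" header, one-space indentation inside a function body, two spaces inside an "if"/"while" block, and the return value is whatever ends up in the variable named after the function. The import path is assumed from the file name shown above.
from rec import do_rec  # import path assumed from the file name above

# Recursive factorial written in the toy language accepted by the parser.
program = (
    "def fact n\n"
    " one = 1\n"
    " fact = 1\n"
    " big = gt n one\n"
    " if big\n"
    "  m = sub n one\n"
    "  rec = fact m\n"
    "  fact = mul n rec\n"
)
print(do_rec(program, "fact", 5))  # prints 120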
import numpy as np
from atom.api import (Enum, Str, set_default)
from exopy.tasks.api import SimpleTask, validators
ARR_VAL = validators.Feval(types=np.ndarray)
class ArrayExtremaTask(SimpleTask):
""" Store the pair(s) of index/value for the extrema(s) of an array.
Wait for any parallel operation before execution.
"""
#: Name of the target in the database.
target_array = Str().tag(pref=True, feval=ARR_VAL)
    #: Name of the column in which the extrema should be looked for.
    column_name = Str().tag(pref=True)
    #: Flag indicating which extremum should be looked for.
mode = Enum('Max', 'Min', 'Max & min').tag(pref=True)
database_entries = set_default({'max_ind': 0, 'max_value': 1.0})
wait = set_default({'activated': True}) # Wait on all pools by default.
def perform(self):
""" Find extrema of database array and store index/value pairs.
"""
array = self.format_and_eval_string(self.target_array)
if self.column_name:
array = array[self.column_name]
if self.mode == 'Max' or self.mode == 'Max & min':
ind = np.argmax(array)
val = array[ind]
self.write_in_database('max_ind', ind)
self.write_in_database('max_value', val)
if self.mode == 'Min' or self.mode == 'Max & min':
ind = np.argmin(array)
val = array[ind]
self.write_in_database('min_ind', ind)
self.write_in_database('min_value', val)
def check(self, *args, **kwargs):
""" Check the target array can be found and has the right column.
"""
test, traceback = super(ArrayExtremaTask, self).check(*args, **kwargs)
if not test:
return test, traceback
array = self.format_and_eval_string(self.target_array)
err_path = self.get_error_path()
if self.column_name:
if array.dtype.names:
names = array.dtype.names
if self.column_name not in names:
                    msg = 'No column named {} in array. (columns are: {})'
traceback[err_path] = msg.format(self.column_name, names)
return False, traceback
else:
traceback[err_path] = 'Array has no named columns'
return False, traceback
else:
if array.dtype.names:
                msg = 'The target array has named columns: {}. Choose one.'
traceback[err_path] = msg.format(array.dtype.names)
return False, traceback
elif len(array.shape) > 1:
msg = 'Must use 1d array when using non record arrays.'
traceback[err_path] = msg
return False, traceback
return test, traceback
def _post_setattr_mode(self, old, new):
""" Update the database entries according to the mode.
"""
if new == 'Max':
self.database_entries = {'max_ind': 0, 'max_value': 2.0}
elif new == 'Min':
self.database_entries = {'min_ind': 0, 'min_value': 1.0}
else:
self.database_entries = {'max_ind': 0, 'max_value': 2.0,
'min_ind': 0, 'min_value': 1.0}
class ArrayFindValueTask(SimpleTask):
    """ Store the index of the first occurrence of a value in an array.
Wait for any parallel operation before execution.
"""
#: Name of the target in the database.
target_array = Str().tag(pref=True, feval=ARR_VAL)
    #: Name of the column in which the value should be looked for.
column_name = Str().tag(pref=True)
#: Value which should be looked for in the array.
value = Str().tag(pref=True, feval=validators.Feval())
database_entries = set_default({'index': 0})
wait = set_default({'activated': True}) # Wait on all pools by default.
def perform(self):
""" Find index of value array and store index in database.
"""
array = self.format_and_eval_string(self.target_array)
if self.column_name:
array = array[self.column_name]
val = self.format_and_eval_string(self.value)
try:
ind = np.where(np.abs(array - val) < 1e-12)[0][0]
except IndexError as e:
msg = 'Could not find {} in array {} ({})'
raise ValueError(msg.format(val, self.target_array, array)) from e
self.write_in_database('index', ind)
def check(self, *args, **kwargs):
""" Check the target array can be found and has the right column.
"""
test, traceback = super(ArrayFindValueTask, self).check(*args,
**kwargs)
if not test:
return test, traceback
err_path = self.get_error_path()
array = self.format_and_eval_string(self.target_array)
if self.column_name:
if array.dtype.names:
names = array.dtype.names
if self.column_name not in names:
                    msg = 'No column named {} in array. (columns are: {})'
traceback[err_path] = msg.format(self.column_name, names)
return False, traceback
else:
traceback[err_path] = 'Array has no named columns'
return False, traceback
else:
if array.dtype.names:
                msg = 'The target array has named columns: {}. Choose one.'
traceback[err_path] = msg.format(array.dtype.names)
return False, traceback
elif len(array.shape) > 1:
msg = 'Must use 1d array when using non record arrays.'
traceback[err_path] = msg
return False, traceback
return test, traceback
exopy_hqc_legacy/tasks/tasks/util/array_tasks.py
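As a standalone illustration (plain NumPy, outside the exopy task machinery), the lookups performed by the two tasks above reduce to an argmax/argmin on a named column and a tolerance-based search for a value. The record array below is an invented example.
import numpy as np

# Invented record array with named columns, mimicking what target_array evaluates to.
arr = np.array([(0.0, 1.2), (1.0, 3.4), (2.0, 0.7)],
               dtype=[('time', 'f8'), ('signal', 'f8')])
col = arr['signal']

# ArrayExtremaTask: index/value pairs of the extrema.
max_ind, min_ind = np.argmax(col), np.argmin(col)
print(max_ind, col[max_ind], min_ind, col[min_ind])   # 1 3.4 2 0.7

# ArrayFindValueTask: first index where the column matches a value within 1e-12.
index = np.where(np.abs(col - 0.7) < 1e-12)[0][0]
print(index)                                          # 2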
from dataclasses import dataclass, field
from typing import List
import pandas as pd
import xlwings as xw
import datetime
class PyMoiReader:
def read(self) -> pd.DataFrame:
        raise NotImplementedError
class CsvReader(PyMoiReader):
def __init__(self, fullname, delimiter=',', quotechar='"'):
self.fullname = fullname
self.delimiter = delimiter
self.quotechar = quotechar
def read(self):
df = pd.read_csv(self.fullname, delimiter=self.delimiter,
quotechar=self.quotechar)
return df
class ExcelReader(PyMoiReader):
def __init__(
self,
fullname,
seek_start: str,
names: list,
unit_row: int = 1,
sheetname: str = None,
):
self.fullname = fullname
self.seek_start = seek_start
self.unit_row = unit_row
self.sheetname = sheetname
self.names = names
self.parameters = []
self.count = 0
def read(self):
        self._app = xw.App(visible=False)  # keep a handle so the hidden Excel instance can be closed later
self._wb = xw.Book(
self.fullname, read_only=True, ignore_read_only_recommended=True
)
        # If no sheet name is given, use the first sheet
self._sht = self._wb.sheets[self.sheetname if self.sheetname else 0]
        # Determine how many rows to read
while self._sht.range(self.seek_start).offset(row_offset=self.count).value:
self.count += self.unit_row
buffer = []
for param in self.parameters:
# fixed, cell
if isinstance(param, StaticParameter):
if isinstance(param, FixedParameter):
ser = pd.Series([param.value] * self.count)
elif isinstance(param, CellParameter):
ser = pd.Series(
[self._sht.range(param.cell).value] * self.count)
buffer.append(ser)
# direction, repeat
elif isinstance(param, DynamicParameter):
for j in range(param.number):
                    # Get the start and end cells of this column
r1 = self._sht.range(
param.column +
str(self._sht.range(self.seek_start).row)
).offset(column_offset=j)
r2 = r1.offset(row_offset=self.count - 1)
ser = pd.Series(self._sht.range(r1, r2).value)
                    # For Repeat parameters, forward-fill NA values with the previous value
if isinstance(param, RepeatParameter):
ser.ffill(inplace=True)
buffer.append(ser)
        self._wb.close()
        self._app.quit()  # shut down the hidden Excel instance opened in read()
df = pd.DataFrame({k: v for k, v in zip(range(len(buffer)), buffer)})
df.columns = self.names
return df
@dataclass
class Parameter:
pass
@dataclass
class StaticParameter(Parameter):
pass
@dataclass
class DynamicParameter(Parameter):
pass
@dataclass
class FixedParameter(StaticParameter):
value: str
    # TODO: What is the difference between the system date and today?
__reserved_params = {
"#システム日時": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
"#システム日付": datetime.datetime.today().strftime('%Y-%m-%d'),
"#本日": datetime.datetime.today().strftime('%Y-%m-%d'),
}
def __post_init__(self):
self.value = self.__reserved_params.get(self.value, self.value)
@dataclass
class CellParameter(StaticParameter):
cell: str
@dataclass
class DirectionParameter(DynamicParameter):
line: int
column: str
number: int
def __init__(self, line: int, column: str, number: int):
if line < 1:
raise ValueError(f"line must > 0 but {line}")
if number < 1:
raise ValueError(f'argument "number" must > 0 but {number}')
self.line = line
self.column = column
self.number = number
@dataclass
class RepeatParameter(DynamicParameter):
line: int
column: str
number: int
def __init__(self, line: int, column: str, number: int):
if line < 1:
raise ValueError
if number < 1:
raise ValueError
self.line = line
self.column = column
self.number = number
pymoi/reader.py
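A minimal usage sketch for CsvReader above, run against a throwaway CSV file. The import path is assumed from the file path shown.
import os
import tempfile
from pymoi.reader import CsvReader  # import path assumed from the file path above

# Write a tiny CSV to a temporary file and read it back through CsvReader.
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write("a,b\n1,2\n3,4\n")
    path = f.name

reader = CsvReader(path)
print(reader.read())  # 2x2 DataFrame with columns a and b
os.remove(path)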
import datajoint as dj
schema = dj.Schema()
def activate(schema_name, create_schema=True, create_tables=True):
"""
activate(schema_name, create_schema=True, create_tables=True)
:param schema_name: schema name on the database server to activate the `lab` element
:param create_schema: when True (default), create schema in the database if it does not yet exist.
:param create_tables: when True (default), create tables in the database if they do not yet exist.
"""
schema.activate(schema_name, create_schema=create_schema, create_tables=create_tables)
@schema
class Lab(dj.Lookup):
definition = """
lab : varchar(24) # Abbreviated lab name
---
lab_name : varchar(255) # full lab name
institution : varchar(255)
address : varchar(255)
time_zone : varchar(64)
"""
@schema
class Location(dj.Lookup):
definition = """
# location of animal housing or experimental rigs
-> Lab
location : varchar(32)
---
location_description='' : varchar(255)
"""
@schema
class UserRole(dj.Lookup):
definition = """
user_role : varchar(16)
"""
@schema
class User(dj.Lookup):
definition = """
user : varchar(32)
---
user_email='' : varchar(128)
user_cellphone='' : varchar(32)
"""
@schema
class LabMembership(dj.Lookup):
definition = """
-> Lab
-> User
---
-> [nullable] UserRole
"""
@schema
class ProtocolType(dj.Lookup):
definition = """
protocol_type : varchar(32)
"""
@schema
class Protocol(dj.Lookup):
definition = """
# protocol approved by some institutions like IACUC, IRB
protocol : varchar(16)
---
-> ProtocolType
protocol_description='' : varchar(255)
"""
@schema
class Project(dj.Lookup):
definition = """
project : varchar(32)
---
project_description='' : varchar(1024)
"""
@schema
class ProjectUser(dj.Manual):
definition = """
-> Project
-> User
"""
@schema
class Source(dj.Lookup):
definition = """
# source or supplier of animals
source : varchar(32) # abbreviated source name
---
source_name : varchar(255)
contact_details='' : varchar(255)
source_description='' : varchar(255)
"""
element_lab/lab.py
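A hypothetical activation sketch for the lab element above. It assumes a reachable MySQL server, placeholder credentials, and that the module is importable as element_lab.lab (per the path shown).
import datajoint as dj
from element_lab import lab  # import path assumed from the file path above

# Placeholder connection settings; replace with real credentials.
dj.config['database.host'] = 'localhost'
dj.config['database.user'] = 'root'
dj.config['database.password'] = 'simple'

lab.activate('my_lab')  # creates the schema and tables if they do not exist
lab.Lab.insert1({'lab': 'lab_a',
                 'lab_name': 'Example Lab',
                 'institution': 'Example University',
                 'address': '123 Example St',
                 'time_zone': 'UTC'},
                skip_duplicates=True)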
from flask import Flask, request, render_template
import pandas as pd
import pickle
import numpy as np
import joblib  # sklearn.externals.joblib was removed in recent scikit-learn releases
from sklearn.preprocessing import StandardScaler
import re
app = Flask(__name__, template_folder="templates")
# Load the model
model = joblib.load('./models/model.p')
scaler = joblib.load('./models/scaler.p')
@app.route('/', methods=['GET','POST'])
def home():
if request.method == 'POST':
principal_balance = request.form.get('principal_balance')
principal_ptd = request.form.get('principal_ptd')
down = request.form.get('down')
NADA = request.form.get('NADA')
finance_charge = request.form.get('finance_charge')
term = request.form.get('term')
seasoning = request.form.get('seasoning')
DPD = request.form.get('DPD')
text = request.form.get('text')
data = [principal_balance,principal_ptd,down,NADA,finance_charge,term,seasoning,DPD,text]
        input_df = pd.DataFrame([data],
        columns=['principal_balance', 'principal_ptd', 'down', 'NADA', 'finance_charge','term','seasoning', 'DPD', 'content'])
        input_df['LTV'] = float(input_df.principal_balance)/float(input_df.NADA)
        input_df['WLTV'] = input_df.LTV*float(input_df.principal_balance)
        input_df['down_%'] = float(input_df.down)/(float(input_df.principal_balance)+float(input_df.principal_ptd))
        print(input_df)
        df = input_df[['DPD', 'term', 'seasoning', 'finance_charge', 'principal_ptd', 'down_%','LTV', 'WLTV']]
# df = pd.DataFrame(input.content.str.split().tolist(), columns=['DPD', 'term', 'seasoning', 'finance_charge', 'principal_ptd', 'down_%','LTV', 'WLTV'])
# Make prediction
print(df)
rescaled_df = scaler.transform(df)
pred = model.predict(rescaled_df)
pred = np.round(pred, decimals=2)
#print(preds)
#pred = pred*100
print(pred)
return render_template('index.html', value=pred[0])
return render_template('index.html', value='')
if __name__ == '__main__':
app.run(port=3000, debug=True)
Deploying your ML Model/WebProject1/website.py
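A small client sketch for exercising the endpoint above. It assumes the Flask app is running locally on port 3000 (as in app.run) and posts the form fields that home() reads; the values are invented.
import requests

form = {  # field names taken from home(); the values are invented
    'principal_balance': '12000', 'principal_ptd': '500', 'down': '2000',
    'NADA': '15000', 'finance_charge': '1800', 'term': '60',
    'seasoning': '12', 'DPD': '0', 'text': '',
}
resp = requests.post('http://localhost:3000/', data=form)
print(resp.status_code)  # the rendered index.html embeds the prediction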
import pytz
from custom_components.hass_opensprinkler import CONF_CONFIG, CONF_STATIONS, DOMAIN
from datetime import datetime, timedelta
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
SCAN_INTERVAL = timedelta(seconds=5)
utc_tz = pytz.timezone('UTC')
def setup_platform(hass, config, add_devices, discovery_info=None):
opensprinkler = hass.data[DOMAIN][DOMAIN]
opensprinklerConfig = hass.data[DOMAIN][CONF_CONFIG]
stationIndexes = opensprinklerConfig[CONF_STATIONS] or []
sensors = []
for station in opensprinkler.stations():
if len(stationIndexes) == 0 or (station.index in stationIndexes):
sensors.append(StationSensor(station))
sensors.append(WaterLevelSensor(opensprinkler))
sensors.append(LastRunSensor(opensprinkler))
sensors.append(RainDelayStopTimeSensor(opensprinkler))
add_devices(sensors, True)
class StationSensor(Entity):
def __init__(self, station):
self._station = station
self._state = None
self._status = None
self._p_status = None
@property
    def name(self):
        """Return the name of the sensor."""
return self._station.name
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return None
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@Throttle(SCAN_INTERVAL)
def update(self):
"""Fetch new state data for the sensor."""
self._status = self._station.status()
self._p_status = self._station.p_status()
        if self._status == 1:
            if self._p_status[0] == 99:
                self._state = "Running manual"
            elif self._p_status[0] == 254:
                self._state = "Running once prog."
            elif self._p_status[0] == 0:
                self._state = "Idle"
            else:
                self._state = "Running schedule"
        else:
            if self._p_status[0] > 0:
                self._state = "Waiting for run"
            else:
                self._state = "Idle"
class WaterLevelSensor(Entity):
def __init__(self, opensprinkler):
self._opensprinkler = opensprinkler
self._state = None
@property
    def name(self):
        """Return the name of the sensor."""
return "Water Level"
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return "%"
@property
def icon(self):
"""Return icon."""
return "mdi:water"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@Throttle(SCAN_INTERVAL)
def update(self):
"""Fetch new state data for the sensor."""
self._state = self._opensprinkler.water_level()
class LastRunSensor(Entity):
def __init__(self, opensprinkler):
self._opensprinkler = opensprinkler
self._state = None
self._last_run = None
@property
    def name(self):
        """Return the name of the sensor."""
return "Last Run"
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return None
@property
def icon(self):
"""Return icon."""
return "mdi:history"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@Throttle(SCAN_INTERVAL)
def update(self):
"""Fetch new state data for the sensor."""
self._last_run = self._opensprinkler.last_run()
utcTime = datetime.fromtimestamp(self._last_run[3], utc_tz)
self._state = utcTime.strftime("%d/%m %H:%M")
class RainDelayStopTimeSensor(Entity):
def __init__(self, opensprinkler):
self._opensprinkler = opensprinkler
self._state = None
self._rain_delay_stop_time = None
@property
    def name(self):
        """Return the name of the sensor."""
return "Rain Delay Stop Time"
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return None
@property
def icon(self):
"""Return icon."""
return "mdi:weather-rainy"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@Throttle(SCAN_INTERVAL)
def update(self):
"""Fetch new state data for the sensor."""
self._rain_delay_stop_time = self._opensprinkler.rain_delay_stop_time()
        if self._rain_delay_stop_time == 0:
            self._state = 'Not in effect'
        else:
            utc_time = datetime.fromtimestamp(self._rain_delay_stop_time, utc_tz)
            self._state = utc_time.strftime("%d/%m %H:%M")
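
# Illustrative helper (an assumption for this sketch, not part of the upstream
# integration): it restates the StationSensor.update mapping above so the
# program-id conventions (99 = manual run, 254 = run-once program, 0 = no
# program) can be checked in isolation, without a live OpenSprinkler client.
def _describe_station_state(status, program_id):
    """Return the state string StationSensor.update would report."""
    if status == 1:
        return {99: "Running manual", 254: "Running once prog.", 0: "Idle"}.get(
            program_id, "Running schedule")
    return "Waiting for run" if program_id > 0 else "Idle"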
|
hass_opensprinkler/sensor.py
| 0.722723 | 0.144149 |
import dao.PersonDAO as PersonDAO
import manager.PermissionManager as PermissionManager
def get_all():
get_first_name_last_name(None, None)
def get_first_name(first_name):
get_first_name_last_name(first_name, None)
def get_last_name(last_name):
get_first_name_last_name(None, last_name)
def get_first_name_last_name(first_name, last_name):
results = PersonDAO.get_all(first_name, last_name)
print(format_person_header())
for person in results:
print(format_person(person))
def get_id_username(person_id):
result = PersonDAO.get_id_or_username(person_id)
if result:
print(format_person_header())
print(format_person(result))
else:
print("User with '" + person_id + "' as ID or username not found!")
def login(username, password):
user = PersonDAO.get_username_password(username, password)
if user:
print("Welcome " + user['first_name'] + " " + user['last_name'])
return user
else:
print("Invalid username or password")
return None
def add_person(person):
if PermissionManager.has_permission_add_user(person):
        print("You will be guided to enter the new user's data; type /cancel at any prompt to cancel.")
first_name = input("First name >> ")
if first_name != '/cancel':
last_name = input("Last name >> ")
if last_name != '/cancel':
username = input("Username used to login >> ")
if username != '/cancel':
password = input("Password used to login >> ")
if password != '/cancel':
print("User data"
"\n========================"
"\nFirst name: " + first_name,
"\nLast name: " + last_name,
"\nUsername: " + username,
"\nPassword: " + password)
answer = input("Do you want to add user with data above (yes/no) >> ")
while answer != "yes" and answer != "no":
answer = input("Unrecognized '"
+ answer + "'\nDo you want to add user with data above (yes/no) >> ")
if answer == "yes":
return PersonDAO.add_person(first_name, last_name, username, password)
else:
print("Forbidden. You are missing necessary permissions.")
return False
def format_person_header():
return \
" ID| First name | Last name | Username \n" \
"-----+---------------------+--------------------------+----------------"
def format_person(person):
return u"{0:5}| {1:20}| {2:25}| {3:15}".format(
person['id'],
person['first_name'],
person['last_name'],
person['username'])
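
# Illustrative sketch (not part of the original module; the person record below
# is hypothetical): shows how the two formatting helpers above combine into the
# table printed by get_first_name_last_name and get_id_username.
def _example_person_table():
    person = {"id": 1, "first_name": "Ada", "last_name": "Lovelace",
              "username": "ada"}
    return "\n".join([format_person_header(), format_person(person)])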
|
manager/PersonManager.py
| 0.265024 | 0.105487 |
from abc import abstractmethod
from typing import (
List,
Union,
Dict,
Tuple
)
from mockintosh.constants import PYBARS, JINJA
from mockintosh.performance import PerformanceProfile
from mockintosh.exceptions import (
CommaInTagIsForbidden
)
from mockintosh.templating import TemplateRenderer
class ConfigService:
services = []
def __init__(
self,
_type: str,
name: Union[str, None],
internal_service_id: Union[int, None]
):
self.type = _type
self.name = name
self.external_file_paths = []
self._impl = None
if internal_service_id is None:
self.internal_service_id = len(ConfigService.services)
ConfigService.services.append(self)
else:
self.internal_service_id = internal_service_id
ConfigService.services[internal_service_id] = self
def get_name(self) -> str:
return self.name if self.name is not None else ''
@abstractmethod
def get_hint(self):
raise NotImplementedError
def add_external_file_path(self, external_file_path) -> None:
self.external_file_paths.append(external_file_path)
def destroy(self) -> None:
for external_file_path in self.external_file_paths:
external_file_path.destroy()
class ConfigContainsTag:
def forbid_comma_in_tag(self, data: list):
for row in data:
if isinstance(row, (str, ConfigExternalFilePath)):
return
elif isinstance(row, dict):
for key, value in row.items():
if key != 'tag':
continue
if ',' in value: # pragma: no cover
raise CommaInTagIsForbidden(value)
else:
if row.tag is not None and ',' in row.tag:
raise CommaInTagIsForbidden(row.tag)
class ConfigExternalFilePath:
files = []
def __init__(self, path: str, service: ConfigService = None):
self.path = path
self._index = len(ConfigExternalFilePath.files)
ConfigExternalFilePath.files.append(self)
if service is not None:
service.add_external_file_path(self)
def destroy(self) -> None:
ConfigExternalFilePath.files.pop(self._index)
for i, external_file_path in enumerate(ConfigExternalFilePath.files):
external_file_path._index = i
class ConfigDataset(ConfigContainsTag):
def __init__(self, payload: Union[List[dict], str, ConfigExternalFilePath]):
self.payload = payload
if isinstance(self.payload, list):
self.forbid_comma_in_tag(self.payload)
class ConfigSchema:
def __init__(self, payload: Union[dict, ConfigExternalFilePath]):
self.payload = payload
class ConfigHeaders:
def __init__(self, payload: Dict[str, Union[str, List[str], ConfigExternalFilePath]]):
self.payload = payload
class ConfigAmqpProperties:
def __init__(
self,
content_type=None,
content_encoding=None,
delivery_mode=None,
priority=None,
correlation_id=None,
reply_to=None,
expiration=None,
message_id=None,
timestamp=None,
_type=None,
user_id=None,
app_id=None,
cluster_id=None
):
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_mode = delivery_mode
self.priority = priority
self.correlation_id = correlation_id
self.reply_to = reply_to
self.expiration = expiration
self.message_id = message_id
self.timestamp = timestamp
self.type = _type
self.user_id = user_id
self.app_id = app_id
self.cluster_id = cluster_id
class ConfigConsume:
def __init__(
self,
queue: str,
group: Union[str, None] = None,
key: Union[str, None] = None,
schema: Union[ConfigSchema, None] = None,
value: Union[str, None] = None,
headers: Union[ConfigHeaders, None] = None,
amqp_properties: Union[ConfigAmqpProperties, None] = None,
capture: int = 1
):
self.queue = queue
self.group = group
self.key = key
self.schema = schema
self.value = value
self.headers = headers
self.amqp_properties = amqp_properties
self.capture = capture
class ConfigProduce:
def __init__(
self,
queue: str,
value: Union[str, ConfigExternalFilePath],
create: bool = False,
tag: Union[str, None] = None,
key: Union[str, None] = None,
headers: Union[ConfigHeaders, None] = None,
amqp_properties: Union[ConfigAmqpProperties, None] = None
):
self.queue = queue
self.value = value
self.create = create
self.tag = tag
self.key = key
self.headers = headers
self.amqp_properties = amqp_properties
class ConfigMultiProduce:
def __init__(self, produce_list: List[ConfigProduce]):
self.produce_list = produce_list
class ConfigActor:
def __init__(
self,
name: Union[str, None] = None,
dataset: Union[ConfigDataset, None] = None,
produce: Union[ConfigMultiProduce, ConfigProduce, None] = None,
consume: Union[ConfigConsume, None] = None,
delay: Union[int, float, None] = None,
limit: Union[int, None] = None,
multi_payloads_looped: bool = True,
dataset_looped: bool = True,
):
self.name = name
self.dataset = dataset
self.produce = produce
self.consume = consume
self.delay = delay
self.limit = limit
self.multi_payloads_looped = multi_payloads_looped
self.dataset_looped = dataset_looped
class ConfigAsyncService(ConfigService):
services = []
def __init__(
self,
_type: str,
address: Union[str, None] = None,
        actors: Union[List[ConfigActor], None] = None,
name: Union[str, None] = None,
ssl: bool = False,
internal_service_id: Union[int, None] = None
):
super().__init__(_type, name, internal_service_id)
ConfigAsyncService.services.append(self)
self.type = _type
self.address = address
        self.actors = [] if actors is None else actors
self.ssl = ssl
def get_hint(self):
return '%s://%s' % (self.type, self.address) if self.name is None else self.name
def address_template_renderer(
self,
template_engine: str,
rendering_queue,
) -> Tuple[str, dict]:
if template_engine == PYBARS:
from mockintosh.hbs.methods import env
elif template_engine == JINJA:
from mockintosh.j2.methods import env
renderer = TemplateRenderer()
self.address, _ = renderer.render(
template_engine,
self.address,
rendering_queue,
inject_methods=[
env
]
)
class ConfigResponse:
def __init__(
self,
headers: Union[ConfigHeaders, None] = None,
status: Union[str, int] = 200,
body: Union[str, ConfigExternalFilePath, None] = None,
use_templating: bool = True,
templating_engine: str = PYBARS,
tag: Union[str, None] = None,
trigger_async_producer: Union[str, int, None] = None
):
self.headers = headers
self.status = status
self.body = body
self.use_templating = use_templating
self.templating_engine = templating_engine
self.tag = tag
self.trigger_async_producer = trigger_async_producer
def oas(self, status_data: dict):
new_headers = {k.title(): v for k, v in self.headers.payload.items()}
if 'Content-Type' in new_headers:
if new_headers['Content-Type'].startswith('application/json'):
status_data = {
'content': {
'application/json': {
'schema': {}
}
}
}
status_data['headers'] = {}
for key in new_headers.keys():
status_data['headers'][key] = {
'schema': {
'type': 'string'
}
}
class ConfigMultiResponse(ConfigContainsTag):
def __init__(self, payload: List[Union[ConfigResponse, ConfigExternalFilePath, str]]):
self.payload = payload
self.forbid_comma_in_tag(self.payload)
class ConfigBody:
def __init__(
self,
schema: ConfigSchema = None,
text: Union[str, None] = None,
graphql_query: Union[str, ConfigExternalFilePath, None] = None,
graphql_variables: Dict[str, str] = None,
urlencoded: Dict[str, str] = None,
multipart: Dict[str, str] = None
):
self.schema = schema
self.text = text
self.urlencoded = urlencoded
self.multipart = multipart
self.graphql_query = graphql_query
self.graphql_variables = graphql_variables
class ConfigEndpoint:
def __init__(
self,
path: str,
_id: Union[str, None] = None,
comment: Union[str, None] = None,
method: str = 'GET',
        query_string: Union[Dict[str, str], None] = None,
        headers: Union[Dict[str, str], None] = None,
body: Union[ConfigBody, None] = None,
dataset: Union[ConfigDataset, None] = None,
response: Union[ConfigResponse, ConfigExternalFilePath, str, ConfigMultiResponse, None] = None,
multi_responses_looped: bool = True,
dataset_looped: bool = True,
performance_profile: Union[str, None] = None
):
self.path = path
self.id = _id
self.comment = comment
self.method = method.upper()
        self.query_string = {} if query_string is None else query_string
        self.headers = {} if headers is None else headers
self.body = body
self.dataset = dataset
self.response = response
self.multi_responses_looped = multi_responses_looped
self.dataset_looped = dataset_looped
self.performance_profile = performance_profile
class ConfigHttpService(ConfigService):
def __init__(
self,
port: int,
name: Union[str, None] = None,
hostname: Union[str, None] = None,
ssl: bool = False,
ssl_cert_file: Union[str, None] = None,
ssl_key_file: Union[str, None] = None,
management_root: Union[str, None] = None,
oas: Union[str, ConfigExternalFilePath, None] = None,
        endpoints: Union[List[ConfigEndpoint], None] = None,
performance_profile: Union[str, None] = None,
fallback_to: Union[str, None] = None,
internal_service_id: Union[int, None] = None
):
super().__init__('http', name, internal_service_id)
self.port = port
self.hostname = hostname
self.ssl = ssl
self.ssl_cert_file = ssl_cert_file
self.ssl_key_file = ssl_key_file
self.management_root = management_root
self.oas = oas
        self.endpoints = [] if endpoints is None else endpoints
self.performance_profile = performance_profile
self.fallback_to = fallback_to
def get_hint(self):
return '%s://%s:%s%s' % (
'https' if self.ssl else 'http',
self.hostname if self.hostname is not None else (
'localhost'
),
self.port,
' - %s' % self.name if self.name is not None else ''
)
class ConfigGlobals:
def __init__(
self,
headers: Union[ConfigHeaders, None],
performance_profile: Union[str, None] = None
):
self.headers = headers
self.performance_profile = performance_profile
class ConfigManagement:
def __init__(
self,
port: str,
ssl: bool = False,
ssl_cert_file: Union[str, None] = None,
ssl_key_file: Union[str, None] = None
):
self.port = port
self.ssl = ssl
self.ssl_cert_file = ssl_cert_file
self.ssl_key_file = ssl_key_file
class ConfigPerformanceProfile:
def __init__(
self,
ratio: Union[int, float],
delay: Union[int, float] = 0.0,
faults: Union[dict, None] = None
):
self.ratio = ratio
self.delay = delay
self.faults = {} if faults is None else faults
self.actuator = PerformanceProfile(
self.ratio,
delay=self.delay,
faults=self.faults
)
class ConfigRoot:
def __init__(
self,
services: List[Union[ConfigHttpService, ConfigAsyncService]],
management: Union[ConfigManagement, None] = None,
templating_engine: str = PYBARS,
_globals: Union[ConfigGlobals, None] = None,
        performance_profiles: Union[Dict[str, ConfigPerformanceProfile], None] = None
):
self.services = services
self.management = management
self.templating_engine = templating_engine
self.globals = _globals
        self.performance_profiles = {} if performance_profiles is None else performance_profiles
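
# Illustrative sketch (an assumption, not part of mockintosh itself): how the
# classes above compose into a complete configuration -- one HTTP service on
# port 8001 serving a single templated endpoint. The port, names and template
# strings are placeholders.
def _example_config_root() -> ConfigRoot:
    endpoint = ConfigEndpoint(
        path='/users/{{ id }}',
        method='get',
        response=ConfigResponse(status=200, body='user {{ id }}')
    )
    service = ConfigHttpService(port=8001, name='users-mock',
                                endpoints=[endpoint])
    return ConfigRoot(services=[service])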
|
mockintosh/config.py
| 0.7773 | 0.134236 |
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'dataproc'
class AcceleratorConfig(_messages.Message):
r"""Specifies the type and number of accelerator cards attached to the
instances of an instance group (see GPUs on Compute Engine).
Fields:
acceleratorCount: The number of the accelerator cards of this type exposed
to this instance.
acceleratorTypeUri: Full URL, partial URI, or short name of the
accelerator type resource to expose to this instance. See Compute Engine
AcceleratorTypes( /compute/docs/reference/beta/acceleratorTypes)Examples
* https://www.googleapis.com/compute/beta/projects/[project_id]/zones
/us-east1-a/acceleratorTypes/nvidia-tesla-k80 *
projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
* nvidia-tesla-k80Auto Zone Exception: If you are using the Cloud
Dataproc Auto Zone Placement feature, you must use the short name of the
accelerator type resource, for example, nvidia-tesla-k80.
"""
acceleratorCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
acceleratorTypeUri = _messages.StringField(2)
class AllocationAffinity(_messages.Message):
r"""Allocation Affinity for consuming Zonal allocation.
Enums:
ConsumeAllocationTypeValueValuesEnum:
Fields:
consumeAllocationType: A ConsumeAllocationTypeValueValuesEnum attribute.
key: Corresponds to the label key of Allocation resource.
values: Corresponds to the label values of allocation resource.
"""
class ConsumeAllocationTypeValueValuesEnum(_messages.Enum):
r"""ConsumeAllocationTypeValueValuesEnum enum type.
Values:
TYPE_UNSPECIFIED: <no description>
NO_ALLOCATION: Do not consume from any allocated capacity.
ANY_ALLOCATION: Consume any allocation available.
SPECIFIC_ALLOCATION: Must consume from a specific allocation. Must
specify key value fields for specifying the allocations.
"""
TYPE_UNSPECIFIED = 0
NO_ALLOCATION = 1
ANY_ALLOCATION = 2
SPECIFIC_ALLOCATION = 3
consumeAllocationType = _messages.EnumField('ConsumeAllocationTypeValueValuesEnum', 1)
key = _messages.StringField(2)
values = _messages.StringField(3, repeated=True)
class AutoscalingConfig(_messages.Message):
r"""Autoscaling Policy config associated with the cluster.
Fields:
policyUri: Optional. The autoscaling policy used by the cluster.Only
resource names including projectid and location (region) are valid.
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/lo
cations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[proj
ect_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note
that the policy must be in the same project and Cloud Dataproc region.
"""
policyUri = _messages.StringField(1)
class AutoscalingPolicy(_messages.Message):
r"""Describes an autoscaling policy for Dataproc cluster autoscaler.
Fields:
basicAlgorithm: A BasicAutoscalingAlgorithm attribute.
id: Required. The policy id.The id must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end
with underscore or hyphen. Must consist of between 3 and 50 characters.
name: Output only. The "resource name" of the policy, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
secondaryWorkerConfig: Required. Describes how the autoscaler will operate
for secondary workers.
workerConfig: Required. Describes how the autoscaler will operate for
primary workers.
"""
basicAlgorithm = _messages.MessageField('BasicAutoscalingAlgorithm', 1)
id = _messages.StringField(2)
name = _messages.StringField(3)
secondaryWorkerConfig = _messages.MessageField('InstanceGroupAutoscalingPolicyConfig', 4)
workerConfig = _messages.MessageField('InstanceGroupAutoscalingPolicyConfig', 5)
class BasicAutoscalingAlgorithm(_messages.Message):
r"""Basic algorithm for autoscaling.
Fields:
cooldownPeriod: Required. Cooldown time in between scaling.
yarnConfig: Required. YARN autoscaling configuration.
"""
cooldownPeriod = _messages.StringField(1)
yarnConfig = _messages.MessageField('BasicYarnAutoscalingConfig', 2)
class BasicYarnAutoscalingConfig(_messages.Message):
r"""Basic autoscaling configurations for YARN.
Fields:
gracefulDecommissionTimeout: Optional. Timeout used during an autoscaling
event (cluster update) between 0 seconds (no graceful decommission) and
1 day.Default: 0s.
scaleDownFactor: Optional. Fraction of suggested decrease in workers to
scale down by between 0 and 1. Suggested decrease when scaling down is
determined by the amount of average available memory since the last
cooldown period.Default: 1.0.
    scaleDownMinWorkerFraction: Optional. Minimum workers as a fraction of the
      current cluster size to scale down by, between 0 and 1. Default: 0.0.
scaleUpFactor: Required. Fraction of suggested increase in workers to
scale up by between 0 and 1. Suggested increase when scaling up is
determined by the amount of average pending memory since the last
cooldown period.
    scaleUpMinWorkerFraction: Optional. Minimum workers as a fraction of the
      current cluster size to scale up by, between 0 and 1. Default: 0.0.
"""
gracefulDecommissionTimeout = _messages.StringField(1)
scaleDownFactor = _messages.FloatField(2)
scaleDownMinWorkerFraction = _messages.FloatField(3)
scaleUpFactor = _messages.FloatField(4)
scaleUpMinWorkerFraction = _messages.FloatField(5)
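
# Illustrative sketch (not part of the generated module; all values are
# placeholders): composing the basic YARN autoscaling messages defined above.
# The '120s'-style duration strings are an assumption about the expected
# string encoding of these fields.
def _ExampleBasicAutoscalingAlgorithm():
  return BasicAutoscalingAlgorithm(
      cooldownPeriod='120s',
      yarnConfig=BasicYarnAutoscalingConfig(
          scaleUpFactor=0.5,
          scaleDownFactor=1.0,
          gracefulDecommissionTimeout='3600s'))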
class Binding(_messages.Message):
r"""Associates members with a role.
Fields:
condition: Unimplemented. The condition that is associated with this
binding. NOTE: an unsatisfied condition will not allow user access via
current binding. Different bindings, including their conditions, are
examined independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. members can have the following values: allUsers: A special
identifier that represents anyone who is on the internet; with or
without a Google account. allAuthenticatedUsers: A special identifier
that represents anyone who is authenticated with a Google account or a
service account. user:{emailid}: An email address that represents a
specific Google account. For example, <EMAIL> .
serviceAccount:{emailid}: An email address that represents a service
account. For example, <EMAIL>.
group:{emailid}: An email address that represents a Google group. For
example, <EMAIL>. domain:{domain}: A Google Apps domain name
that represents all the users of that domain. For example, google.com
or example.com.
role: Role that is assigned to members. For example, roles/viewer,
roles/editor, or roles/owner.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CancelJobRequest(_messages.Message):
r"""A request to cancel a job."""
class Cluster(_messages.Message):
r"""Describes the identifying information, config, and status of a cluster
of Compute Engine instances.
Messages:
LabelsValue: Optional. The labels to associate with this cluster. Label
keys must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a cluster.
Fields:
clusterName: Required. The cluster name. Cluster names within a project
must be unique. Names of deleted clusters can be reused.
clusterUuid: Output only. A cluster UUID (Unique Universal Identifier).
Cloud Dataproc generates this value when it creates the cluster.
config: Required. The cluster config. Note that Cloud Dataproc may set
default values, and values may change when clusters are updated.
labels: Optional. The labels to associate with this cluster. Label keys
must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a cluster.
metrics: Output only. Contains cluster daemon metrics such as HDFS and
YARN stats.Beta Feature: This report is available for testing purposes
only. It may be changed before final release.
projectId: Required. The Google Cloud Platform project ID that the cluster
belongs to.
status: Output only. Cluster status.
statusHistory: Output only. The previous cluster status.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this cluster. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
present, must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
associated with a cluster.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
config = _messages.MessageField('ClusterConfig', 3)
labels = _messages.MessageField('LabelsValue', 4)
metrics = _messages.MessageField('ClusterMetrics', 5)
projectId = _messages.StringField(6)
status = _messages.MessageField('ClusterStatus', 7)
statusHistory = _messages.MessageField('ClusterStatus', 8, repeated=True)
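
# Illustrative sketch (not part of the generated module; the project and
# cluster names are placeholders): building a Cluster message whose label map
# goes through the nested LabelsValue/AdditionalProperty encoding declared
# above.
def _ExampleCluster():
  labels = Cluster.LabelsValue(additionalProperties=[
      Cluster.LabelsValue.AdditionalProperty(key='env', value='dev'),
  ])
  return Cluster(
      clusterName='example-cluster',
      projectId='example-project',
      labels=labels)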
class ClusterConfig(_messages.Message):
r"""The cluster config.
Fields:
autoscalingConfig: Optional. Autoscaling config for the policy associated
with the cluster. Cluster does not autoscale if this field is unset.
configBucket: Optional. A Cloud Storage staging bucket used for sharing
generated SSH keys and config. If you do not specify a staging bucket,
Cloud Dataproc will determine an appropriate Cloud Storage location (US,
ASIA, or EU) for your cluster's staging bucket according to the Google
Compute Engine zone where your cluster is deployed, and then it will
create and manage this project-level, per-location bucket for you.
encryptionConfig: Optional. Encryption settings for the cluster.
endpointConfig: Optional. Port/endpoint configuration for this cluster
gceClusterConfig: Required. The shared Compute Engine config settings for
all instances in a cluster.
initializationActions: Optional. Commands to execute on each node after
config is completed. By default, executables are run on master and all
worker nodes. You can test a node's <code>role</code> metadata to run an
executable on a master or worker node, as shown below using curl (you
can also use wget): ROLE=$(curl -H Metadata-Flavor:Google
http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-
role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions
... else ... worker specific actions ... fi
lifecycleConfig: Optional. The config setting for auto delete cluster
schedule.
masterConfig: Optional. The Compute Engine config settings for the master
instance in a cluster.
secondaryWorkerConfig: Optional. The Compute Engine config settings for
additional worker instances in a cluster.
securityConfig: Optional. Security related configuration.
softwareConfig: Optional. The config settings for software inside the
cluster.
workerConfig: Optional. The Compute Engine config settings for worker
instances in a cluster.
"""
autoscalingConfig = _messages.MessageField('AutoscalingConfig', 1)
configBucket = _messages.StringField(2)
encryptionConfig = _messages.MessageField('EncryptionConfig', 3)
endpointConfig = _messages.MessageField('EndpointConfig', 4)
gceClusterConfig = _messages.MessageField('GceClusterConfig', 5)
initializationActions = _messages.MessageField('NodeInitializationAction', 6, repeated=True)
lifecycleConfig = _messages.MessageField('LifecycleConfig', 7)
masterConfig = _messages.MessageField('InstanceGroupConfig', 8)
secondaryWorkerConfig = _messages.MessageField('InstanceGroupConfig', 9)
securityConfig = _messages.MessageField('SecurityConfig', 10)
softwareConfig = _messages.MessageField('SoftwareConfig', 11)
workerConfig = _messages.MessageField('InstanceGroupConfig', 12)
class ClusterMetrics(_messages.Message):
r"""Contains cluster daemon metrics, such as HDFS and YARN stats.Beta
Feature: This report is available for testing purposes only. It may be
changed before final release.
Messages:
HdfsMetricsValue: The HDFS metrics.
YarnMetricsValue: The YARN metrics.
Fields:
hdfsMetrics: The HDFS metrics.
yarnMetrics: The YARN metrics.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HdfsMetricsValue(_messages.Message):
r"""The HDFS metrics.
Messages:
AdditionalProperty: An additional property for a HdfsMetricsValue
object.
Fields:
additionalProperties: Additional properties of type HdfsMetricsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a HdfsMetricsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.IntegerField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class YarnMetricsValue(_messages.Message):
r"""The YARN metrics.
Messages:
AdditionalProperty: An additional property for a YarnMetricsValue
object.
Fields:
additionalProperties: Additional properties of type YarnMetricsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a YarnMetricsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.IntegerField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
hdfsMetrics = _messages.MessageField('HdfsMetricsValue', 1)
yarnMetrics = _messages.MessageField('YarnMetricsValue', 2)
class ClusterOperation(_messages.Message):
r"""The cluster operation triggered by a workflow.
Fields:
done: Output only. Indicates the operation is done.
error: Output only. Error, if operation failed.
operationId: Output only. The id of the cluster operation.
"""
done = _messages.BooleanField(1)
error = _messages.StringField(2)
operationId = _messages.StringField(3)
class ClusterOperationMetadata(_messages.Message):
r"""Metadata describing the operation.
Messages:
LabelsValue: Output only. Labels associated with the operation
Fields:
clusterName: Output only. Name of the cluster for the operation.
clusterUuid: Output only. Cluster UUID for the operation.
description: Output only. Short description of operation.
labels: Output only. Labels associated with the operation
operationType: Output only. The operation type.
status: Output only. Current operation status.
statusHistory: Output only. The previous operation status.
warnings: Output only. Errors encountered during operation execution.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Output only. Labels associated with the operation
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
description = _messages.StringField(3)
labels = _messages.MessageField('LabelsValue', 4)
operationType = _messages.StringField(5)
status = _messages.MessageField('ClusterOperationStatus', 6)
statusHistory = _messages.MessageField('ClusterOperationStatus', 7, repeated=True)
warnings = _messages.StringField(8, repeated=True)
class ClusterOperationStatus(_messages.Message):
r"""The status of the operation.
Enums:
StateValueValuesEnum: Output only. A message containing the operation
state.
Fields:
details: Output only. A message containing any operation metadata details.
innerState: Output only. A message containing the detailed operation
state.
state: Output only. A message containing the operation state.
stateStartTime: Output only. The time this state was entered.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. A message containing the operation state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is running.
DONE: The operation is done; either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
details = _messages.StringField(1)
innerState = _messages.StringField(2)
state = _messages.EnumField('StateValueValuesEnum', 3)
stateStartTime = _messages.StringField(4)
class ClusterSelector(_messages.Message):
r"""A selector that chooses target cluster for jobs based on metadata.
Messages:
ClusterLabelsValue: Required. The cluster labels. Cluster must have all
labels to match.
Fields:
clusterLabels: Required. The cluster labels. Cluster must have all labels
to match.
zone: Optional. The zone where workflow process executes. This parameter
does not affect the selection of the cluster.If unspecified, the zone of
the first cluster matching the selector is used.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ClusterLabelsValue(_messages.Message):
r"""Required. The cluster labels. Cluster must have all labels to match.
Messages:
AdditionalProperty: An additional property for a ClusterLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ClusterLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ClusterLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterLabels = _messages.MessageField('ClusterLabelsValue', 1)
zone = _messages.StringField(2)
class ClusterStatus(_messages.Message):
r"""The status of a cluster and its instances.
Enums:
StateValueValuesEnum: Output only. The cluster's state.
SubstateValueValuesEnum: Output only. Additional state information that
includes status reported by the agent.
Fields:
detail: Output only. Optional details of cluster's state.
state: Output only. The cluster's state.
stateStartTime: Output only. Time when this state was entered.
substate: Output only. Additional state information that includes status
reported by the agent.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The cluster's state.
Values:
UNKNOWN: The cluster state is unknown.
CREATING: The cluster is being created and set up. It is not ready for
use.
RUNNING: The cluster is currently running and healthy. It is ready for
use.
ERROR: The cluster encountered an error. It is not ready for use.
DELETING: The cluster is being deleted. It cannot be used.
UPDATING: The cluster is being updated. It continues to accept and
process jobs.
"""
UNKNOWN = 0
CREATING = 1
RUNNING = 2
ERROR = 3
DELETING = 4
UPDATING = 5
class SubstateValueValuesEnum(_messages.Enum):
r"""Output only. Additional state information that includes status
reported by the agent.
Values:
UNSPECIFIED: The cluster substate is unknown.
UNHEALTHY: The cluster is known to be in an unhealthy state (for
example, critical daemons are not running or HDFS capacity is
exhausted).Applies to RUNNING state.
STALE_STATUS: The agent-reported status is out of date (may occur if
Cloud Dataproc loses communication with Agent).Applies to RUNNING
state.
"""
UNSPECIFIED = 0
UNHEALTHY = 1
STALE_STATUS = 2
detail = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
stateStartTime = _messages.StringField(3)
substate = _messages.EnumField('SubstateValueValuesEnum', 4)
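
# Illustrative sketch (not part of the generated module): reading the enums
# declared above to decide whether a cluster is in a usable state.
def _ClusterIsReady(status):
  """Return True for a healthy RUNNING ClusterStatus message."""
  return (status.state == ClusterStatus.StateValueValuesEnum.RUNNING and
          status.substate != ClusterStatus.SubstateValueValuesEnum.UNHEALTHY)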
class DataprocProjectsLocationsAutoscalingPoliciesCreateRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesCreateRequest object.
Fields:
autoscalingPolicy: A AutoscalingPolicy resource to be passed as the
request body.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}.
"""
autoscalingPolicy = _messages.MessageField('AutoscalingPolicy', 1)
parent = _messages.StringField(2, required=True)
class DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsLocationsAutoscalingPoliciesGetRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesGetRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsLocationsAutoscalingPoliciesListRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsLocationsWorkflowTemplatesCreateRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesCreateRequest object.
Fields:
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
parent = _messages.StringField(1, required=True)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 2)
class DataprocProjectsLocationsWorkflowTemplatesDeleteRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesDeleteRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to delete. If
specified, will only delete the template if the current server version
matches specified version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsLocationsWorkflowTemplatesGetRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesGetRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
    version: Optional. The version of workflow template to retrieve. Only
      previously instantiated versions can be retrieved. If unspecified,
      retrieves the current version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest
object.
Fields:
instanceId: Deprecated. Please use request_id field instead.
parent: Required. The "resource name" of the workflow template region, as
described in https://cloud.google.com/apis/design/resource_names of the
form projects/{project_id}/regions/{region}
requestId: Optional. A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.It is recommended to always
set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
instanceId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
requestId = _messages.StringField(3)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 4)
class DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest object.
Fields:
instantiateWorkflowTemplateRequest: A InstantiateWorkflowTemplateRequest
resource to be passed as the request body.
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
"""
instantiateWorkflowTemplateRequest = _messages.MessageField('InstantiateWorkflowTemplateRequest', 1)
name = _messages.StringField(2, required=True)
class DataprocProjectsLocationsWorkflowTemplatesListRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsAutoscalingPoliciesCreateRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesCreateRequest object.
Fields:
autoscalingPolicy: A AutoscalingPolicy resource to be passed as the
request body.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}.
"""
autoscalingPolicy = _messages.MessageField('AutoscalingPolicy', 1)
parent = _messages.StringField(2, required=True)
class DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsAutoscalingPoliciesGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesGetRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsAutoscalingPoliciesListRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersCreateRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersCreateRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
requestId: Optional. A unique id used to identify the request. If the
server receives two CreateClusterRequest requests with the same id, then
the second request will be ignored and the first
google.longrunning.Operation created and stored in the backend is
returned.It is recommended to always set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
"""
cluster = _messages.MessageField('Cluster', 1)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
requestId = _messages.StringField(4)
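# Illustrative sketch (not part of the generated API surface): building a
# create-cluster request with a UUID request id, as the docstring above
# recommends for retry deduplication. The project id and region are
# hypothetical; the Cluster body is assumed to be constructed by the caller.
def _example_create_cluster_request(cluster_body):
  import uuid
  return DataprocProjectsRegionsClustersCreateRequest(
      cluster=cluster_body,
      projectId='example-project',
      region='us-central1',
      # A UUID satisfies the documented constraints: letters, digits,
      # underscores, and hyphens only, at most 40 characters.
      requestId=str(uuid.uuid4()))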
class DataprocProjectsRegionsClustersDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersDeleteRequest object.
Fields:
clusterName: Required. The cluster name.
clusterUuid: Optional. Specifying the cluster_uuid means the RPC should
fail (with error NOT_FOUND) if cluster with specified UUID does not
exist.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
requestId: Optional. A unique id used to identify the request. If the
server receives two DeleteClusterRequest requests with the same id, then
the second request will be ignored and the first
google.longrunning.Operation created and stored in the backend is
returned.It is recommended to always set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
"""
clusterName = _messages.StringField(1, required=True)
clusterUuid = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
requestId = _messages.StringField(5)
class DataprocProjectsRegionsClustersDiagnoseRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersDiagnoseRequest object.
Fields:
clusterName: Required. The cluster name.
diagnoseClusterRequest: A DiagnoseClusterRequest resource to be passed as
the request body.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
clusterName = _messages.StringField(1, required=True)
diagnoseClusterRequest = _messages.MessageField('DiagnoseClusterRequest', 2)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
class DataprocProjectsRegionsClustersGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsClustersGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersGetRequest object.
Fields:
clusterName: Required. The cluster name.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
clusterName = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersListRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersListRequest object.
Fields:
filter: Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:field = value AND field =
value ...where field is one of status.state, clusterName, or
labels.[KEY], and [KEY] is a label key. value can be * to match all
values. status.state can be one of the following: ACTIVE, INACTIVE,
CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the
CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING
and ERROR states. clusterName is the name of the cluster provided at
creation time. Only the logical AND operator is supported; space-
separated items are treated as having an implicit AND operator.Example
filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env
= staging AND labels.starred = *
pageSize: Optional. The standard List page size.
pageToken: Optional. The standard List page token.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
region = _messages.StringField(5, required=True)
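# Illustrative sketch: a list request using the filter syntax documented
# above (logical AND only, * matching all values). Project id, region, and
# label values are hypothetical.
def _example_list_active_clusters_request():
  return DataprocProjectsRegionsClustersListRequest(
      filter='status.state = ACTIVE AND clusterName = mycluster '
             'AND labels.env = staging AND labels.starred = *',
      pageSize=50,
      projectId='example-project',
      region='us-central1')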
class DataprocProjectsRegionsClustersPatchRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersPatchRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
clusterName: Required. The cluster name.
    gracefulDecommissionTimeout: Optional. Timeout for graceful YARN
      decommissioning. Graceful decommissioning allows removing nodes from the
cluster without interrupting jobs in progress. Timeout specifies how
long to wait for jobs in progress to finish before forcefully removing
nodes (and potentially interrupting jobs). Default timeout is 0 (for
forceful decommission), and the maximum allowed timeout is 1 day.Only
supported on Dataproc image versions 1.2 and higher.
projectId: Required. The ID of the Google Cloud Platform project the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
requestId: Optional. A unique id used to identify the request. If the
server receives two UpdateClusterRequest requests with the same id, then
the second request will be ignored and the first
google.longrunning.Operation created and stored in the backend is
returned.It is recommended to always set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
    updateMask: Required. Specifies the path, relative to Cluster, of the
      field to update. For example, to change the number of workers in a
      cluster to 5, the update_mask parameter would be specified as
      config.worker_config.num_instances, and the PATCH request body would
      specify the new value, as follows:
      { "config": { "workerConfig": { "numInstances": "5" } } }
      Similarly, to change the number of preemptible workers in a cluster to
      5, the update_mask parameter would be
      config.secondary_worker_config.num_instances, and the PATCH request
      body would be set as follows:
      { "config": { "secondaryWorkerConfig": { "numInstances": "5" } } }
      Note: currently only the following fields can be updated:
        labels: Updates labels
        config.worker_config.num_instances: Resize primary worker group
        config.secondary_worker_config.num_instances: Resize secondary
          worker group
        config.lifecycle_config.auto_delete_ttl: Reset MAX TTL duration
        config.lifecycle_config.auto_delete_time: Update MAX TTL deletion
          timestamp
        config.lifecycle_config.idle_delete_ttl: Update Idle TTL duration
"""
cluster = _messages.MessageField('Cluster', 1)
clusterName = _messages.StringField(2, required=True)
gracefulDecommissionTimeout = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
region = _messages.StringField(5, required=True)
requestId = _messages.StringField(6)
updateMask = _messages.StringField(7)
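# Illustrative sketch: resizing the primary worker group via the update mask
# described above. Only the mask string and request wiring are shown; the
# Cluster body (with config.worker_config.num_instances set to the new size)
# is assumed to be built by the caller. Names are hypothetical.
def _example_resize_primary_workers_request(cluster_body):
  return DataprocProjectsRegionsClustersPatchRequest(
      cluster=cluster_body,
      clusterName='example-cluster',
      projectId='example-project',
      region='us-central1',
      # Allow up to two minutes of graceful YARN decommissioning.
      gracefulDecommissionTimeout='120s',
      updateMask='config.worker_config.num_instances')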
class DataprocProjectsRegionsClustersSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsClustersTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsJobsCancelRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsCancelRequest object.
Fields:
cancelJobRequest: A CancelJobRequest resource to be passed as the request
body.
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
cancelJobRequest = _messages.MessageField('CancelJobRequest', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
class DataprocProjectsRegionsJobsDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsDeleteRequest object.
Fields:
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsJobsGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsGetRequest object.
Fields:
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsListRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsListRequest object.
Enums:
JobStateMatcherValueValuesEnum: Optional. Specifies enumerated categories
of jobs to list. (default = match ALL jobs).If filter is provided,
jobStateMatcher will be ignored.
Fields:
clusterName: Optional. If set, the returned jobs list includes only jobs
that were submitted to the named cluster.
filter: Optional. A filter constraining the jobs to list. Filters are
case-sensitive and have the following syntax:field = value AND field =
value ...where field is status.state or labels.[KEY], and [KEY] is a
label key. value can be * to match all values. status.state can be
either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported;
space-separated items are treated as having an implicit AND
operator.Example filter:status.state = ACTIVE AND labels.env = staging
AND labels.starred = *
jobStateMatcher: Optional. Specifies enumerated categories of jobs to
list. (default = match ALL jobs).If filter is provided, jobStateMatcher
will be ignored.
pageSize: Optional. The number of results to return in each response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
class JobStateMatcherValueValuesEnum(_messages.Enum):
r"""Optional. Specifies enumerated categories of jobs to list. (default =
match ALL jobs).If filter is provided, jobStateMatcher will be ignored.
Values:
ALL: <no description>
ACTIVE: <no description>
NON_ACTIVE: <no description>
"""
ALL = 0
ACTIVE = 1
NON_ACTIVE = 2
clusterName = _messages.StringField(1)
filter = _messages.StringField(2)
jobStateMatcher = _messages.EnumField('JobStateMatcherValueValuesEnum', 3)
pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(5)
projectId = _messages.StringField(6, required=True)
region = _messages.StringField(7, required=True)
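# Illustrative sketch: listing only active jobs submitted to one cluster.
# Note that jobStateMatcher is ignored when `filter` is provided, so only the
# enum is set here. Identifiers are hypothetical.
def _example_list_active_jobs_request():
  return DataprocProjectsRegionsJobsListRequest(
      clusterName='example-cluster',
      jobStateMatcher=(DataprocProjectsRegionsJobsListRequest
                       .JobStateMatcherValueValuesEnum.ACTIVE),
      pageSize=25,
      projectId='example-project',
      region='us-central1')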
class DataprocProjectsRegionsJobsPatchRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsPatchRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
    updateMask: Required. Specifies the path, relative to Job, of the field
      to update. For example, to update the labels of a Job the update_mask
      parameter would be specified as labels, and the PATCH request body
      would specify the new value. Note: Currently, labels is the only field
      that can be updated.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
updateMask = _messages.StringField(5)
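# Illustrative sketch: per the docstring above, `labels` is currently the only
# patchable field, so the update mask is fixed to 'labels' and the Job body
# (assumed to carry the new label values) supplies the rest. Identifiers are
# hypothetical.
def _example_update_job_labels_request(job_with_new_labels):
  return DataprocProjectsRegionsJobsPatchRequest(
      job=job_with_new_labels,
      jobId='example-job-id',
      projectId='example-project',
      region='us-central1',
      updateMask='labels')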
class DataprocProjectsRegionsJobsSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsJobsSubmitRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsSubmitRequest object.
Fields:
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
submitJobRequest: A SubmitJobRequest resource to be passed as the request
body.
"""
projectId = _messages.StringField(1, required=True)
region = _messages.StringField(2, required=True)
submitJobRequest = _messages.MessageField('SubmitJobRequest', 3)
class DataprocProjectsRegionsJobsTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsOperationsCancelRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsCancelRequest object.
Fields:
name: The name of the operation resource to be cancelled.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsListRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation's parent resource.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class DataprocProjectsRegionsOperationsSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsOperationsTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsWorkflowTemplatesCreateRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesCreateRequest object.
Fields:
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
parent = _messages.StringField(1, required=True)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 2)
class DataprocProjectsRegionsWorkflowTemplatesDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesDeleteRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to delete. If
specified, will only delete the template if the current server version
matches specified version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsWorkflowTemplatesGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesGetRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to retrieve. Only
      previously instantiated versions can be retrieved. If unspecified,
retrieves the current version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest
object.
Fields:
instanceId: Deprecated. Please use request_id field instead.
parent: Required. The "resource name" of the workflow template region, as
described in https://cloud.google.com/apis/design/resource_names of the
form projects/{project_id}/regions/{region}
requestId: Optional. A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.It is recommended to always
set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
instanceId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
requestId = _messages.StringField(3)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 4)
class DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest object.
Fields:
instantiateWorkflowTemplateRequest: A InstantiateWorkflowTemplateRequest
resource to be passed as the request body.
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
"""
instantiateWorkflowTemplateRequest = _messages.MessageField('InstantiateWorkflowTemplateRequest', 1)
name = _messages.StringField(2, required=True)
class DataprocProjectsRegionsWorkflowTemplatesListRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DiagnoseClusterRequest(_messages.Message):
r"""A request to collect cluster diagnostic information."""
class DiagnoseClusterResults(_messages.Message):
r"""The location of diagnostic output.
Fields:
outputUri: Output only. The Cloud Storage URI of the diagnostic output.
The output report is a plain text file with a summary of collected
diagnostics.
"""
outputUri = _messages.StringField(1)
class DiskConfig(_messages.Message):
r"""Specifies the config of disk options for a group of VM instances.
Fields:
bootDiskSizeGb: Optional. Size in GB of the boot disk (default is 500GB).
bootDiskType: Optional. Type of the boot disk (default is "pd-standard").
Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-
standard" (Persistent Disk Hard Disk Drive).
numLocalSsds: Optional. Number of attached SSDs, from 0 to 4 (default is
0). If SSDs are not attached, the boot disk is used to store runtime
logs and HDFS
(https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If
one or more SSDs are attached, this runtime bulk data is spread across
them, and the boot disk contains only basic config and installed
binaries.
"""
bootDiskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
bootDiskType = _messages.StringField(2)
numLocalSsds = _messages.IntegerField(3, variant=_messages.Variant.INT32)
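# Illustrative sketch: a disk configuration that spells out the documented
# defaults (500 GB pd-standard boot disk) and attaches one local SSD so that
# runtime bulk data is spread onto it, as described above.
def _example_disk_config():
  return DiskConfig(bootDiskSizeGb=500, bootDiskType='pd-standard',
                    numLocalSsds=1)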
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for Empty is empty JSON object {}.
"""
class EncryptionConfig(_messages.Message):
r"""Encryption settings for the cluster.
Fields:
gcePdKmsKeyName: Optional. The Cloud KMS key name to use for PD disk
encryption for all instances in the cluster.
"""
gcePdKmsKeyName = _messages.StringField(1)
class EndpointConfig(_messages.Message):
r"""Endpoint config for this cluster
Messages:
HttpPortsValue: Output only. The map of port descriptions to URLs. Will
only be populated if enable_http_port_access is true.
Fields:
enableHttpPortAccess: Optional. If true, enable http access to specific
ports on the cluster from external sources. Defaults to false.
httpPorts: Output only. The map of port descriptions to URLs. Will only be
populated if enable_http_port_access is true.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HttpPortsValue(_messages.Message):
r"""Output only. The map of port descriptions to URLs. Will only be
populated if enable_http_port_access is true.
Messages:
AdditionalProperty: An additional property for a HttpPortsValue object.
Fields:
additionalProperties: Additional properties of type HttpPortsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a HttpPortsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
enableHttpPortAccess = _messages.BooleanField(1)
httpPorts = _messages.MessageField('HttpPortsValue', 2)
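# Illustrative sketch of how map-typed fields are represented in this module:
# each entry becomes an AdditionalProperty(key=..., value=...) inside the
# wrapper message. httpPorts is output only, so a client would normally only
# read it; the port name and URL below are hypothetical.
def _example_endpoint_config():
  return EndpointConfig(
      enableHttpPortAccess=True,
      httpPorts=EndpointConfig.HttpPortsValue(additionalProperties=[
          EndpointConfig.HttpPortsValue.AdditionalProperty(
              key='YARN ResourceManager',
              value='https://example-cluster-m:8088'),
      ]))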
class Expr(_messages.Message):
r"""Represents an expression text. Example: title: "User account presence"
description: "Determines whether the request has a user account" expression:
"size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used e.g. in UIs which allow to
enter the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
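# Illustrative sketch mirroring the example quoted in the docstring: a CEL
# expression that checks whether the request carries a user account.
def _example_expr():
  return Expr(
      title='User account presence',
      description='Determines whether the request has a user account',
      expression='size(request.user) > 0')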
class GceClusterConfig(_messages.Message):
r"""Common config settings for resources of Compute Engine cluster
instances, applicable to all instances in the cluster.
Messages:
MetadataValue: The Compute Engine metadata entries to add to all instances
(see Project and instance metadata
(https://cloud.google.com/compute/docs/storing-retrieving-
metadata#project_and_instance_metadata)).
Fields:
allocationAffinity: Allocation Affinity for consuming Zonal allocation.
internalIpOnly: Optional. If true, all instances in the cluster will only
have internal IP addresses. By default, clusters are not restricted to
internal IP addresses, and will have ephemeral external IP addresses
assigned to each instance. This internal_ip_only restriction can only be
enabled for subnetwork enabled networks, and all off-cluster
dependencies must be configured to be accessible without external IP
addresses.
metadata: The Compute Engine metadata entries to add to all instances (see
      Project and instance metadata
      (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
networkUri: Optional. The Compute Engine network to be used for machine
communications. Cannot be specified with subnetwork_uri. If neither
network_uri nor subnetwork_uri is specified, the "default" network of
the project is used, if it exists. Cannot be a "Custom Subnet Network"
      (see Using Subnetworks for more information). A full URL, partial URI,
      or short name are valid. Examples:
      https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default
      projects/[project_id]/regions/global/default
      default
serviceAccount: Optional. The service account of the instances. Defaults
to the default Compute Engine service account. Custom service accounts
need permissions equivalent to the following IAM roles:
roles/logging.logWriter roles/storage.objectAdmin(see
https://cloud.google.com/compute/docs/access/service-
accounts#custom_service_accounts for more information). Example:
[account_id]@[project_id].iam.gserviceaccount.com
serviceAccountScopes: Optional. The URIs of service account scopes to be
included in Compute Engine instances. The following base set of scopes
is always included:
https://www.googleapis.com/auth/cloud.useraccounts.readonly
https://www.googleapis.com/auth/devstorage.read_write
https://www.googleapis.com/auth/logging.writeIf no scopes are specified,
the following defaults are also provided:
https://www.googleapis.com/auth/bigquery
https://www.googleapis.com/auth/bigtable.admin.table
https://www.googleapis.com/auth/bigtable.data
https://www.googleapis.com/auth/devstorage.full_control
subnetworkUri: Optional. The Compute Engine subnetwork to be used for
machine communications. Cannot be specified with network_uri.A full URL,
partial URI, or short name are valid. Examples:
https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-
east1/sub0 projects/[project_id]/regions/us-east1/sub0 sub0
tags: The Compute Engine tags to add to all instances (see Tagging
instances).
zoneUri: Optional. The zone where the Compute Engine cluster will be
located. On a create request, it is required in the "global" region. If
omitted in a non-global Cloud Dataproc region, the service will pick a
zone in the corresponding Compute Engine region. On a get request, zone
will always be present.A full URL, partial URI, or short name are valid.
Examples:
https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
projects/[project_id]/zones/[zone] us-central1-f
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""The Compute Engine metadata entries to add to all instances (see
    Project and instance metadata
    (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allocationAffinity = _messages.MessageField('AllocationAffinity', 1)
internalIpOnly = _messages.BooleanField(2)
metadata = _messages.MessageField('MetadataValue', 3)
networkUri = _messages.StringField(4)
serviceAccount = _messages.StringField(5)
serviceAccountScopes = _messages.StringField(6, repeated=True)
subnetworkUri = _messages.StringField(7)
tags = _messages.StringField(8, repeated=True)
zoneUri = _messages.StringField(9)
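# Illustrative sketch: a cluster network config restricted to internal IPs,
# with one metadata entry, a custom service account, and short names for the
# network and zone. All identifiers, URIs, and the metadata value are
# hypothetical.
def _example_gce_cluster_config():
  return GceClusterConfig(
      internalIpOnly=True,
      metadata=GceClusterConfig.MetadataValue(additionalProperties=[
          GceClusterConfig.MetadataValue.AdditionalProperty(
              key='startup-script-url',
              value='gs://example-bucket/startup.sh'),
      ]),
      networkUri='default',
      serviceAccount='dataproc-worker@example-project.iam.gserviceaccount.com',
      serviceAccountScopes=['https://www.googleapis.com/auth/cloud-platform'],
      tags=['dataproc'],
      zoneUri='us-central1-f')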
class GetIamPolicyRequest(_messages.Message):
r"""Request message for GetIamPolicy method."""
class HadoopJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Hadoop MapReduce
(https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-
mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN
(https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-
site/YARN.html).
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of Hadoop drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, or .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as -libjars or -Dfoo=bar, that can be set as job
properties, since a collision may occur that causes an incorrect job
submission.
fileUris: Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to
be copied to the working directory of Hadoop drivers and distributed
tasks. Useful for naively parallel tasks.
jarFileUris: Optional. Jar file URIs to add to the CLASSPATHs of the
Hadoop driver and tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file containing
the class must be in the default CLASSPATH or specified in
jar_file_uris.
mainJarFileUri: The HCFS URI of the jar file containing the main class.
Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-
mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar'
'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
properties: Optional. A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Hadoop. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/hadoop/conf/*-site
and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
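# Illustrative sketch: a Hadoop job driven by the example jar named in the
# docstring above, running the wordcount example with hypothetical input and
# output buckets, plus one Hadoop property.
def _example_hadoop_job():
  return HadoopJob(
      args=['wordcount', 'gs://example-bucket/input/',
            'gs://example-bucket/output/'],
      mainJarFileUri='file:///home/usr/lib/hadoop-mapreduce/'
                     'hadoop-mapreduce-examples.jar',
      properties=HadoopJob.PropertiesValue(additionalProperties=[
          HadoopJob.PropertiesValue.AdditionalProperty(
              key='mapreduce.job.reduces', value='4'),
      ]))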
class HiveJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
queries on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names and values, used to
configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
ScriptVariablesValue: Optional. Mapping of query variable names to values
(equivalent to the Hive command: SET name="value";).
Fields:
continueOnFailure: Optional. Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of
the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
and UDFs.
properties: Optional. A mapping of property names and values, used to
configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
queryFileUri: The HCFS URI of the script that contains Hive queries.
queryList: A list of queries.
scriptVariables: Optional. Mapping of query variable names to values
(equivalent to the Hive command: SET name="value";).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names and values, used to configure
Hive. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in
user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
r"""Optional. Mapping of query variable names to values (equivalent to the
Hive command: SET name="value";).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
continueOnFailure = _messages.BooleanField(1)
jarFileUris = _messages.StringField(2, repeated=True)
properties = _messages.MessageField('PropertiesValue', 3)
queryFileUri = _messages.StringField(4)
queryList = _messages.MessageField('QueryList', 5)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
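# Illustrative sketch: a Hive job that runs a script from a hypothetical
# Cloud Storage bucket and binds one script variable, equivalent to the Hive
# command `SET env="staging";` described above.
def _example_hive_job():
  return HiveJob(
      continueOnFailure=False,
      queryFileUri='gs://example-bucket/queries/report.hql',
      scriptVariables=HiveJob.ScriptVariablesValue(additionalProperties=[
          HiveJob.ScriptVariablesValue.AdditionalProperty(
              key='env', value='staging'),
      ]))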
class InstanceGroupAutoscalingPolicyConfig(_messages.Message):
r"""Configuration for the size bounds of an instance group, including its
proportional size to other groups.
Fields:
maxInstances: Required. Maximum number of instances for this group. Must
be >= min_instances.
minInstances: Optional. Minimum number of instances for this group.Default
for primary workers is 2, default for secondary workers is 0.
weight: Optional. Weight for instance group. Determines fraction of total
workers in cluster that will be composed of instances from this instance
group (e.g. if primary workers have weight 2 and secondary workers have
weight 1, then the cluster should have approximately 2 primary workers
      to each secondary worker). The cluster may not reach these exact
      weights if constrained by min/max bounds or other autoscaling
      configurations. Default is 1. Note that all groups have an equal
      weight by default, so the cluster will attempt to maintain an equal
number of workers in each group within configured size bounds per group.
"""
maxInstances = _messages.IntegerField(1, variant=_messages.Variant.INT32)
minInstances = _messages.IntegerField(2, variant=_messages.Variant.INT32)
weight = _messages.IntegerField(3, variant=_messages.Variant.INT32)
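# Illustrative sketch of the weight semantics described above: with primary
# weight 2 and secondary weight 1, the autoscaler targets roughly two primary
# workers per secondary worker, subject to each group's min/max bounds. The
# bounds below are hypothetical.
def _example_autoscaling_group_configs():
  primary = InstanceGroupAutoscalingPolicyConfig(
      minInstances=2, maxInstances=20, weight=2)
  secondary = InstanceGroupAutoscalingPolicyConfig(
      minInstances=0, maxInstances=10, weight=1)
  return primary, secondary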
class InstanceGroupConfig(_messages.Message):
r"""Optional. The config settings for Compute Engine resources in an
instance group, such as a master or worker group.
Fields:
accelerators: Optional. The Compute Engine accelerator configuration for
these instances.Beta Feature: This feature is still under development.
It may be changed before final release.
diskConfig: Optional. Disk option config settings.
imageUri: Optional. The Compute Engine image resource used for cluster
instances. It can be specified or may be inferred from
SoftwareConfig.image_version.
instanceNames: Output only. The list of instance names. Cloud Dataproc
derives the names from cluster_name, num_instances, and the instance
group.
isPreemptible: Optional. Specifies that this instance group contains
preemptible instances.
machineTypeUri: Optional. The Compute Engine machine type used for cluster
instances.A full URL, partial URI, or short name are valid. Examples:
https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
east1-a/machineTypes/n1-standard-2 projects/[project_id]/zones/us-
east1-a/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If
you are using the Cloud Dataproc Auto Zone Placement feature, you must
use the short name of the machine type resource, for example,
n1-standard-2.
managedGroupConfig: Output only. The config for Compute Engine Instance
Group Manager that manages this group. This is only used for preemptible
instance groups.
minCpuPlatform: Optional. Specifies the minimum cpu platform for the
Instance Group. See Cloud Dataproc→Minimum CPU Platform.
numInstances: Optional. The number of VM instances in the instance group.
For master instance groups, must be set to 1.
"""
accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True)
diskConfig = _messages.MessageField('DiskConfig', 2)
imageUri = _messages.StringField(3)
instanceNames = _messages.StringField(4, repeated=True)
isPreemptible = _messages.BooleanField(5)
machineTypeUri = _messages.StringField(6)
managedGroupConfig = _messages.MessageField('ManagedGroupConfig', 7)
minCpuPlatform = _messages.StringField(8)
numInstances = _messages.IntegerField(9, variant=_messages.Variant.INT32)
class InstantiateWorkflowTemplateRequest(_messages.Message):
r"""A request to instantiate a workflow template.
Messages:
ParametersValue: Optional. Map from parameter names to values that should
be used for those parameters. Values may not exceed 100 characters.
Fields:
instanceId: Deprecated. Please use request_id field instead.
parameters: Optional. Map from parameter names to values that should be
used for those parameters. Values may not exceed 100 characters.
requestId: Optional. A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.It is recommended to always
set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
version: Optional. The version of workflow template to instantiate. If
specified, the workflow will be instantiated only if the current version
of the workflow template has the supplied version.This option cannot be
used to instantiate a previous version of workflow template.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
r"""Optional. Map from parameter names to values that should be used for
those parameters. Values may not exceed 100 characters.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
instanceId = _messages.StringField(1)
parameters = _messages.MessageField('ParametersValue', 2)
requestId = _messages.StringField(3)
version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
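# Illustrative sketch: instantiating a workflow template with one template
# parameter bound (values must stay under 100 characters) and a UUID request
# id, as recommended above. The parameter name and value are hypothetical.
def _example_instantiate_workflow_template_request():
  import uuid
  parameters = InstantiateWorkflowTemplateRequest.ParametersValue(
      additionalProperties=[
          InstantiateWorkflowTemplateRequest.ParametersValue.AdditionalProperty(
              key='CLUSTER_ZONE', value='us-central1-f'),
      ])
  return InstantiateWorkflowTemplateRequest(
      parameters=parameters,
      requestId=str(uuid.uuid4()))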
class Job(_messages.Message):
r"""A Cloud Dataproc job resource.
Messages:
LabelsValue: Optional. The labels to associate with this job. Label keys
must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a job.
Fields:
driverControlFilesUri: Output only. If present, the location of
miscellaneous control files which may be used as part of job setup and
handling. If not present, control files may be placed in the same
location as driver_output_uri.
driverOutputResourceUri: Output only. A URI pointing to the location of
the stdout of the job's driver program.
hadoopJob: Job is a Hadoop job.
hiveJob: Job is a Hive job.
jobUuid: Output only. A UUID that uniquely identifies a job within the
project over time. This is in contrast to a user-settable
reference.job_id that may be reused over time.
labels: Optional. The labels to associate with this job. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a job.
pigJob: Job is a Pig job.
placement: Required. Job information, including how, when, and where to
run the job.
prestoJob: Job is a Presto job
pysparkJob: Job is a Pyspark job.
reference: Optional. The fully qualified reference to the job, which can
be used to obtain the equivalent REST path of the job resource. If this
property is not specified when a job is created, the server generates a
      job_id.
scheduling: Optional. Job scheduling configuration.
sparkJob: Job is a Spark job.
sparkRJob: Job is a SparkR job.
sparkSqlJob: Job is a SparkSql job.
status: Output only. The job status. Additional application-specific
      status information may be contained in the type_job and
      yarn_applications fields.
statusHistory: Output only. The previous job status.
submittedBy: Output only. The email address of the user submitting the
job. For jobs submitted on the cluster, the address is
      username@hostname.
yarnApplications: Output only. The collection of YARN applications spun up
by this job.Beta Feature: This report is available for testing purposes
only. It may be changed before final release.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this job. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
present, must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
associated with a job.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
driverControlFilesUri = _messages.StringField(1)
driverOutputResourceUri = _messages.StringField(2)
hadoopJob = _messages.MessageField('HadoopJob', 3)
hiveJob = _messages.MessageField('HiveJob', 4)
jobUuid = _messages.StringField(5)
labels = _messages.MessageField('LabelsValue', 6)
pigJob = _messages.MessageField('PigJob', 7)
placement = _messages.MessageField('JobPlacement', 8)
prestoJob = _messages.MessageField('PrestoJob', 9)
pysparkJob = _messages.MessageField('PySparkJob', 10)
reference = _messages.MessageField('JobReference', 11)
scheduling = _messages.MessageField('JobScheduling', 12)
sparkJob = _messages.MessageField('SparkJob', 13)
sparkRJob = _messages.MessageField('SparkRJob', 14)
sparkSqlJob = _messages.MessageField('SparkSqlJob', 15)
status = _messages.MessageField('JobStatus', 16)
statusHistory = _messages.MessageField('JobStatus', 17, repeated=True)
submittedBy = _messages.StringField(18)
yarnApplications = _messages.MessageField('YarnApplication', 19, repeated=True)
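# Illustrative sketch: a Job wrapping the Hadoop job sketched earlier in this
# module, placed on a hypothetical cluster. `placement` is required; the
# reference pins only the project id, leaving the server to generate a
# job_id as the docstring describes.
def _example_job():
  return Job(
      hadoopJob=_example_hadoop_job(),
      placement=JobPlacement(clusterName='example-cluster'),
      reference=JobReference(projectId='example-project'))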
class JobPlacement(_messages.Message):
r"""Cloud Dataproc job config.
Fields:
clusterName: Required. The name of the cluster where the job will be
submitted.
clusterUuid: Output only. A cluster UUID generated by the Cloud Dataproc
service when the job is submitted.
"""
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
class JobReference(_messages.Message):
r"""Encapsulates the full scoping used to reference a job.
Fields:
jobId: Optional. The job ID, which must be unique within the project. The
job ID is generated by the server upon job submission or provided by the
user as a means to perform retries without creating duplicate jobs. The
ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
or hyphens (-). The maximum length is 100 characters.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
class JobScheduling(_messages.Message):
r"""Job scheduling options.
Fields:
    maxFailuresPerHour: Optional. Maximum number of times per hour a driver
      may be restarted as a result of the driver terminating with a non-zero
      code before the job is reported failed. A job may be reported as
      thrashing if the driver exits with a non-zero code 4 times within a
      10-minute window. Maximum value is 10.
"""
maxFailuresPerHour = _messages.IntegerField(1, variant=_messages.Variant.INT32)
class JobStatus(_messages.Message):
r"""Cloud Dataproc job status.
Enums:
StateValueValuesEnum: Output only. A state message specifying the overall
job state.
SubstateValueValuesEnum: Output only. Additional state information, which
includes status reported by the agent.
Fields:
details: Output only. Optional job state details, such as an error
      description if the state is ERROR.
state: Output only. A state message specifying the overall job state.
stateStartTime: Output only. The time when this state was entered.
substate: Output only. Additional state information, which includes status
reported by the agent.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. A state message specifying the overall job state.
Values:
STATE_UNSPECIFIED: The job state is unknown.
PENDING: The job is pending; it has been submitted, but is not yet
running.
SETUP_DONE: Job has been received by the service and completed initial
setup; it will soon be submitted to the cluster.
RUNNING: The job is running on the cluster.
CANCEL_PENDING: A CancelJob request has been received, but is pending.
CANCEL_STARTED: Transient in-flight resources have been canceled, and
the request to cancel the running job has been issued to the cluster.
CANCELLED: The job cancellation was successful.
DONE: The job has completed successfully.
ERROR: The job has completed, but encountered an error.
ATTEMPT_FAILURE: Job attempt has failed. The detail field contains
failure details for this attempt.Applies to restartable jobs only.
"""
STATE_UNSPECIFIED = 0
PENDING = 1
SETUP_DONE = 2
RUNNING = 3
CANCEL_PENDING = 4
CANCEL_STARTED = 5
CANCELLED = 6
DONE = 7
ERROR = 8
ATTEMPT_FAILURE = 9
class SubstateValueValuesEnum(_messages.Enum):
r"""Output only. Additional state information, which includes status
reported by the agent.
Values:
UNSPECIFIED: The job substate is unknown.
SUBMITTED: The Job is submitted to the agent.Applies to RUNNING state.
QUEUED: The Job has been received and is awaiting execution (it may be
waiting for a condition to be met). See the "details" field for the
reason for the delay.Applies to RUNNING state.
STALE_STATUS: The agent-reported status is out of date, which may be
caused by a loss of communication between the agent and Cloud
Dataproc. If the agent does not send a timely update, the job will
fail.Applies to RUNNING state.
"""
UNSPECIFIED = 0
SUBMITTED = 1
QUEUED = 2
STALE_STATUS = 3
details = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
stateStartTime = _messages.StringField(3)
substate = _messages.EnumField('SubstateValueValuesEnum', 4)
class KerberosConfig(_messages.Message):
r"""Specifies Kerberos related configuration.
Fields:
crossRealmTrustAdminServer: Optional. The admin server (IP or hostname)
for the remote trusted realm in a cross realm trust relationship.
crossRealmTrustKdc: Optional. The KDC (IP or hostname) for the remote
trusted realm in a cross realm trust relationship.
crossRealmTrustRealm: Optional. The remote realm the Dataproc on-cluster
KDC will trust, should the user enable cross realm trust.
crossRealmTrustSharedPasswordUri: Optional. The GCS uri of a KMS encrypted
file containing the shared password between the on-cluster Kerberos
realm and the remote trusted realm, in a cross realm trust relationship.
enableKerberos: Optional. Flag to indicate whether to Kerberize the
cluster.
kdcDbKeyUri: Optional. The GCS uri of a KMS encrypted file containing the
master key of the KDC database.
keyPasswordUri: Optional. The GCS uri of a KMS encrypted file containing
the password to the user provided key. For the self-signed certificate,
this password is generated by Dataproc.
keystorePasswordUri: Optional. The GCS uri of a KMS encrypted file
containing the password to the user provided keystore. For the self-
signed certificate, this password is generated by Dataproc.
keystoreUri: Optional. The GCS uri of the keystore file used for SSL
encryption. If not provided, Dataproc will provide a self-signed
certificate.
kmsKeyUri: Required. The uri of the KMS key used to encrypt various
sensitive files.
rootPrincipalPasswordUri: Required. The GCS uri of a KMS encrypted file
containing the root principal password.
tgtLifetimeHours: Optional. The lifetime of the ticket granting ticket, in
hours. If not specified, or user specifies 0, then default value 10 will
be used.
truststorePasswordUri: Optional. The GCS uri of a KMS encrypted file
containing the password to the user provided truststore. For the self-
signed certificate, this password is generated by Dataproc.
truststoreUri: Optional. The GCS uri of the truststore file used for SSL
encryption. If not provided, Dataproc will provide a self-signed
certificate.
"""
crossRealmTrustAdminServer = _messages.StringField(1)
crossRealmTrustKdc = _messages.StringField(2)
crossRealmTrustRealm = _messages.StringField(3)
crossRealmTrustSharedPasswordUri = _messages.StringField(4)
enableKerberos = _messages.BooleanField(5)
kdcDbKeyUri = _messages.StringField(6)
keyPasswordUri = _messages.StringField(7)
keystorePasswordUri = _messages.StringField(8)
keystoreUri = _messages.StringField(9)
kmsKeyUri = _messages.StringField(10)
rootPrincipalPasswordUri = _messages.StringField(11)
tgtLifetimeHours = _messages.IntegerField(12, variant=_messages.Variant.INT32)
truststorePasswordUri = _messages.StringField(13)
truststoreUri = _messages.StringField(14)
class LifecycleConfig(_messages.Message):
r"""Specifies the cluster auto-delete schedule configuration.
Fields:
autoDeleteTime: Optional. The time when cluster will be auto-deleted.
autoDeleteTtl: Optional. The lifetime duration of the cluster. The cluster
will be auto-deleted at the end of this period. Valid range: 10m to 14d.
Example: "1d", to delete the cluster 1 day after its creation.
idleDeleteTtl: Optional. The duration to keep the cluster alive while
idling. Passing this threshold will cause the cluster to be deleted.
Valid range: 10m to 14d. Example: "10m", the minimum value, to delete the
cluster when it has had no jobs running for 10 minutes.
idleStartTime: Output only. The time when cluster became idle (most recent
job finished) and became eligible for deletion due to idleness.
"""
autoDeleteTime = _messages.StringField(1)
autoDeleteTtl = _messages.StringField(2)
idleDeleteTtl = _messages.StringField(3)
idleStartTime = _messages.StringField(4)
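# Example (sketch): a LifecycleConfig using the duration strings quoted in the
# field documentation above ("10m", "1d"); the exact duration format accepted
# by the service is defined by the API, not by this module.
#
#   lifecycle = LifecycleConfig(idleDeleteTtl='10m', autoDeleteTtl='1d')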
class ListAutoscalingPoliciesResponse(_messages.Message):
r"""A response to a request to list autoscaling policies in a project.
Fields:
nextPageToken: Output only. This token is included in the response if
there are more results to fetch.
policies: Output only. Autoscaling policies list.
"""
nextPageToken = _messages.StringField(1)
policies = _messages.MessageField('AutoscalingPolicy', 2, repeated=True)
class ListClustersResponse(_messages.Message):
r"""The list of all clusters in a project.
Fields:
clusters: Output only. The clusters in the project.
nextPageToken: Output only. This token is included in the response if
there are more results to fetch. To fetch additional results, provide
this value as the page_token in a subsequent
<code>ListClustersRequest</code>.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListJobsResponse(_messages.Message):
r"""A list of jobs in a project.
Fields:
jobs: Output only. Jobs list.
nextPageToken: Optional. This token is included in the response if there
are more results to fetch. To fetch additional results, provide this
value as the page_token in a subsequent <code>ListJobsRequest</code>.
"""
jobs = _messages.MessageField('Job', 1, repeated=True)
nextPageToken = _messages.StringField(2)
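# Example (sketch): draining a paginated job listing. `fetch_page` stands for
# whatever call returns a ListJobsResponse (e.g. a generated service client);
# it is not defined in this module.
#
#   page_token = None
#   while True:
#       response = fetch_page(page_token)    # returns a ListJobsResponse
#       for job in response.jobs:
#           handle(job)                      # hypothetical per-job handler
#       page_token = response.nextPageToken
#       if not page_token:
#           break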
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListWorkflowTemplatesResponse(_messages.Message):
r"""A response to a request to list workflow templates in a project.
Fields:
nextPageToken: Output only. This token is included in the response if
there are more results to fetch. To fetch additional results, provide
this value as the page_token in a subsequent
<code>ListWorkflowTemplatesRequest</code>.
templates: Output only. WorkflowTemplates list.
"""
nextPageToken = _messages.StringField(1)
templates = _messages.MessageField('WorkflowTemplate', 2, repeated=True)
class LoggingConfig(_messages.Message):
r"""The runtime logging config of the job.
Messages:
DriverLogLevelsValue: The per-package log levels for the driver. This may
include "root" package name to configure rootLogger. Examples:
'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
Fields:
driverLogLevels: The per-package log levels for the driver. This may
include "root" package name to configure rootLogger. Examples:
'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DriverLogLevelsValue(_messages.Message):
r"""The per-package log levels for the driver. This may include "root"
package name to configure rootLogger. Examples: 'com.google = FATAL',
'root = INFO', 'org.apache = DEBUG'
Messages:
AdditionalProperty: An additional property for a DriverLogLevelsValue
object.
Fields:
additionalProperties: Additional properties of type DriverLogLevelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DriverLogLevelsValue object.
Enums:
ValueValueValuesEnum:
Fields:
key: Name of the additional property.
value: A ValueValueValuesEnum attribute.
"""
class ValueValueValuesEnum(_messages.Enum):
r"""ValueValueValuesEnum enum type.
Values:
LEVEL_UNSPECIFIED: <no description>
ALL: <no description>
TRACE: <no description>
DEBUG: <no description>
INFO: <no description>
WARN: <no description>
ERROR: <no description>
FATAL: <no description>
OFF: <no description>
"""
LEVEL_UNSPECIFIED = 0
ALL = 1
TRACE = 2
DEBUG = 3
INFO = 4
WARN = 5
ERROR = 6
FATAL = 7
OFF = 8
key = _messages.StringField(1)
value = _messages.EnumField('ValueValueValuesEnum', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
driverLogLevels = _messages.MessageField('DriverLogLevelsValue', 1)
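# Example (sketch): building a LoggingConfig whose driver log-level map is
# expressed through the nested DriverLogLevelsValue / AdditionalProperty
# messages, as required by the MapUnrecognizedFields encoding used above.
#
#   root_level = LoggingConfig.DriverLogLevelsValue.AdditionalProperty(
#       key='root',
#       value=LoggingConfig.DriverLogLevelsValue.AdditionalProperty
#             .ValueValueValuesEnum.INFO)
#   logging_config = LoggingConfig(
#       driverLogLevels=LoggingConfig.DriverLogLevelsValue(
#           additionalProperties=[root_level]))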
class ManagedCluster(_messages.Message):
r"""Cluster that is managed by the workflow.
Messages:
LabelsValue: Optional. The labels to associate with this cluster. Label
keys must be between 1 and 63 characters long, and must conform to the
following PCRE regular expression: \p{Ll}\p{Lo}{0,62}. Label values must
be between 1 and 63 characters long, and must conform to the following
PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32
labels can be associated with a given cluster.
Fields:
clusterName: Required. The cluster name prefix. A unique cluster name will
be formed by appending a random suffix. The name must contain only lower-
case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a
letter. Cannot begin or end with hyphen. Must consist of between 2 and
35 characters.
config: Required. The cluster configuration.
labels: Optional. The labels to associate with this cluster. Label keys
must be between 1 and 63 characters long, and must conform to the
following PCRE regular expression: \p{Ll}\p{Lo}{0,62}. Label values must
be between 1 and 63 characters long, and must conform to the following
PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32
labels can be associated with a given cluster.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this cluster.Label keys must be
between 1 and 63 characters long, and must conform to the following PCRE
regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
63 characters long, and must conform to the following PCRE regular
expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be
associated with a given cluster.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
config = _messages.MessageField('ClusterConfig', 2)
labels = _messages.MessageField('LabelsValue', 3)
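# Example (sketch): a ManagedCluster with one label. ClusterConfig is defined
# elsewhere in this module; the name prefix and label are illustrative.
#
#   env_label = ManagedCluster.LabelsValue.AdditionalProperty(
#       key='env', value='dev')
#   managed = ManagedCluster(
#       clusterName='wf-cluster',
#       config=ClusterConfig(),
#       labels=ManagedCluster.LabelsValue(additionalProperties=[env_label]))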
class ManagedGroupConfig(_messages.Message):
r"""Specifies the resources used to actively manage an instance group.
Fields:
instanceGroupManagerName: Output only. The name of the Instance Group
Manager for this group.
instanceTemplateName: Output only. The name of the Instance Template used
for the Managed Instance Group.
"""
instanceGroupManagerName = _messages.StringField(1)
instanceTemplateName = _messages.StringField(2)
class NodeInitializationAction(_messages.Message):
r"""Specifies an executable to run on a fully configured node and a timeout
period for executable completion.
Fields:
executableFile: Required. Cloud Storage URI of executable file.
executionTimeout: Optional. Amount of time the executable has to complete.
Default is 10 minutes. Cluster creation fails with an explanatory error
message (the name of the executable that caused the error and the
exceeded timeout period) if the executable is not completed at the end
of the timeout period.
"""
executableFile = _messages.StringField(1)
executionTimeout = _messages.StringField(2)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as Delete, the
response is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other
methods, the response should have the type XxxResponse, where Xxx is the
original method name. For example, if the original method name is
TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
Fields:
done: If the value is false, it means the operation is still in progress.
If true, the operation is completed, and either error or response is
available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the name should have the format of operations/some/unique/name.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as Delete, the response
is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other
methods, the response should have the type XxxResponse, where Xxx is the
original method name. For example, if the original method name is
TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as Delete, the response
is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other methods,
the response should have the type XxxResponse, where Xxx is the original
method name. For example, if the original method name is TakeSnapshot(),
the inferred response type is TakeSnapshotResponse.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
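# Example (sketch): inspecting a long-running Operation returned by the API.
# `operation` stands for any Operation instance; how it is fetched or polled
# is up to the caller.
#
#   if operation.done:
#       if operation.error is not None:
#           raise RuntimeError(operation.error.message)   # error is a Status
#       result = operation.response                       # a ResponseValue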
class OrderedJob(_messages.Message):
r"""A job executed by the workflow.
Messages:
LabelsValue: Optional. The labels to associate with this job. Label keys
must be between 1 and 63 characters long, and must conform to the
following regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be
between 1 and 63 characters long, and must conform to the following
regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can
be associated with a given job.
Fields:
hadoopJob: Job is a Hadoop job.
hiveJob: Job is a Hive job.
labels: Optional. The labels to associate with this job. Label keys must
be between 1 and 63 characters long, and must conform to the following
regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1
and 63 characters long, and must conform to the following regular
expression: \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 32 labels can be
associated with a given job.
pigJob: Job is a Pig job.
prerequisiteStepIds: Optional. The optional list of prerequisite job
step_ids. If not specified, the job will start at the beginning of the
workflow.
prestoJob: Job is a Presto job.
pysparkJob: Job is a PySpark job.
scheduling: Optional. Job scheduling configuration.
sparkJob: Job is a Spark job.
sparkRJob: Job is a SparkR job.
sparkSqlJob: Job is a SparkSql job.
stepId: Required. The step id. The id must be unique among all jobs within
the template. The step id is used as a prefix for the job id, as the job
goog-dataproc-workflow-step-id label, and in the prerequisiteStepIds
field from other steps. The id must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end
with underscore or hyphen. Must consist of between 3 and 50 characters.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this job.Label keys must be
between 1 and 63 characters long, and must conform to the following
regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
63 characters long, and must conform to the following regular expression:
\p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a
given job.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
hadoopJob = _messages.MessageField('HadoopJob', 1)
hiveJob = _messages.MessageField('HiveJob', 2)
labels = _messages.MessageField('LabelsValue', 3)
pigJob = _messages.MessageField('PigJob', 4)
prerequisiteStepIds = _messages.StringField(5, repeated=True)
prestoJob = _messages.MessageField('PrestoJob', 6)
pysparkJob = _messages.MessageField('PySparkJob', 7)
scheduling = _messages.MessageField('JobScheduling', 8)
sparkJob = _messages.MessageField('SparkJob', 9)
sparkRJob = _messages.MessageField('SparkRJob', 10)
sparkSqlJob = _messages.MessageField('SparkSqlJob', 11)
stepId = _messages.StringField(12)
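# Example (sketch): two steps of a workflow template, the second depending on
# the first. HadoopJob is defined elsewhere in this module; the step ids are
# illustrative.
#
#   step = OrderedJob(
#       stepId='prepare-data',
#       hadoopJob=HadoopJob(),
#       prerequisiteStepIds=[])
#   dependent_step = OrderedJob(
#       stepId='train-model',
#       sparkJob=SparkJob(),
#       prerequisiteStepIds=['prepare-data'])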
class ParameterValidation(_messages.Message):
r"""Configuration for parameter validation.
Fields:
regex: Validation based on regular expressions.
values: Validation based on a list of allowed values.
"""
regex = _messages.MessageField('RegexValidation', 1)
values = _messages.MessageField('ValueValidation', 2)
class PigJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/)
queries on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Pig. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
in user code.
ScriptVariablesValue: Optional. Mapping of query variable names to values
(equivalent to the Pig command: name=[value]).
Fields:
continueOnFailure: Optional. Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of
the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
loggingConfig: Optional. The runtime log config for job execution.
properties: Optional. A mapping of property names to values, used to
configure Pig. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
in user code.
queryFileUri: The HCFS URI of the script that contains the Pig queries.
queryList: A list of queries.
scriptVariables: Optional. Mapping of query variable names to values
(equivalent to the Pig command: name=[value]).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Pig. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in
user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
r"""Optional. Mapping of query variable names to values (equivalent to the
Pig command: name=[value]).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
continueOnFailure = _messages.BooleanField(1)
jarFileUris = _messages.StringField(2, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 3)
properties = _messages.MessageField('PropertiesValue', 4)
queryFileUri = _messages.StringField(5)
queryList = _messages.MessageField('QueryList', 6)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 7)
class Policy(_messages.Message):
r"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources.A Policy
consists of a list of bindings. A binding binds a list of members to a role,
where the members can be user accounts, Google groups, Google domains, and
service accounts. A role is a named list of permissions defined by IAM.JSON
Example { "bindings": [ { "role": "roles/owner",
"members": [ "user:<EMAIL>",
"group:<EMAIL>", "domain:google.com",
"serviceAccount:<EMAIL>" ] },
{ "role": "roles/viewer", "members": ["user:<EMAIL>"]
} ] } YAML Example bindings: - members: - user:<EMAIL> -
group:<EMAIL> - domain:google.com - serviceAccount:my-<EMAIL>-
<EMAIL> role: roles/owner - members: -
user:<EMAIL> role: roles/viewer For a description of IAM and its
features, see the IAM developer's guide (https://cloud.google.com/iam/docs).
Fields:
bindings: Associates a list of members to a role. bindings with no members
will result in an error.
etag: etag is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the etag in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An etag is returned in the response to getIamPolicy, and
systems are expected to put that etag in the request to setIamPolicy to
ensure that their change will be applied to the same version of the
policy. If no etag is provided in the call to setIamPolicy, then the
existing policy is overwritten blindly.
version: Deprecated.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class PrestoJob(_messages.Message):
r"""A Cloud Dataproc job for running Presto (https://prestosql.io/) queries
Messages:
PropertiesValue: Optional. A mapping of property names to values. Used to
set Presto session properties (https://prestodb.io/docs/current/sql/set-
session.html) Equivalent to using the --session flag in the Presto CLI
Fields:
clientTags: Optional. Presto client tags to attach to this query
continueOnFailure: Optional. Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
loggingConfig: Optional. The runtime log config for job execution.
outputFormat: Optional. The format in which query output will be
displayed. See the Presto documentation for supported output formats
properties: Optional. A mapping of property names to values. Used to set
Presto session properties (https://prestodb.io/docs/current/sql/set-
session.html) Equivalent to using the --session flag in the Presto CLI
queryFileUri: The HCFS URI of the script that contains SQL queries.
queryList: A list of queries.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values. Used to set Presto
session properties (https://prestodb.io/docs/current/sql/set-session.html)
Equivalent to using the --session flag in the Presto CLI
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clientTags = _messages.StringField(1, repeated=True)
continueOnFailure = _messages.BooleanField(2)
loggingConfig = _messages.MessageField('LoggingConfig', 3)
outputFormat = _messages.StringField(4)
properties = _messages.MessageField('PropertiesValue', 5)
queryFileUri = _messages.StringField(6)
queryList = _messages.MessageField('QueryList', 7)
class PySparkJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache PySpark
(https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
applications on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure PySpark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and
.zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional. HCFS URIs of files to be copied to the working
directory of Python drivers and distributed tasks. Useful for naively
parallel tasks.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
the Python driver and tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainPythonFileUri: Required. The HCFS URI of the main Python file to use
as the driver. Must be a .py file.
properties: Optional. A mapping of property names to values, used to
configure PySpark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
pythonFileUris: Optional. HCFS file URIs of Python files to pass to the
PySpark framework. Supported file types: .py, .egg, and .zip.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
PySpark. Properties that conflict with values set by the Cloud Dataproc
API may be overwritten. Can include properties set in /etc/spark/conf
/spark-defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainPythonFileUri = _messages.StringField(6)
properties = _messages.MessageField('PropertiesValue', 7)
pythonFileUris = _messages.StringField(8, repeated=True)
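# Example (sketch): a PySparkJob with one Spark property. The URIs and the
# property value are illustrative placeholders.
#
#   memory_prop = PySparkJob.PropertiesValue.AdditionalProperty(
#       key='spark.executor.memory', value='4g')
#   pyspark_job = PySparkJob(
#       mainPythonFileUri='gs://example-bucket/wordcount.py',
#       args=['gs://example-bucket/input/'],
#       properties=PySparkJob.PropertiesValue(
#           additionalProperties=[memory_prop]))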
class QueryList(_messages.Message):
r"""A list of queries to run on a cluster.
Fields:
queries: Required. The queries to execute. You do not need to terminate a
query with a semicolon. Multiple queries can be specified in one string
by separating each with a semicolon. Here is an example of a Cloud
Dataproc API snippet that uses a QueryList to specify a HiveJob:
"hiveJob": { "queryList": { "queries": [ "query1",
"query2", "query3;query4", ] } }
"""
queries = _messages.StringField(1, repeated=True)
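# Example (sketch): an inline QueryList; trailing semicolons are optional per
# the field documentation above.
#
#   queries = QueryList(queries=['SHOW DATABASES', 'SELECT 1'])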
class RegexValidation(_messages.Message):
r"""Validation based on regular expressions.
Fields:
regexes: Required. RE2 regular expressions used to validate the
parameter's value. The value must match the regex in its entirety
(substring matches are not sufficient).
"""
regexes = _messages.StringField(1, repeated=True)
class SecurityConfig(_messages.Message):
r"""Security related configuration, including encryption, Kerberos, etc.
Fields:
kerberosConfig: Kerberos related configuration.
"""
kerberosConfig = _messages.MessageField('KerberosConfig', 1)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for SetIamPolicy method.
Fields:
policy: REQUIRED: The complete policy to be applied to the resource. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
class SoftwareConfig(_messages.Message):
r"""Specifies the selection and config of software inside the cluster.
Enums:
OptionalComponentsValueListEntryValuesEnum:
Messages:
PropertiesValue: Optional. The properties to set on daemon config
files. Property keys are specified in prefix:property format, such as
core:fs.defaultFS. The following are supported prefixes and their
mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml
distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml
mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf
yarn: yarn-site.xml. For more information, see Cluster properties.
Fields:
imageVersion: Optional. The version of software inside the cluster. It
must be one of the supported Cloud Dataproc Versions, such as "1.2"
(including a subminor version, such as "1.2.29"), or the "preview"
version. If unspecified, it defaults to the latest version.
optionalComponents: The set of optional components to activate on the
cluster.
properties: Optional. The properties to set on daemon config
files. Property keys are specified in prefix:property format, such as
core:fs.defaultFS. The following are supported prefixes and their
mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml
distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml
mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf
yarn: yarn-site.xml. For more information, see Cluster properties.
"""
class OptionalComponentsValueListEntryValuesEnum(_messages.Enum):
r"""OptionalComponentsValueListEntryValuesEnum enum type.
Values:
COMPONENT_UNSPECIFIED: <no description>
JUPYTER: <no description>
HIVE_WEBHCAT: <no description>
ZEPPELIN: <no description>
ANACONDA: <no description>
PRESTO: <no description>
KERBEROS: <no description>
"""
COMPONENT_UNSPECIFIED = 0
JUPYTER = 1
HIVE_WEBHCAT = 2
ZEPPELIN = 3
ANACONDA = 4
PRESTO = 5
KERBEROS = 6
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. The properties to set on daemon config files.Property keys
are specified in prefix:property format, such as core:fs.defaultFS. The
following are supported prefixes and their mappings: capacity-scheduler:
capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml
hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig:
pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more
information, see Cluster properties.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
imageVersion = _messages.StringField(1)
optionalComponents = _messages.EnumField('OptionalComponentsValueListEntryValuesEnum', 2, repeated=True)
properties = _messages.MessageField('PropertiesValue', 3)
class SparkJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
applications on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Spark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of Spark drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, and .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional. HCFS URIs of files to be copied to the working
directory of Spark drivers and distributed tasks. Useful for naively
parallel tasks.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
the Spark driver and tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file that contains
the class must be in the default CLASSPATH or specified in
jar_file_uris.
mainJarFileUri: The HCFS URI of the jar file that contains the main class.
properties: Optional. A mapping of property names to values, used to
configure Spark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Spark. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/spark/conf/spark-
defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
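# Example (sketch): a SparkJob driven by a main class. The class name, jar
# location, and argument are illustrative placeholders.
#
#   spark_job = SparkJob(
#       mainClass='org.apache.spark.examples.SparkPi',
#       jarFileUris=['file:///usr/lib/spark/examples/jars/spark-examples.jar'],
#       args=['1000'])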
class SparkRJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache SparkR
(https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure SparkR. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of Spark drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, and .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional. HCFS URIs of files to be copied to the working
directory of R drivers and distributed tasks. Useful for naively
parallel tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainRFileUri: Required. The HCFS URI of the main R file to use as the
driver. Must be a .R file.
properties: Optional. A mapping of property names to values, used to
configure SparkR. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
SparkR. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/spark/conf/spark-
defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 4)
mainRFileUri = _messages.StringField(5)
properties = _messages.MessageField('PropertiesValue', 6)
class SparkSqlJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Spark SQL
(http://spark.apache.org/sql/) queries.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Spark SQL's SparkConf. Properties that conflict with values
set by the Cloud Dataproc API may be overwritten.
ScriptVariablesValue: Optional. Mapping of query variable names to values
(equivalent to the Spark SQL command: SET name="value";).
Fields:
jarFileUris: Optional. HCFS URIs of jar files to be added to the Spark
CLASSPATH.
loggingConfig: Optional. The runtime log config for job execution.
properties: Optional. A mapping of property names to values, used to
configure Spark SQL's SparkConf. Properties that conflict with values
set by the Cloud Dataproc API may be overwritten.
queryFileUri: The HCFS URI of the script that contains SQL queries.
queryList: A list of queries.
scriptVariables: Optional. Mapping of query variable names to values
(equivalent to the Spark SQL command: SET name="value";).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Spark SQL's SparkConf. Properties that conflict with values set by the
Cloud Dataproc API may be overwritten.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
r"""Optional. Mapping of query variable names to values (equivalent to the
Spark SQL command: SET name="value";).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
jarFileUris = _messages.StringField(1, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 2)
properties = _messages.MessageField('PropertiesValue', 3)
queryFileUri = _messages.StringField(4)
queryList = _messages.MessageField('QueryList', 5)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
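# Example (sketch): query parameters attached to any request. The partial
# response selector shown is illustrative.
#
#   params = StandardQueryParameters(
#       fields='jobs(reference,status)',
#       prettyPrint=False)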
class Status(_messages.Message):
r"""The Status type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by gRPC (https://github.com/grpc). The error model is designed to be:
Simple to use and understand for most users Flexible enough to meet
unexpected needsOverviewThe Status message contains three pieces of data:
error code, error message, and error details. The error code should be an
enum value of google.rpc.Code, but it may accept additional error codes if
needed. The error message should be a developer-facing English message that
helps developers understand and resolve the error. If a localized user-
facing error message is needed, put the localized message in the error
details or localize it in the client. The optional error details may contain
arbitrary information about the error. There is a predefined set of error
detail types in the package google.rpc that can be used for common error
conditions.Language mappingThe Status message is the logical representation
of the error model, but it is not necessarily the actual wire format. When
the Status message is exposed in different client libraries and different
wire protocols, it can be mapped differently. For example, it will likely be
mapped to some exceptions in Java, but more likely mapped to some error
codes in C.Other usesThe error model and the Status message can be used in a
variety of environments, either with or without APIs, to provide a
consistent developer experience across different environments.Example uses
of this error model include: Partial errors. If a service needs to return
partial errors to the client, it may embed the Status in the normal response
to indicate the partial errors. Workflow errors. A typical workflow has
multiple steps. Each step may have a Status message for error reporting.
Batch operations. If a client uses batch request and batch response, the
Status message should be used directly inside batch response, one for each
error sub-response. Asynchronous operations. If an API call embeds
asynchronous operation results in its response, the status of those
operations should be represented directly using the Status message. Logging.
If some API errors are stored in logs, the message Status could be used
directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class SubmitJobRequest(_messages.Message):
r"""A request to submit a job.
Fields:
job: Required. The job resource.
requestId: Optional. A unique id used to identify the request. If the
server receives two SubmitJobRequest requests with the same id, then the
second request will be ignored and the first Job created and stored in
the backend is returned. It is recommended to always set this value to a
UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The
id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
"""
job = _messages.MessageField('Job', 1)
requestId = _messages.StringField(2)
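# Example (sketch): an idempotent job submission. Job is defined elsewhere in
# this module; a UUID satisfies the documented requestId format (letters,
# numbers, underscores, hyphens; at most 40 characters).
#
#   import uuid
#   request = SubmitJobRequest(
#       job=Job(),
#       requestId=str(uuid.uuid4()))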
class TemplateParameter(_messages.Message):
r"""A configurable parameter that replaces one or more fields in the
template. Parameterizable fields: - Labels - File uris - Job properties -
Job arguments - Script variables - Main class (in HadoopJob and SparkJob) -
Zone (in ClusterSelector)
Fields:
description: Optional. Brief description of the parameter. Must not exceed
1024 characters.
fields: Required. Paths to all fields that the parameter replaces. A field
is allowed to appear in at most one parameter's list of field paths. A
field path is similar in syntax to a google.protobuf.FieldMask. For
example, a field path that references the zone field of a workflow
template's cluster selector would be specified as
placement.clusterSelector.zone. Also, field paths can reference fields
using the following syntax: Values in maps can be referenced by key:
labels['key'] placement.clusterSelector.clusterLabels['key']
placement.managedCluster.labels['key']
placement.clusterSelector.clusterLabels['key']
jobs['step-id'].labels['key']
Jobs in the jobs list can be referenced by step-id:
jobs['step-id'].hadoopJob.mainJarFileUri
jobs['step-id'].hiveJob.queryFileUri
jobs['step-id'].pySparkJob.mainPythonFileUri
jobs['step-id'].hadoopJob.jarFileUris[0]
jobs['step-id'].hadoopJob.archiveUris[0]
jobs['step-id'].hadoopJob.fileUris[0]
jobs['step-id'].pySparkJob.pythonFileUris[0]
Items in repeated fields can be referenced by a zero-based index:
jobs['step-id'].sparkJob.args[0]
Other examples: jobs['step-id'].hadoopJob.properties['key']
jobs['step-id'].hadoopJob.args[0]
jobs['step-id'].hiveJob.scriptVariables['key']
jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone.
It may not be possible to parameterize maps and repeated fields in their
entirety since only individual map values and individual items in
repeated fields can be referenced. For example, the following field paths
are invalid: placement.clusterSelector.clusterLabels
jobs['step-id'].sparkJob.args
name: Required. Parameter name. The parameter name is used as the key, and
paired with the parameter value, which are passed to the template when
the template is instantiated. The name must contain only capital letters
(A-Z), numbers (0-9), and underscores (_), and must not start with a
number. The maximum length is 40 characters.
validation: Optional. Validation rules to be applied to this parameter's
value.
"""
description = _messages.StringField(1)
fields = _messages.StringField(2, repeated=True)
name = _messages.StringField(3)
validation = _messages.MessageField('ParameterValidation', 4)
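# Example (sketch): a template parameter that substitutes the cluster
# selector's zone and restricts its value with a regex. The regex is
# illustrative only.
#
#   zone_param = TemplateParameter(
#       name='ZONE',
#       fields=['placement.clusterSelector.zone'],
#       validation=ParameterValidation(
#           regex=RegexValidation(regexes=[r'us-central1-[abcf]'])))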
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for TestIamPermissions method.
Fields:
permissions: The set of permissions to check for the resource. Permissions
with wildcards (such as '*' or 'storage.*') are not allowed. For more
information see IAM Overview
(https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for TestIamPermissions method.
Fields:
permissions: A subset of TestPermissionsRequest.permissions that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class ValueValidation(_messages.Message):
r"""Validation based on a list of allowed values.
Fields:
values: Required. List of allowed values for the parameter.
"""
values = _messages.StringField(1, repeated=True)
class WorkflowGraph(_messages.Message):
r"""The workflow graph.
Fields:
nodes: Output only. The workflow nodes.
"""
nodes = _messages.MessageField('WorkflowNode', 1, repeated=True)
class WorkflowMetadata(_messages.Message):
r"""A Cloud Dataproc workflow template resource.
Enums:
StateValueValuesEnum: Output only. The workflow state.
Messages:
ParametersValue: Map from parameter names to values that were used for
those parameters.
Fields:
clusterName: Output only. The name of the target cluster.
clusterUuid: Output only. The UUID of target cluster.
createCluster: Output only. The create cluster operation metadata.
deleteCluster: Output only. The delete cluster operation metadata.
endTime: Output only. Workflow end time.
graph: Output only. The workflow graph.
parameters: Map from parameter names to values that were used for those
parameters.
startTime: Output only. Workflow start time.
state: Output only. The workflow state.
template: Output only. The "resource name" of the template.
version: Output only. The version of template at the time of workflow
instantiation.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The workflow state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is running.
DONE: The operation is done; either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
r"""Map from parameter names to values that were used for those
parameters.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
createCluster = _messages.MessageField('ClusterOperation', 3)
deleteCluster = _messages.MessageField('ClusterOperation', 4)
endTime = _messages.StringField(5)
graph = _messages.MessageField('WorkflowGraph', 6)
parameters = _messages.MessageField('ParametersValue', 7)
startTime = _messages.StringField(8)
state = _messages.EnumField('StateValueValuesEnum', 9)
template = _messages.StringField(10)
version = _messages.IntegerField(11, variant=_messages.Variant.INT32)
class WorkflowNode(_messages.Message):
r"""The workflow node.
Enums:
StateValueValuesEnum: Output only. The node state.
Fields:
error: Output only. The error detail.
jobId: Output only. The job id; populated after the node enters RUNNING
state.
prerequisiteStepIds: Output only. Node's prerequisite nodes.
state: Output only. The node state.
stepId: Output only. The name of the node.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The node state.
Values:
NODE_STATUS_UNSPECIFIED: State is unspecified.
BLOCKED: The node is awaiting prerequisite node to finish.
RUNNABLE: The node is runnable but not running.
RUNNING: The node is running.
COMPLETED: The node completed successfully.
FAILED: The node failed. A node can be marked FAILED because its
ancestor or peer failed.
"""
NODE_STATUS_UNSPECIFIED = 0
BLOCKED = 1
RUNNABLE = 2
RUNNING = 3
COMPLETED = 4
FAILED = 5
error = _messages.StringField(1)
jobId = _messages.StringField(2)
prerequisiteStepIds = _messages.StringField(3, repeated=True)
state = _messages.EnumField('StateValueValuesEnum', 4)
stepId = _messages.StringField(5)
class WorkflowTemplate(_messages.Message):
r"""A Cloud Dataproc workflow template resource.
Messages:
LabelsValue: Optional. The labels to associate with this template. These
labels will be propagated to all jobs and clusters created by the
workflow instance. Label keys must contain 1 to 63 characters, and must
conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values
may be empty, but, if present, must contain 1 to 63 characters, and must
conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than
32 labels can be associated with a template.
Fields:
createTime: Output only. The time template was created.
id: Required. The template id. The id must contain only letters (a-z,
A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or
end with underscore or hyphen. Must consist of between 3 and 50
characters.
jobs: Required. The Directed Acyclic Graph of Jobs to submit.
labels: Optional. The labels to associate with this template. These labels
will be propagated to all jobs and clusters created by the workflow
instance. Label keys must contain 1 to 63 characters, and must conform to
RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be
empty, but, if present, must contain 1 to 63 characters, and must conform
to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32
labels can be associated with a template.
name: Output only. The "resource name" of the template, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
parameters: Optional. Template parameters whose values are substituted
into the template. Values for parameters must be provided when the
template is instantiated.
placement: Required. WorkflowTemplate scheduling information.
updateTime: Output only. The time template was last updated.
version: Optional. Used to perform a consistent read-modify-write.This
field should be left blank for a CreateWorkflowTemplate request. It is
required for an UpdateWorkflowTemplate request, and must match the
current server version. A typical update template flow would fetch the
current template with a GetWorkflowTemplate request, which will return
the current template with the version field filled in with the current
server version. The user updates other fields in the template, then
returns it as part of the UpdateWorkflowTemplate request.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this template. These labels
will be propagated to all jobs and clusters created by the workflow
instance.Label keys must contain 1 to 63 characters, and must conform to
RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty,
but, if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be
associated with a template.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
createTime = _messages.StringField(1)
id = _messages.StringField(2)
jobs = _messages.MessageField('OrderedJob', 3, repeated=True)
labels = _messages.MessageField('LabelsValue', 4)
name = _messages.StringField(5)
parameters = _messages.MessageField('TemplateParameter', 6, repeated=True)
placement = _messages.MessageField('WorkflowTemplatePlacement', 7)
updateTime = _messages.StringField(8)
version = _messages.IntegerField(9, variant=_messages.Variant.INT32)
class WorkflowTemplatePlacement(_messages.Message):
r"""Specifies workflow execution target.Either managed_cluster or
cluster_selector is required.
Fields:
clusterSelector: Optional. A selector that chooses target cluster for jobs
based on metadata.The selector is evaluated at the time each job is
submitted.
managedCluster: Optional. A cluster that is managed by the workflow.
"""
clusterSelector = _messages.MessageField('ClusterSelector', 1)
managedCluster = _messages.MessageField('ManagedCluster', 2)
class YarnApplication(_messages.Message):
r"""A YARN application created by a job. Application information is a subset
  of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
  Beta Feature: This report is available for testing purposes only. It may be
  changed before final release.
Enums:
StateValueValuesEnum: Required. The application state.
Fields:
name: Required. The application name.
progress: Required. The numerical progress of the application, from 1 to
100.
state: Required. The application state.
trackingUrl: Optional. The HTTP URL of the ApplicationMaster,
HistoryServer, or TimelineServer that provides application-specific
information. The URL uses the internal hostname, and requires a proxy
server for resolution and, possibly, access.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Required. The application state.
Values:
STATE_UNSPECIFIED: Status is unspecified.
NEW: Status is NEW.
NEW_SAVING: Status is NEW_SAVING.
SUBMITTED: Status is SUBMITTED.
ACCEPTED: Status is ACCEPTED.
RUNNING: Status is RUNNING.
FINISHED: Status is FINISHED.
FAILED: Status is FAILED.
KILLED: Status is KILLED.
"""
STATE_UNSPECIFIED = 0
NEW = 1
NEW_SAVING = 2
SUBMITTED = 3
ACCEPTED = 4
RUNNING = 5
FINISHED = 6
FAILED = 7
KILLED = 8
name = _messages.StringField(1)
progress = _messages.FloatField(2, variant=_messages.Variant.FLOAT)
state = _messages.EnumField('StateValueValuesEnum', 3)
trackingUrl = _messages.StringField(4)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
|
lib/googlecloudsdk/third_party/apis/dataproc/v1beta2/dataproc_v1beta2_messages.py
|
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'dataproc'
class AcceleratorConfig(_messages.Message):
r"""Specifies the type and number of accelerator cards attached to the
instances of an instance group (see GPUs on Compute Engine).
Fields:
acceleratorCount: The number of the accelerator cards of this type exposed
to this instance.
acceleratorTypeUri: Full URL, partial URI, or short name of the
accelerator type resource to expose to this instance. See Compute Engine
      AcceleratorTypes (/compute/docs/reference/beta/acceleratorTypes).
      Examples:
      * https://www.googleapis.com/compute/beta/projects/[project_id]/zones
        /us-east1-a/acceleratorTypes/nvidia-tesla-k80
      * projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
      * nvidia-tesla-k80
      Auto Zone Exception: If you are using the Cloud Dataproc Auto Zone
      Placement feature, you must use the short name of the accelerator type
      resource, for example, nvidia-tesla-k80.
"""
acceleratorCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
acceleratorTypeUri = _messages.StringField(2)
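# Editor's note: illustrative sketch, not part of the generated module. Per the
# docstring above, acceleratorTypeUri accepts a full URL, a partial URI, or the
# short name of the accelerator type; the count and short name below are
# hypothetical example values.
def _example_accelerator_config():
  return AcceleratorConfig(
      acceleratorCount=2,  # number of cards exposed to each instance
      acceleratorTypeUri='nvidia-tesla-k80')  # short-name form (Auto Zone Placement)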
class AllocationAffinity(_messages.Message):
r"""Allocation Affinity for consuming Zonal allocation.
Enums:
ConsumeAllocationTypeValueValuesEnum:
Fields:
consumeAllocationType: A ConsumeAllocationTypeValueValuesEnum attribute.
key: Corresponds to the label key of Allocation resource.
values: Corresponds to the label values of allocation resource.
"""
class ConsumeAllocationTypeValueValuesEnum(_messages.Enum):
r"""ConsumeAllocationTypeValueValuesEnum enum type.
Values:
TYPE_UNSPECIFIED: <no description>
NO_ALLOCATION: Do not consume from any allocated capacity.
ANY_ALLOCATION: Consume any allocation available.
SPECIFIC_ALLOCATION: Must consume from a specific allocation. Must
specify key value fields for specifying the allocations.
"""
TYPE_UNSPECIFIED = 0
NO_ALLOCATION = 1
ANY_ALLOCATION = 2
SPECIFIC_ALLOCATION = 3
consumeAllocationType = _messages.EnumField('ConsumeAllocationTypeValueValuesEnum', 1)
key = _messages.StringField(2)
values = _messages.StringField(3, repeated=True)
class AutoscalingConfig(_messages.Message):
r"""Autoscaling Policy config associated with the cluster.
Fields:
policyUri: Optional. The autoscaling policy used by the cluster.Only
resource names including projectid and location (region) are valid.
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/lo
cations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[proj
ect_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note
that the policy must be in the same project and Cloud Dataproc region.
"""
policyUri = _messages.StringField(1)
class AutoscalingPolicy(_messages.Message):
r"""Describes an autoscaling policy for Dataproc cluster autoscaler.
Fields:
basicAlgorithm: A BasicAutoscalingAlgorithm attribute.
id: Required. The policy id.The id must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end
with underscore or hyphen. Must consist of between 3 and 50 characters.
name: Output only. The "resource name" of the policy, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
secondaryWorkerConfig: Required. Describes how the autoscaler will operate
for secondary workers.
workerConfig: Required. Describes how the autoscaler will operate for
primary workers.
"""
basicAlgorithm = _messages.MessageField('BasicAutoscalingAlgorithm', 1)
id = _messages.StringField(2)
name = _messages.StringField(3)
secondaryWorkerConfig = _messages.MessageField('InstanceGroupAutoscalingPolicyConfig', 4)
workerConfig = _messages.MessageField('InstanceGroupAutoscalingPolicyConfig', 5)
class BasicAutoscalingAlgorithm(_messages.Message):
r"""Basic algorithm for autoscaling.
Fields:
cooldownPeriod: Required. Cooldown time in between scaling.
yarnConfig: Required. YARN autoscaling configuration.
"""
cooldownPeriod = _messages.StringField(1)
yarnConfig = _messages.MessageField('BasicYarnAutoscalingConfig', 2)
class BasicYarnAutoscalingConfig(_messages.Message):
r"""Basic autoscaling configurations for YARN.
Fields:
gracefulDecommissionTimeout: Optional. Timeout used during an autoscaling
event (cluster update) between 0 seconds (no graceful decommission) and
1 day.Default: 0s.
scaleDownFactor: Optional. Fraction of suggested decrease in workers to
scale down by between 0 and 1. Suggested decrease when scaling down is
determined by the amount of average available memory since the last
cooldown period.Default: 1.0.
scaleDownMinWorkerFraction: Optional. Minimum workers as a fraction of the
      current cluster size to scale down by between 0 and 1.Default: 0.0.
scaleUpFactor: Required. Fraction of suggested increase in workers to
scale up by between 0 and 1. Suggested increase when scaling up is
determined by the amount of average pending memory since the last
cooldown period.
scaleUpMinWorkerFraction: Optional. Minimum workers as a fraction of the
      current cluster size to scale up by between 0 and 1.Default: 0.0.
"""
gracefulDecommissionTimeout = _messages.StringField(1)
scaleDownFactor = _messages.FloatField(2)
scaleDownMinWorkerFraction = _messages.FloatField(3)
scaleUpFactor = _messages.FloatField(4)
scaleUpMinWorkerFraction = _messages.FloatField(5)
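# Editor's note: illustrative sketch, not part of the generated module. Shows a
# basic autoscaling algorithm wired to a YARN config; the duration strings and
# fractions are hypothetical values within the documented ranges (fractions in
# [0, 1], graceful decommission timeout between 0s and 1 day).
def _example_basic_autoscaling_algorithm():
  return BasicAutoscalingAlgorithm(
      cooldownPeriod='120s',  # cooldown between scaling events
      yarnConfig=BasicYarnAutoscalingConfig(
          gracefulDecommissionTimeout='300s',
          scaleUpFactor=0.5,
          scaleDownFactor=1.0,
          scaleUpMinWorkerFraction=0.0,
          scaleDownMinWorkerFraction=0.0))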
class Binding(_messages.Message):
r"""Associates members with a role.
Fields:
condition: Unimplemented. The condition that is associated with this
binding. NOTE: an unsatisfied condition will not allow user access via
current binding. Different bindings, including their conditions, are
examined independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. members can have the following values: allUsers: A special
identifier that represents anyone who is on the internet; with or
without a Google account. allAuthenticatedUsers: A special identifier
that represents anyone who is authenticated with a Google account or a
service account. user:{emailid}: An email address that represents a
specific Google account. For example, <EMAIL> .
serviceAccount:{emailid}: An email address that represents a service
account. For example, <EMAIL>.
group:{emailid}: An email address that represents a Google group. For
example, <EMAIL>. domain:{domain}: A Google Apps domain name
that represents all the users of that domain. For example, google.com
or example.com.
role: Role that is assigned to members. For example, roles/viewer,
roles/editor, or roles/owner.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
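# Editor's note: illustrative sketch, not part of the generated module. Shows
# the member identity formats described in the docstring above; the concrete
# domain is a hypothetical placeholder (the email examples are elided in this
# dump and are left out here).
def _example_binding():
  return Binding(
      role='roles/viewer',
      members=['allAuthenticatedUsers', 'domain:example.com'])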
class CancelJobRequest(_messages.Message):
r"""A request to cancel a job."""
class Cluster(_messages.Message):
r"""Describes the identifying information, config, and status of a cluster
of Compute Engine instances.
Messages:
LabelsValue: Optional. The labels to associate with this cluster. Label
keys must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a cluster.
Fields:
clusterName: Required. The cluster name. Cluster names within a project
must be unique. Names of deleted clusters can be reused.
clusterUuid: Output only. A cluster UUID (Unique Universal Identifier).
Cloud Dataproc generates this value when it creates the cluster.
config: Required. The cluster config. Note that Cloud Dataproc may set
default values, and values may change when clusters are updated.
labels: Optional. The labels to associate with this cluster. Label keys
must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a cluster.
metrics: Output only. Contains cluster daemon metrics such as HDFS and
YARN stats.Beta Feature: This report is available for testing purposes
only. It may be changed before final release.
projectId: Required. The Google Cloud Platform project ID that the cluster
belongs to.
status: Output only. Cluster status.
statusHistory: Output only. The previous cluster status.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this cluster. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
present, must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
associated with a cluster.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
config = _messages.MessageField('ClusterConfig', 3)
labels = _messages.MessageField('LabelsValue', 4)
metrics = _messages.MessageField('ClusterMetrics', 5)
projectId = _messages.StringField(6)
status = _messages.MessageField('ClusterStatus', 7)
statusHistory = _messages.MessageField('ClusterStatus', 8, repeated=True)
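# Editor's note: illustrative sketch, not part of the generated module. The
# labels map on Cluster is encoded as a LabelsValue message holding repeated
# AdditionalProperty key/value pairs; the cluster name and label below are
# hypothetical.
def _example_cluster_with_labels():
  return Cluster(
      clusterName='example-cluster',
      labels=Cluster.LabelsValue(additionalProperties=[
          Cluster.LabelsValue.AdditionalProperty(key='env', value='staging')]))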
class ClusterConfig(_messages.Message):
r"""The cluster config.
Fields:
autoscalingConfig: Optional. Autoscaling config for the policy associated
with the cluster. Cluster does not autoscale if this field is unset.
configBucket: Optional. A Cloud Storage staging bucket used for sharing
generated SSH keys and config. If you do not specify a staging bucket,
Cloud Dataproc will determine an appropriate Cloud Storage location (US,
ASIA, or EU) for your cluster's staging bucket according to the Google
Compute Engine zone where your cluster is deployed, and then it will
create and manage this project-level, per-location bucket for you.
encryptionConfig: Optional. Encryption settings for the cluster.
endpointConfig: Optional. Port/endpoint configuration for this cluster
gceClusterConfig: Required. The shared Compute Engine config settings for
all instances in a cluster.
initializationActions: Optional. Commands to execute on each node after
config is completed. By default, executables are run on master and all
worker nodes. You can test a node's <code>role</code> metadata to run an
executable on a master or worker node, as shown below using curl (you
      can also use wget):
        ROLE=$(curl -H Metadata-Flavor:Google
          http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
        if [[ "${ROLE}" == 'Master' ]]; then
          ... master specific actions ...
        else
          ... worker specific actions ...
        fi
lifecycleConfig: Optional. The config setting for auto delete cluster
schedule.
masterConfig: Optional. The Compute Engine config settings for the master
instance in a cluster.
secondaryWorkerConfig: Optional. The Compute Engine config settings for
additional worker instances in a cluster.
securityConfig: Optional. Security related configuration.
softwareConfig: Optional. The config settings for software inside the
cluster.
workerConfig: Optional. The Compute Engine config settings for worker
instances in a cluster.
"""
autoscalingConfig = _messages.MessageField('AutoscalingConfig', 1)
configBucket = _messages.StringField(2)
encryptionConfig = _messages.MessageField('EncryptionConfig', 3)
endpointConfig = _messages.MessageField('EndpointConfig', 4)
gceClusterConfig = _messages.MessageField('GceClusterConfig', 5)
initializationActions = _messages.MessageField('NodeInitializationAction', 6, repeated=True)
lifecycleConfig = _messages.MessageField('LifecycleConfig', 7)
masterConfig = _messages.MessageField('InstanceGroupConfig', 8)
secondaryWorkerConfig = _messages.MessageField('InstanceGroupConfig', 9)
securityConfig = _messages.MessageField('SecurityConfig', 10)
softwareConfig = _messages.MessageField('SoftwareConfig', 11)
workerConfig = _messages.MessageField('InstanceGroupConfig', 12)
class ClusterMetrics(_messages.Message):
r"""Contains cluster daemon metrics, such as HDFS and YARN stats.Beta
Feature: This report is available for testing purposes only. It may be
changed before final release.
Messages:
HdfsMetricsValue: The HDFS metrics.
YarnMetricsValue: The YARN metrics.
Fields:
hdfsMetrics: The HDFS metrics.
yarnMetrics: The YARN metrics.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HdfsMetricsValue(_messages.Message):
r"""The HDFS metrics.
Messages:
AdditionalProperty: An additional property for a HdfsMetricsValue
object.
Fields:
additionalProperties: Additional properties of type HdfsMetricsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a HdfsMetricsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.IntegerField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class YarnMetricsValue(_messages.Message):
r"""The YARN metrics.
Messages:
AdditionalProperty: An additional property for a YarnMetricsValue
object.
Fields:
additionalProperties: Additional properties of type YarnMetricsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a YarnMetricsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.IntegerField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
hdfsMetrics = _messages.MessageField('HdfsMetricsValue', 1)
yarnMetrics = _messages.MessageField('YarnMetricsValue', 2)
class ClusterOperation(_messages.Message):
r"""The cluster operation triggered by a workflow.
Fields:
done: Output only. Indicates the operation is done.
error: Output only. Error, if operation failed.
operationId: Output only. The id of the cluster operation.
"""
done = _messages.BooleanField(1)
error = _messages.StringField(2)
operationId = _messages.StringField(3)
class ClusterOperationMetadata(_messages.Message):
r"""Metadata describing the operation.
Messages:
LabelsValue: Output only. Labels associated with the operation
Fields:
clusterName: Output only. Name of the cluster for the operation.
clusterUuid: Output only. Cluster UUID for the operation.
description: Output only. Short description of operation.
labels: Output only. Labels associated with the operation
operationType: Output only. The operation type.
status: Output only. Current operation status.
statusHistory: Output only. The previous operation status.
warnings: Output only. Errors encountered during operation execution.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Output only. Labels associated with the operation
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
description = _messages.StringField(3)
labels = _messages.MessageField('LabelsValue', 4)
operationType = _messages.StringField(5)
status = _messages.MessageField('ClusterOperationStatus', 6)
statusHistory = _messages.MessageField('ClusterOperationStatus', 7, repeated=True)
warnings = _messages.StringField(8, repeated=True)
class ClusterOperationStatus(_messages.Message):
r"""The status of the operation.
Enums:
StateValueValuesEnum: Output only. A message containing the operation
state.
Fields:
details: Output only. A message containing any operation metadata details.
innerState: Output only. A message containing the detailed operation
state.
state: Output only. A message containing the operation state.
stateStartTime: Output only. The time this state was entered.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. A message containing the operation state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is running.
DONE: The operation is done; either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
details = _messages.StringField(1)
innerState = _messages.StringField(2)
state = _messages.EnumField('StateValueValuesEnum', 3)
stateStartTime = _messages.StringField(4)
class ClusterSelector(_messages.Message):
r"""A selector that chooses target cluster for jobs based on metadata.
Messages:
ClusterLabelsValue: Required. The cluster labels. Cluster must have all
labels to match.
Fields:
clusterLabels: Required. The cluster labels. Cluster must have all labels
to match.
zone: Optional. The zone where workflow process executes. This parameter
does not affect the selection of the cluster.If unspecified, the zone of
the first cluster matching the selector is used.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ClusterLabelsValue(_messages.Message):
r"""Required. The cluster labels. Cluster must have all labels to match.
Messages:
AdditionalProperty: An additional property for a ClusterLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ClusterLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ClusterLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterLabels = _messages.MessageField('ClusterLabelsValue', 1)
zone = _messages.StringField(2)
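# Editor's note: illustrative sketch, not part of the generated module. A
# selector matches clusters that carry all of the given labels; the label
# key/value are hypothetical. The zone is left unset, so the zone of the first
# matching cluster would be used, as described above.
def _example_cluster_selector():
  return ClusterSelector(
      clusterLabels=ClusterSelector.ClusterLabelsValue(additionalProperties=[
          ClusterSelector.ClusterLabelsValue.AdditionalProperty(
              key='env', value='staging')]))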
class ClusterStatus(_messages.Message):
r"""The status of a cluster and its instances.
Enums:
StateValueValuesEnum: Output only. The cluster's state.
SubstateValueValuesEnum: Output only. Additional state information that
includes status reported by the agent.
Fields:
detail: Output only. Optional details of cluster's state.
state: Output only. The cluster's state.
stateStartTime: Output only. Time when this state was entered.
substate: Output only. Additional state information that includes status
reported by the agent.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The cluster's state.
Values:
UNKNOWN: The cluster state is unknown.
CREATING: The cluster is being created and set up. It is not ready for
use.
RUNNING: The cluster is currently running and healthy. It is ready for
use.
ERROR: The cluster encountered an error. It is not ready for use.
DELETING: The cluster is being deleted. It cannot be used.
UPDATING: The cluster is being updated. It continues to accept and
process jobs.
"""
UNKNOWN = 0
CREATING = 1
RUNNING = 2
ERROR = 3
DELETING = 4
UPDATING = 5
class SubstateValueValuesEnum(_messages.Enum):
r"""Output only. Additional state information that includes status
reported by the agent.
Values:
UNSPECIFIED: The cluster substate is unknown.
UNHEALTHY: The cluster is known to be in an unhealthy state (for
example, critical daemons are not running or HDFS capacity is
exhausted).Applies to RUNNING state.
STALE_STATUS: The agent-reported status is out of date (may occur if
Cloud Dataproc loses communication with Agent).Applies to RUNNING
state.
"""
UNSPECIFIED = 0
UNHEALTHY = 1
STALE_STATUS = 2
detail = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
stateStartTime = _messages.StringField(3)
substate = _messages.EnumField('SubstateValueValuesEnum', 4)
class DataprocProjectsLocationsAutoscalingPoliciesCreateRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesCreateRequest object.
Fields:
autoscalingPolicy: A AutoscalingPolicy resource to be passed as the
request body.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}.
"""
autoscalingPolicy = _messages.MessageField('AutoscalingPolicy', 1)
parent = _messages.StringField(2, required=True)
class DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsLocationsAutoscalingPoliciesGetRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesGetRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsLocationsAutoscalingPoliciesListRequest(_messages.Message):
r"""A DataprocProjectsLocationsAutoscalingPoliciesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsLocationsWorkflowTemplatesCreateRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesCreateRequest object.
Fields:
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
parent = _messages.StringField(1, required=True)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 2)
class DataprocProjectsLocationsWorkflowTemplatesDeleteRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesDeleteRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to delete. If
specified, will only delete the template if the current server version
matches specified version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsLocationsWorkflowTemplatesGetRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesGetRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to retrieve. Only
      previously instantiated versions can be retrieved. If unspecified,
retrieves the current version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest
object.
Fields:
instanceId: Deprecated. Please use request_id field instead.
parent: Required. The "resource name" of the workflow template region, as
described in https://cloud.google.com/apis/design/resource_names of the
form projects/{project_id}/regions/{region}
requestId: Optional. A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.It is recommended to always
set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
instanceId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
requestId = _messages.StringField(3)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 4)
class DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest object.
Fields:
instantiateWorkflowTemplateRequest: A InstantiateWorkflowTemplateRequest
resource to be passed as the request body.
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
"""
instantiateWorkflowTemplateRequest = _messages.MessageField('InstantiateWorkflowTemplateRequest', 1)
name = _messages.StringField(2, required=True)
class DataprocProjectsLocationsWorkflowTemplatesListRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsAutoscalingPoliciesCreateRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesCreateRequest object.
Fields:
autoscalingPolicy: A AutoscalingPolicy resource to be passed as the
request body.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}.
"""
autoscalingPolicy = _messages.MessageField('AutoscalingPolicy', 1)
parent = _messages.StringField(2, required=True)
class DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsAutoscalingPoliciesGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesGetRequest object.
Fields:
name: Required. The "resource name" of the autoscaling policy, as
described in https://cloud.google.com/apis/design/resource_names of the
form
projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsAutoscalingPoliciesListRequest(_messages.Message):
r"""A DataprocProjectsRegionsAutoscalingPoliciesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersCreateRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersCreateRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
requestId: Optional. A unique id used to identify the request. If the
server receives two CreateClusterRequest requests with the same id, then
the second request will be ignored and the first
google.longrunning.Operation created and stored in the backend is
returned.It is recommended to always set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
"""
cluster = _messages.MessageField('Cluster', 1)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
requestId = _messages.StringField(4)
class DataprocProjectsRegionsClustersDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersDeleteRequest object.
Fields:
clusterName: Required. The cluster name.
clusterUuid: Optional. Specifying the cluster_uuid means the RPC should
fail (with error NOT_FOUND) if cluster with specified UUID does not
exist.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
requestId: Optional. A unique id used to identify the request. If the
server receives two DeleteClusterRequest requests with the same id, then
the second request will be ignored and the first
google.longrunning.Operation created and stored in the backend is
returned.It is recommended to always set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
"""
clusterName = _messages.StringField(1, required=True)
clusterUuid = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
requestId = _messages.StringField(5)
class DataprocProjectsRegionsClustersDiagnoseRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersDiagnoseRequest object.
Fields:
clusterName: Required. The cluster name.
diagnoseClusterRequest: A DiagnoseClusterRequest resource to be passed as
the request body.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
clusterName = _messages.StringField(1, required=True)
diagnoseClusterRequest = _messages.MessageField('DiagnoseClusterRequest', 2)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
class DataprocProjectsRegionsClustersGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsClustersGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersGetRequest object.
Fields:
clusterName: Required. The cluster name.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
clusterName = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersListRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersListRequest object.
Fields:
filter: Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:field = value AND field =
value ...where field is one of status.state, clusterName, or
labels.[KEY], and [KEY] is a label key. value can be * to match all
values. status.state can be one of the following: ACTIVE, INACTIVE,
CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the
CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING
and ERROR states. clusterName is the name of the cluster provided at
creation time. Only the logical AND operator is supported; space-
separated items are treated as having an implicit AND operator.Example
filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env
= staging AND labels.starred = *
pageSize: Optional. The standard List page size.
pageToken: Optional. The standard List page token.
projectId: Required. The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
region = _messages.StringField(5, required=True)
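# Editor's note: illustrative sketch, not part of the generated module. A list
# request using the filter syntax documented above (logical AND only, * matches
# all values); the project, region, and label are hypothetical.
def _example_clusters_list_request():
  return DataprocProjectsRegionsClustersListRequest(
      projectId='my-project',
      region='us-central1',
      filter='status.state = ACTIVE AND labels.env = staging')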
class DataprocProjectsRegionsClustersPatchRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersPatchRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
clusterName: Required. The cluster name.
gracefulDecommissionTimeout: Optional. Timeout for graceful YARN
      decommissioning. Graceful decommissioning allows removing nodes from the
cluster without interrupting jobs in progress. Timeout specifies how
long to wait for jobs in progress to finish before forcefully removing
nodes (and potentially interrupting jobs). Default timeout is 0 (for
forceful decommission), and the maximum allowed timeout is 1 day.Only
supported on Dataproc image versions 1.2 and higher.
projectId: Required. The ID of the Google Cloud Platform project the
cluster belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
requestId: Optional. A unique id used to identify the request. If the
server receives two UpdateClusterRequest requests with the same id, then
the second request will be ignored and the first
google.longrunning.Operation created and stored in the backend is
returned.It is recommended to always set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The id
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
updateMask: Required. Specifies the path, relative to Cluster, of the
field to update. For example, to change the number of workers in a
cluster to 5, the update_mask parameter would be specified as
config.worker_config.num_instances, and the PATCH request body would
specify the new value, as follows: { "config":{ "workerConfig":{
"numInstances":"5" } } } Similarly, to change the number of
preemptible workers in a cluster to 5, the update_mask parameter would
be config.secondary_worker_config.num_instances, and the PATCH request
body would be set as follows: { "config":{
"secondaryWorkerConfig":{ "numInstances":"5" } } }
      Note: currently only the following fields (masks) can be updated:
        labels: Updates labels
        config.worker_config.num_instances: Resize primary worker group
        config.secondary_worker_config.num_instances: Resize secondary worker
          group
        config.lifecycle_config.auto_delete_ttl: Reset MAX TTL duration
        config.lifecycle_config.auto_delete_time: Update MAX TTL deletion
          timestamp
        config.lifecycle_config.idle_delete_ttl: Update Idle TTL duration
"""
cluster = _messages.MessageField('Cluster', 1)
clusterName = _messages.StringField(2, required=True)
gracefulDecommissionTimeout = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
region = _messages.StringField(5, required=True)
requestId = _messages.StringField(6)
updateMask = _messages.StringField(7)
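# Editor's note: illustrative sketch, not part of the generated module. Resizes
# the primary worker group via update_mask, mirroring the JSON example in the
# docstring above. It assumes the InstanceGroupConfig message defined elsewhere
# in this module exposes the numInstances field referenced by the mask; the
# project, region, and cluster names are hypothetical.
def _example_clusters_patch_request():
  return DataprocProjectsRegionsClustersPatchRequest(
      projectId='my-project',
      region='us-central1',
      clusterName='my-cluster',
      updateMask='config.worker_config.num_instances',
      cluster=Cluster(config=ClusterConfig(
          workerConfig=InstanceGroupConfig(numInstances=5))))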
class DataprocProjectsRegionsClustersSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsClustersTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsClustersTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsJobsCancelRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsCancelRequest object.
Fields:
cancelJobRequest: A CancelJobRequest resource to be passed as the request
body.
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
cancelJobRequest = _messages.MessageField('CancelJobRequest', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
class DataprocProjectsRegionsJobsDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsDeleteRequest object.
Fields:
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsJobsGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsGetRequest object.
Fields:
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsListRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsListRequest object.
Enums:
JobStateMatcherValueValuesEnum: Optional. Specifies enumerated categories
of jobs to list. (default = match ALL jobs).If filter is provided,
jobStateMatcher will be ignored.
Fields:
clusterName: Optional. If set, the returned jobs list includes only jobs
that were submitted to the named cluster.
filter: Optional. A filter constraining the jobs to list. Filters are
case-sensitive and have the following syntax:field = value AND field =
value ...where field is status.state or labels.[KEY], and [KEY] is a
label key. value can be * to match all values. status.state can be
either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported;
space-separated items are treated as having an implicit AND
operator.Example filter:status.state = ACTIVE AND labels.env = staging
AND labels.starred = *
jobStateMatcher: Optional. Specifies enumerated categories of jobs to
list. (default = match ALL jobs).If filter is provided, jobStateMatcher
will be ignored.
pageSize: Optional. The number of results to return in each response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
"""
class JobStateMatcherValueValuesEnum(_messages.Enum):
r"""Optional. Specifies enumerated categories of jobs to list. (default =
match ALL jobs).If filter is provided, jobStateMatcher will be ignored.
Values:
ALL: <no description>
ACTIVE: <no description>
NON_ACTIVE: <no description>
"""
ALL = 0
ACTIVE = 1
NON_ACTIVE = 2
clusterName = _messages.StringField(1)
filter = _messages.StringField(2)
jobStateMatcher = _messages.EnumField('JobStateMatcherValueValuesEnum', 3)
pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(5)
projectId = _messages.StringField(6, required=True)
region = _messages.StringField(7, required=True)
class DataprocProjectsRegionsJobsPatchRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsPatchRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: Required. The job ID.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
updateMask: Required. Specifies the path, relative to <code>Job</code>, of
the field to update. For example, to update the labels of a Job the
<code>update_mask</code> parameter would be specified as
<code>labels</code>, and the PATCH request body would specify the new
value. <strong>Note:</strong> Currently, <code>labels</code> is the only
field that can be updated.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
updateMask = _messages.StringField(5)
class DataprocProjectsRegionsJobsSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsJobsSubmitRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsSubmitRequest object.
Fields:
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
region: Required. The Cloud Dataproc region in which to handle the
request.
submitJobRequest: A SubmitJobRequest resource to be passed as the request
body.
"""
projectId = _messages.StringField(1, required=True)
region = _messages.StringField(2, required=True)
submitJobRequest = _messages.MessageField('SubmitJobRequest', 3)
class DataprocProjectsRegionsJobsTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsJobsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsOperationsCancelRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsCancelRequest object.
Fields:
name: The name of the operation resource to be cancelled.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsListRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation's parent resource.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class DataprocProjectsRegionsOperationsSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsOperationsTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsOperationsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DataprocProjectsRegionsWorkflowTemplatesCreateRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesCreateRequest object.
Fields:
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
parent = _messages.StringField(1, required=True)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 2)
class DataprocProjectsRegionsWorkflowTemplatesDeleteRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesDeleteRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to delete. If
specified, will only delete the template if the current server version
matches specified version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DataprocProjectsRegionsWorkflowTemplatesGetRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesGetRequest object.
Fields:
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
version: Optional. The version of workflow template to retrieve. Only
      previously instantiated versions can be retrieved. If unspecified,
retrieves the current version.
"""
name = _messages.StringField(1, required=True)
version = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest
object.
Fields:
instanceId: Deprecated. Please use request_id field instead.
parent: Required. The "resource name" of the workflow template region, as
described in https://cloud.google.com/apis/design/resource_names of the
form projects/{project_id}/regions/{region}
requestId: Optional. A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.It is recommended to always
set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
workflowTemplate: A WorkflowTemplate resource to be passed as the request
body.
"""
instanceId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
requestId = _messages.StringField(3)
workflowTemplate = _messages.MessageField('WorkflowTemplate', 4)
class DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest object.
Fields:
instantiateWorkflowTemplateRequest: A InstantiateWorkflowTemplateRequest
resource to be passed as the request body.
name: Required. The "resource name" of the workflow template, as described
in https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
"""
instantiateWorkflowTemplateRequest = _messages.MessageField('InstantiateWorkflowTemplateRequest', 1)
name = _messages.StringField(2, required=True)
class DataprocProjectsRegionsWorkflowTemplatesListRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesListRequest object.
Fields:
pageSize: Optional. The maximum number of results to return in each
response.
pageToken: Optional. The page token, returned by a previous call, to
request the next page of results.
parent: Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest(_messages.Message):
r"""A DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DiagnoseClusterRequest(_messages.Message):
r"""A request to collect cluster diagnostic information."""
class DiagnoseClusterResults(_messages.Message):
r"""The location of diagnostic output.
Fields:
outputUri: Output only. The Cloud Storage URI of the diagnostic output.
The output report is a plain text file with a summary of collected
diagnostics.
"""
outputUri = _messages.StringField(1)
class DiskConfig(_messages.Message):
r"""Specifies the config of disk options for a group of VM instances.
Fields:
bootDiskSizeGb: Optional. Size in GB of the boot disk (default is 500GB).
bootDiskType: Optional. Type of the boot disk (default is "pd-standard").
Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-
standard" (Persistent Disk Hard Disk Drive).
numLocalSsds: Optional. Number of attached SSDs, from 0 to 4 (default is
0). If SSDs are not attached, the boot disk is used to store runtime
logs and HDFS
(https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If
one or more SSDs are attached, this runtime bulk data is spread across
them, and the boot disk contains only basic config and installed
binaries.
"""
bootDiskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
bootDiskType = _messages.StringField(2)
numLocalSsds = _messages.IntegerField(3, variant=_messages.Variant.INT32)
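
# Illustrative sketch, not part of the generated API surface: constructing a
# DiskConfig message with the fields documented above. The values simply
# mirror the documented defaults and are hypothetical.
def _example_disk_config():
  return DiskConfig(
      bootDiskSizeGb=500,          # documented default boot disk size, in GB
      bootDiskType='pd-standard',  # or 'pd-ssd'
      numLocalSsds=0,              # 0 to 4 attached SSDs
  )
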
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for Empty is empty JSON object {}.
"""
class EncryptionConfig(_messages.Message):
r"""Encryption settings for the cluster.
Fields:
gcePdKmsKeyName: Optional. The Cloud KMS key name to use for PD disk
encryption for all instances in the cluster.
"""
gcePdKmsKeyName = _messages.StringField(1)
class EndpointConfig(_messages.Message):
r"""Endpoint config for this cluster
Messages:
HttpPortsValue: Output only. The map of port descriptions to URLs. Will
only be populated if enable_http_port_access is true.
Fields:
enableHttpPortAccess: Optional. If true, enable http access to specific
ports on the cluster from external sources. Defaults to false.
httpPorts: Output only. The map of port descriptions to URLs. Will only be
populated if enable_http_port_access is true.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HttpPortsValue(_messages.Message):
r"""Output only. The map of port descriptions to URLs. Will only be
populated if enable_http_port_access is true.
Messages:
AdditionalProperty: An additional property for a HttpPortsValue object.
Fields:
additionalProperties: Additional properties of type HttpPortsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a HttpPortsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
enableHttpPortAccess = _messages.BooleanField(1)
httpPorts = _messages.MessageField('HttpPortsValue', 2)
class Expr(_messages.Message):
r"""Represents an expression text. Example: title: "User account presence"
description: "Determines whether the request has a user account" expression:
"size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
      describing its purpose. This can be used e.g. in UIs which allow
      entering the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
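
# Illustrative sketch, not part of the generated API surface: an Expr message
# populated with the "User account presence" example from the docstring above.
def _example_expr():
  return Expr(
      title='User account presence',
      description='Determines whether the request has a user account',
      expression='size(request.user) > 0',
  )
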
class GceClusterConfig(_messages.Message):
r"""Common config settings for resources of Compute Engine cluster
instances, applicable to all instances in the cluster.
Messages:
MetadataValue: The Compute Engine metadata entries to add to all instances
(see Project and instance metadata
(https://cloud.google.com/compute/docs/storing-retrieving-
metadata#project_and_instance_metadata)).
Fields:
allocationAffinity: Allocation Affinity for consuming Zonal allocation.
internalIpOnly: Optional. If true, all instances in the cluster will only
have internal IP addresses. By default, clusters are not restricted to
internal IP addresses, and will have ephemeral external IP addresses
assigned to each instance. This internal_ip_only restriction can only be
enabled for subnetwork enabled networks, and all off-cluster
dependencies must be configured to be accessible without external IP
addresses.
metadata: The Compute Engine metadata entries to add to all instances (see
Project and instance metadata (https://cloud.google.com/compute/docs
/storing-retrieving-metadata#project_and_instance_metadata)).
networkUri: Optional. The Compute Engine network to be used for machine
communications. Cannot be specified with subnetwork_uri. If neither
network_uri nor subnetwork_uri is specified, the "default" network of
the project is used, if it exists. Cannot be a "Custom Subnet Network"
(see Using Subnetworks for more information).A full URL, partial URI, or
short name are valid. Examples: https://www.googleapis.com/compute/v1/pr
ojects/[project_id]/regions/global/default
projects/[project_id]/regions/global/default default
serviceAccount: Optional. The service account of the instances. Defaults
to the default Compute Engine service account. Custom service accounts
need permissions equivalent to the following IAM roles:
roles/logging.logWriter roles/storage.objectAdmin(see
https://cloud.google.com/compute/docs/access/service-
accounts#custom_service_accounts for more information). Example:
[account_id]@[project_id].iam.gserviceaccount.com
serviceAccountScopes: Optional. The URIs of service account scopes to be
included in Compute Engine instances. The following base set of scopes
is always included:
https://www.googleapis.com/auth/cloud.useraccounts.readonly
https://www.googleapis.com/auth/devstorage.read_write
https://www.googleapis.com/auth/logging.writeIf no scopes are specified,
the following defaults are also provided:
https://www.googleapis.com/auth/bigquery
https://www.googleapis.com/auth/bigtable.admin.table
https://www.googleapis.com/auth/bigtable.data
https://www.googleapis.com/auth/devstorage.full_control
subnetworkUri: Optional. The Compute Engine subnetwork to be used for
machine communications. Cannot be specified with network_uri.A full URL,
partial URI, or short name are valid. Examples:
https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-
east1/sub0 projects/[project_id]/regions/us-east1/sub0 sub0
tags: The Compute Engine tags to add to all instances (see Tagging
instances).
zoneUri: Optional. The zone where the Compute Engine cluster will be
located. On a create request, it is required in the "global" region. If
omitted in a non-global Cloud Dataproc region, the service will pick a
zone in the corresponding Compute Engine region. On a get request, zone
will always be present.A full URL, partial URI, or short name are valid.
Examples:
https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
projects/[project_id]/zones/[zone] us-central1-f
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""The Compute Engine metadata entries to add to all instances (see
Project and instance metadata (https://cloud.google.com/compute/docs
/storing-retrieving-metadata#project_and_instance_metadata)).
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allocationAffinity = _messages.MessageField('AllocationAffinity', 1)
internalIpOnly = _messages.BooleanField(2)
metadata = _messages.MessageField('MetadataValue', 3)
networkUri = _messages.StringField(4)
serviceAccount = _messages.StringField(5)
serviceAccountScopes = _messages.StringField(6, repeated=True)
subnetworkUri = _messages.StringField(7)
tags = _messages.StringField(8, repeated=True)
zoneUri = _messages.StringField(9)
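
# Illustrative sketch, not part of the generated API surface: a minimal
# GceClusterConfig using the short-name forms documented above for zoneUri and
# subnetworkUri. The zone, subnetwork, and tag values are hypothetical.
def _example_gce_cluster_config():
  return GceClusterConfig(
      zoneUri='us-central1-f',   # short name, as in the docstring examples
      subnetworkUri='sub0',      # cannot be combined with networkUri
      internalIpOnly=False,
      tags=['dataproc-node'],    # hypothetical Compute Engine network tag
  )
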
class GetIamPolicyRequest(_messages.Message):
r"""Request message for GetIamPolicy method."""
class HadoopJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Hadoop MapReduce
(https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-
mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN
(https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-
site/YARN.html).
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of Hadoop drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, or .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as -libjars or -Dfoo=bar, that can be set as job
properties, since a collision may occur that causes an incorrect job
submission.
fileUris: Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to
be copied to the working directory of Hadoop drivers and distributed
tasks. Useful for naively parallel tasks.
jarFileUris: Optional. Jar file URIs to add to the CLASSPATHs of the
Hadoop driver and tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file containing
the class must be in the default CLASSPATH or specified in
jar_file_uris.
mainJarFileUri: The HCFS URI of the jar file containing the main class.
Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-
mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar'
'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
properties: Optional. A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Hadoop. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/hadoop/conf/*-site
and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
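
# Illustrative sketch, not part of the generated API surface: a HadoopJob that
# points at the main jar from the docstring's mainJarFileUri examples. The
# program arguments and bucket names are hypothetical.
def _example_hadoop_job():
  return HadoopJob(
      mainJarFileUri=('file:///home/usr/lib/hadoop-mapreduce/'
                      'hadoop-mapreduce-examples.jar'),
      args=['wordcount', 'gs://my-bucket/input', 'gs://my-bucket/output'],
  )
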
class HiveJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
queries on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names and values, used to
configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
ScriptVariablesValue: Optional. Mapping of query variable names to values
(equivalent to the Hive command: SET name="value";).
Fields:
continueOnFailure: Optional. Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of
the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
and UDFs.
properties: Optional. A mapping of property names and values, used to
configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
queryFileUri: The HCFS URI of the script that contains Hive queries.
queryList: A list of queries.
scriptVariables: Optional. Mapping of query variable names to values
(equivalent to the Hive command: SET name="value";).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names and values, used to configure
Hive. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in
user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
r"""Optional. Mapping of query variable names to values (equivalent to the
Hive command: SET name="value";).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
continueOnFailure = _messages.BooleanField(1)
jarFileUris = _messages.StringField(2, repeated=True)
properties = _messages.MessageField('PropertiesValue', 3)
queryFileUri = _messages.StringField(4)
queryList = _messages.MessageField('QueryList', 5)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
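
# Illustrative sketch, not part of the generated API surface: a HiveJob with a
# query file and one script variable. Map-valued fields such as
# scriptVariables are built from the nested ScriptVariablesValue /
# AdditionalProperty messages generated above. The URI and variable are
# hypothetical.
def _example_hive_job():
  script_vars = HiveJob.ScriptVariablesValue(additionalProperties=[
      HiveJob.ScriptVariablesValue.AdditionalProperty(key='name', value='value'),
  ])
  return HiveJob(
      queryFileUri='gs://my-bucket/queries.hql',
      scriptVariables=script_vars,
      continueOnFailure=False,
  )
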
class InstanceGroupAutoscalingPolicyConfig(_messages.Message):
r"""Configuration for the size bounds of an instance group, including its
proportional size to other groups.
Fields:
maxInstances: Required. Maximum number of instances for this group. Must
be >= min_instances.
minInstances: Optional. Minimum number of instances for this group.Default
for primary workers is 2, default for secondary workers is 0.
weight: Optional. Weight for instance group. Determines fraction of total
workers in cluster that will be composed of instances from this instance
group (e.g. if primary workers have weight 2 and secondary workers have
weight 1, then the cluster should have approximately 2 primary workers
to each secondary worker. Cluster may not reach these exact weights if
constrained by min/max bounds or other autoscaling
      configurations. Default is 1. Note that all groups have an equal
weight by default, so the cluster will attempt to maintain an equal
number of workers in each group within configured size bounds per group.
"""
maxInstances = _messages.IntegerField(1, variant=_messages.Variant.INT32)
minInstances = _messages.IntegerField(2, variant=_messages.Variant.INT32)
weight = _messages.IntegerField(3, variant=_messages.Variant.INT32)
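
# Illustrative sketch, not part of the generated API surface: size bounds for
# a primary worker group using the documented defaults (minInstances=2,
# weight=1). The maxInstances value is a hypothetical upper bound.
def _example_primary_worker_autoscaling_config():
  return InstanceGroupAutoscalingPolicyConfig(
      minInstances=2,    # documented default for primary workers
      maxInstances=10,   # must be >= minInstances
      weight=1,          # documented default weight
  )
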
class InstanceGroupConfig(_messages.Message):
r"""Optional. The config settings for Compute Engine resources in an
instance group, such as a master or worker group.
Fields:
accelerators: Optional. The Compute Engine accelerator configuration for
these instances.Beta Feature: This feature is still under development.
It may be changed before final release.
diskConfig: Optional. Disk option config settings.
imageUri: Optional. The Compute Engine image resource used for cluster
instances. It can be specified or may be inferred from
SoftwareConfig.image_version.
instanceNames: Output only. The list of instance names. Cloud Dataproc
derives the names from cluster_name, num_instances, and the instance
group.
isPreemptible: Optional. Specifies that this instance group contains
preemptible instances.
machineTypeUri: Optional. The Compute Engine machine type used for cluster
instances.A full URL, partial URI, or short name are valid. Examples:
https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
east1-a/machineTypes/n1-standard-2 projects/[project_id]/zones/us-
      east1-a/machineTypes/n1-standard-2 n1-standard-2. Auto Zone Exception: If
you are using the Cloud Dataproc Auto Zone Placement feature, you must
use the short name of the machine type resource, for example,
n1-standard-2.
managedGroupConfig: Output only. The config for Compute Engine Instance
Group Manager that manages this group. This is only used for preemptible
instance groups.
minCpuPlatform: Optional. Specifies the minimum cpu platform for the
Instance Group. See Cloud Dataproc→Minimum CPU Platform.
numInstances: Optional. The number of VM instances in the instance group.
For master instance groups, must be set to 1.
"""
accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True)
diskConfig = _messages.MessageField('DiskConfig', 2)
imageUri = _messages.StringField(3)
instanceNames = _messages.StringField(4, repeated=True)
isPreemptible = _messages.BooleanField(5)
machineTypeUri = _messages.StringField(6)
managedGroupConfig = _messages.MessageField('ManagedGroupConfig', 7)
minCpuPlatform = _messages.StringField(8)
numInstances = _messages.IntegerField(9, variant=_messages.Variant.INT32)
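
# Illustrative sketch, not part of the generated API surface: a worker
# instance group that uses the short machine-type name recommended by the
# Auto Zone note above, together with the DiskConfig sketch from earlier. The
# instance count is hypothetical.
def _example_worker_instance_group_config():
  return InstanceGroupConfig(
      numInstances=2,
      machineTypeUri='n1-standard-2',   # short name, per the Auto Zone note
      diskConfig=_example_disk_config(),
      isPreemptible=False,
  )
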
class InstantiateWorkflowTemplateRequest(_messages.Message):
r"""A request to instantiate a workflow template.
Messages:
ParametersValue: Optional. Map from parameter names to values that should
be used for those parameters. Values may not exceed 100 characters.
Fields:
instanceId: Deprecated. Please use request_id field instead.
parameters: Optional. Map from parameter names to values that should be
used for those parameters. Values may not exceed 100 characters.
requestId: Optional. A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.It is recommended to always
set this value to a UUID
(https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag
must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
version: Optional. The version of workflow template to instantiate. If
specified, the workflow will be instantiated only if the current version
of the workflow template has the supplied version.This option cannot be
used to instantiate a previous version of workflow template.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
r"""Optional. Map from parameter names to values that should be used for
those parameters. Values may not exceed 100 characters.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
instanceId = _messages.StringField(1)
parameters = _messages.MessageField('ParametersValue', 2)
requestId = _messages.StringField(3)
version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
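
# Illustrative sketch, not part of the generated API surface: instantiating a
# workflow template with a request_id and one template parameter. The
# parameter name, value, and request id are hypothetical; the parameters map
# is built from the ParametersValue / AdditionalProperty messages above.
def _example_instantiate_workflow_template_request():
  params = InstantiateWorkflowTemplateRequest.ParametersValue(additionalProperties=[
      InstantiateWorkflowTemplateRequest.ParametersValue.AdditionalProperty(
          key='CLUSTER_NAME', value='my-cluster'),
  ])
  return InstantiateWorkflowTemplateRequest(
      requestId='c2b9b8a0-workflow-run-001',  # ideally a UUID, per the docstring
      parameters=params,
  )
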
class Job(_messages.Message):
r"""A Cloud Dataproc job resource.
Messages:
LabelsValue: Optional. The labels to associate with this job. Label keys
must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a job.
Fields:
driverControlFilesUri: Output only. If present, the location of
miscellaneous control files which may be used as part of job setup and
handling. If not present, control files may be placed in the same
location as driver_output_uri.
driverOutputResourceUri: Output only. A URI pointing to the location of
the stdout of the job's driver program.
hadoopJob: Job is a Hadoop job.
hiveJob: Job is a Hive job.
jobUuid: Output only. A UUID that uniquely identifies a job within the
project over time. This is in contrast to a user-settable
reference.job_id that may be reused over time.
labels: Optional. The labels to associate with this job. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a job.
pigJob: Job is a Pig job.
placement: Required. Job information, including how, when, and where to
run the job.
prestoJob: Job is a Presto job
pysparkJob: Job is a Pyspark job.
reference: Optional. The fully qualified reference to the job, which can
be used to obtain the equivalent REST path of the job resource. If this
property is not specified when a job is created, the server generates a
<code>job_id</code>.
scheduling: Optional. Job scheduling configuration.
sparkJob: Job is a Spark job.
sparkRJob: Job is a SparkR job.
sparkSqlJob: Job is a SparkSql job.
status: Output only. The job status. Additional application-specific
status information may be contained in the <code>type_job</code> and
<code>yarn_applications</code> fields.
statusHistory: Output only. The previous job status.
submittedBy: Output only. The email address of the user submitting the
job. For jobs submitted on the cluster, the address is
<code>username@hostname</code>.
yarnApplications: Output only. The collection of YARN applications spun up
by this job.Beta Feature: This report is available for testing purposes
only. It may be changed before final release.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this job. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
present, must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
associated with a job.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
driverControlFilesUri = _messages.StringField(1)
driverOutputResourceUri = _messages.StringField(2)
hadoopJob = _messages.MessageField('HadoopJob', 3)
hiveJob = _messages.MessageField('HiveJob', 4)
jobUuid = _messages.StringField(5)
labels = _messages.MessageField('LabelsValue', 6)
pigJob = _messages.MessageField('PigJob', 7)
placement = _messages.MessageField('JobPlacement', 8)
prestoJob = _messages.MessageField('PrestoJob', 9)
pysparkJob = _messages.MessageField('PySparkJob', 10)
reference = _messages.MessageField('JobReference', 11)
scheduling = _messages.MessageField('JobScheduling', 12)
sparkJob = _messages.MessageField('SparkJob', 13)
sparkRJob = _messages.MessageField('SparkRJob', 14)
sparkSqlJob = _messages.MessageField('SparkSqlJob', 15)
status = _messages.MessageField('JobStatus', 16)
statusHistory = _messages.MessageField('JobStatus', 17, repeated=True)
submittedBy = _messages.StringField(18)
yarnApplications = _messages.MessageField('YarnApplication', 19, repeated=True)
class JobPlacement(_messages.Message):
r"""Cloud Dataproc job config.
Fields:
clusterName: Required. The name of the cluster where the job will be
submitted.
clusterUuid: Output only. A cluster UUID generated by the Cloud Dataproc
service when the job is submitted.
"""
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
class JobReference(_messages.Message):
r"""Encapsulates the full scoping used to reference a job.
Fields:
jobId: Optional. The job ID, which must be unique within the project. The
job ID is generated by the server upon job submission or provided by the
user as a means to perform retries without creating duplicate jobs. The
ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
or hyphens (-). The maximum length is 100 characters.
projectId: Required. The ID of the Google Cloud Platform project that the
job belongs to.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
class JobScheduling(_messages.Message):
r"""Job scheduling options.
Fields:
maxFailuresPerHour: Optional. Maximum number of times per hour a driver
may be restarted as a result of driver terminating with non-zero code
      before the job is reported failed. A job may be reported as thrashing
      if the driver exits with non-zero code 4 times within a 10-minute
      window. Maximum
value is 10.
"""
maxFailuresPerHour = _messages.IntegerField(1, variant=_messages.Variant.INT32)
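
# Illustrative sketch, not part of the generated API surface: a Job resource
# that submits the Hadoop job sketched earlier to a named cluster, using the
# Job, JobPlacement, JobReference, and JobScheduling messages defined above.
# Project, cluster, and job ids are hypothetical.
def _example_job():
  return Job(
      placement=JobPlacement(clusterName='my-cluster'),
      reference=JobReference(projectId='my-project', jobId='wordcount-001'),
      hadoopJob=_example_hadoop_job(),
      scheduling=JobScheduling(maxFailuresPerHour=1),
  )
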
class JobStatus(_messages.Message):
r"""Cloud Dataproc job status.
Enums:
StateValueValuesEnum: Output only. A state message specifying the overall
job state.
SubstateValueValuesEnum: Output only. Additional state information, which
includes status reported by the agent.
Fields:
details: Output only. Optional job state details, such as an error
description if the state is <code>ERROR</code>.
state: Output only. A state message specifying the overall job state.
stateStartTime: Output only. The time when this state was entered.
substate: Output only. Additional state information, which includes status
reported by the agent.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. A state message specifying the overall job state.
Values:
STATE_UNSPECIFIED: The job state is unknown.
PENDING: The job is pending; it has been submitted, but is not yet
running.
SETUP_DONE: Job has been received by the service and completed initial
setup; it will soon be submitted to the cluster.
RUNNING: The job is running on the cluster.
CANCEL_PENDING: A CancelJob request has been received, but is pending.
CANCEL_STARTED: Transient in-flight resources have been canceled, and
the request to cancel the running job has been issued to the cluster.
CANCELLED: The job cancellation was successful.
DONE: The job has completed successfully.
ERROR: The job has completed, but encountered an error.
ATTEMPT_FAILURE: Job attempt has failed. The detail field contains
failure details for this attempt.Applies to restartable jobs only.
"""
STATE_UNSPECIFIED = 0
PENDING = 1
SETUP_DONE = 2
RUNNING = 3
CANCEL_PENDING = 4
CANCEL_STARTED = 5
CANCELLED = 6
DONE = 7
ERROR = 8
ATTEMPT_FAILURE = 9
class SubstateValueValuesEnum(_messages.Enum):
r"""Output only. Additional state information, which includes status
reported by the agent.
Values:
UNSPECIFIED: The job substate is unknown.
SUBMITTED: The Job is submitted to the agent.Applies to RUNNING state.
QUEUED: The Job has been received and is awaiting execution (it may be
waiting for a condition to be met). See the "details" field for the
reason for the delay.Applies to RUNNING state.
STALE_STATUS: The agent-reported status is out of date, which may be
caused by a loss of communication between the agent and Cloud
Dataproc. If the agent does not send a timely update, the job will
fail.Applies to RUNNING state.
"""
UNSPECIFIED = 0
SUBMITTED = 1
QUEUED = 2
STALE_STATUS = 3
details = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
stateStartTime = _messages.StringField(3)
substate = _messages.EnumField('SubstateValueValuesEnum', 4)
class KerberosConfig(_messages.Message):
r"""Specifies Kerberos related configuration.
Fields:
crossRealmTrustAdminServer: Optional. The admin server (IP or hostname)
for the remote trusted realm in a cross realm trust relationship.
crossRealmTrustKdc: Optional. The KDC (IP or hostname) for the remote
trusted realm in a cross realm trust relationship.
crossRealmTrustRealm: Optional. The remote realm the Dataproc on-cluster
KDC will trust, should the user enable cross realm trust.
crossRealmTrustSharedPasswordUri: Optional. The GCS uri of a KMS encrypted
file containing the shared password between the on-cluster Kerberos
realm and the remote trusted realm, in a cross realm trust relationship.
enableKerberos: Optional. Flag to indicate whether to Kerberize the
cluster.
kdcDbKeyUri: Optional. The GCS uri of a KMS encrypted file containing the
master key of the KDC database.
keyPasswordUri: Optional. The GCS uri of a KMS encrypted file containing
the password to the user provided key. For the self-signed certificate,
this password is generated by Dataproc.
keystorePasswordUri: Optional. The GCS uri of a KMS encrypted file
containing the password to the user provided keystore. For the self-
signed certificate, this password is generated by Dataproc.
keystoreUri: Optional. The GCS uri of the keystore file used for SSL
encryption. If not provided, Dataproc will provide a self-signed
certificate.
kmsKeyUri: Required. The uri of the KMS key used to encrypt various
sensitive files.
rootPrincipalPasswordUri: Required. The GCS uri of a KMS encrypted file
containing the root principal password.
tgtLifetimeHours: Optional. The lifetime of the ticket granting ticket, in
hours. If not specified, or user specifies 0, then default value 10 will
be used.
truststorePasswordUri: Optional. The GCS uri of a KMS encrypted file
containing the password to the user provided truststore. For the self-
signed certificate, this password is generated by Dataproc.
truststoreUri: Optional. The GCS uri of the truststore file used for SSL
encryption. If not provided, Dataproc will provide a self-signed
certificate.
"""
crossRealmTrustAdminServer = _messages.StringField(1)
crossRealmTrustKdc = _messages.StringField(2)
crossRealmTrustRealm = _messages.StringField(3)
crossRealmTrustSharedPasswordUri = _messages.StringField(4)
enableKerberos = _messages.BooleanField(5)
kdcDbKeyUri = _messages.StringField(6)
keyPasswordUri = _messages.StringField(7)
keystorePasswordUri = _messages.StringField(8)
keystoreUri = _messages.StringField(9)
kmsKeyUri = _messages.StringField(10)
rootPrincipalPasswordUri = _messages.StringField(11)
tgtLifetimeHours = _messages.IntegerField(12, variant=_messages.Variant.INT32)
truststorePasswordUri = _messages.StringField(13)
truststoreUri = _messages.StringField(14)
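
# Illustrative sketch, not part of the generated API surface: the minimal
# Kerberos configuration implied by the Required fields above (the KMS key
# plus the encrypted root principal password). Both URIs are hypothetical
# placeholders.
def _example_kerberos_config():
  return KerberosConfig(
      enableKerberos=True,
      kmsKeyUri='projects/p/locations/global/keyRings/kr/cryptoKeys/k',
      rootPrincipalPasswordUri='gs://my-bucket/root-password.encrypted',
  )
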
class LifecycleConfig(_messages.Message):
r"""Specifies the cluster auto-delete schedule configuration.
Fields:
autoDeleteTime: Optional. The time when cluster will be auto-deleted.
    autoDeleteTtl: Optional. The lifetime duration of the cluster. The
      cluster will be auto-deleted at the end of this period. Valid range:
      10m, 14d. Example: "1d", to delete the cluster 1 day after its
      creation.
    idleDeleteTtl: Optional. The duration to keep the cluster alive while
      idling. Passing this threshold will cause the cluster to be deleted.
      Valid range: 10m, 14d. Example: "10m", the minimum value, to delete
      the cluster when it has had no jobs running for 10 minutes.
idleStartTime: Output only. The time when cluster became idle (most recent
job finished) and became eligible for deletion due to idleness.
"""
autoDeleteTime = _messages.StringField(1)
autoDeleteTtl = _messages.StringField(2)
idleDeleteTtl = _messages.StringField(3)
idleStartTime = _messages.StringField(4)
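
# Illustrative sketch, not part of the generated API surface: an auto-delete
# schedule that reuses the duration examples from the docstring above ("1d"
# lifetime, "10m" idle TTL). Both fields are plain strings in this message;
# whether the service accepts these exact duration spellings is assumed from
# the docstring.
def _example_lifecycle_config():
  return LifecycleConfig(
      autoDeleteTtl='1d',    # delete the cluster one day after creation
      idleDeleteTtl='10m',   # the documented minimum idle TTL
  )
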
class ListAutoscalingPoliciesResponse(_messages.Message):
r"""A response to a request to list autoscaling policies in a project.
Fields:
nextPageToken: Output only. This token is included in the response if
there are more results to fetch.
policies: Output only. Autoscaling policies list.
"""
nextPageToken = _messages.StringField(1)
policies = _messages.MessageField('AutoscalingPolicy', 2, repeated=True)
class ListClustersResponse(_messages.Message):
r"""The list of all clusters in a project.
Fields:
clusters: Output only. The clusters in the project.
nextPageToken: Output only. This token is included in the response if
there are more results to fetch. To fetch additional results, provide
this value as the page_token in a subsequent
<code>ListClustersRequest</code>.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListJobsResponse(_messages.Message):
r"""A list of jobs in a project.
Fields:
jobs: Output only. Jobs list.
nextPageToken: Optional. This token is included in the response if there
are more results to fetch. To fetch additional results, provide this
value as the page_token in a subsequent <code>ListJobsRequest</code>.
"""
jobs = _messages.MessageField('Job', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListWorkflowTemplatesResponse(_messages.Message):
r"""A response to a request to list workflow templates in a project.
Fields:
nextPageToken: Output only. This token is included in the response if
there are more results to fetch. To fetch additional results, provide
this value as the page_token in a subsequent
<code>ListWorkflowTemplatesRequest</code>.
templates: Output only. WorkflowTemplates list.
"""
nextPageToken = _messages.StringField(1)
templates = _messages.MessageField('WorkflowTemplate', 2, repeated=True)
class LoggingConfig(_messages.Message):
r"""The runtime logging config of the job.
Messages:
DriverLogLevelsValue: The per-package log levels for the driver. This may
include "root" package name to configure rootLogger. Examples:
'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
Fields:
driverLogLevels: The per-package log levels for the driver. This may
include "root" package name to configure rootLogger. Examples:
'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DriverLogLevelsValue(_messages.Message):
r"""The per-package log levels for the driver. This may include "root"
package name to configure rootLogger. Examples: 'com.google = FATAL',
'root = INFO', 'org.apache = DEBUG'
Messages:
AdditionalProperty: An additional property for a DriverLogLevelsValue
object.
Fields:
additionalProperties: Additional properties of type DriverLogLevelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DriverLogLevelsValue object.
Enums:
ValueValueValuesEnum:
Fields:
key: Name of the additional property.
value: A ValueValueValuesEnum attribute.
"""
class ValueValueValuesEnum(_messages.Enum):
r"""ValueValueValuesEnum enum type.
Values:
LEVEL_UNSPECIFIED: <no description>
ALL: <no description>
TRACE: <no description>
DEBUG: <no description>
INFO: <no description>
WARN: <no description>
ERROR: <no description>
FATAL: <no description>
OFF: <no description>
"""
LEVEL_UNSPECIFIED = 0
ALL = 1
TRACE = 2
DEBUG = 3
INFO = 4
WARN = 5
ERROR = 6
FATAL = 7
OFF = 8
key = _messages.StringField(1)
value = _messages.EnumField('ValueValueValuesEnum', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
driverLogLevels = _messages.MessageField('DriverLogLevelsValue', 1)
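
# Illustrative sketch, not part of the generated API surface: a LoggingConfig
# that applies the docstring's examples, setting the root logger to INFO and
# org.apache to DEBUG via the DriverLogLevelsValue map generated above.
def _example_logging_config():
  level = LoggingConfig.DriverLogLevelsValue.AdditionalProperty.ValueValueValuesEnum
  levels = LoggingConfig.DriverLogLevelsValue(additionalProperties=[
      LoggingConfig.DriverLogLevelsValue.AdditionalProperty(key='root', value=level.INFO),
      LoggingConfig.DriverLogLevelsValue.AdditionalProperty(key='org.apache', value=level.DEBUG),
  ])
  return LoggingConfig(driverLogLevels=levels)
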
class ManagedCluster(_messages.Message):
r"""Cluster that is managed by the workflow.
Messages:
LabelsValue: Optional. The labels to associate with this cluster.Label
keys must be between 1 and 63 characters long, and must conform to the
following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must
be between 1 and 63 characters long, and must conform to the following
PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels
can be associated with a given cluster.
Fields:
clusterName: Required. The cluster name prefix. A unique cluster name will
be formed by appending a random suffix.The name must contain only lower-
case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a
letter. Cannot begin or end with hyphen. Must consist of between 2 and
35 characters.
config: Required. The cluster configuration.
labels: Optional. The labels to associate with this cluster.Label keys
must be between 1 and 63 characters long, and must conform to the
following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values must
be between 1 and 63 characters long, and must conform to the following
PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels
can be associated with a given cluster.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this cluster.Label keys must be
between 1 and 63 characters long, and must conform to the following PCRE
regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
63 characters long, and must conform to the following PCRE regular
expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be
associated with a given cluster.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
config = _messages.MessageField('ClusterConfig', 2)
labels = _messages.MessageField('LabelsValue', 3)
class ManagedGroupConfig(_messages.Message):
r"""Specifies the resources used to actively manage an instance group.
Fields:
instanceGroupManagerName: Output only. The name of the Instance Group
Manager for this group.
instanceTemplateName: Output only. The name of the Instance Template used
for the Managed Instance Group.
"""
instanceGroupManagerName = _messages.StringField(1)
instanceTemplateName = _messages.StringField(2)
class NodeInitializationAction(_messages.Message):
r"""Specifies an executable to run on a fully configured node and a timeout
period for executable completion.
Fields:
executableFile: Required. Cloud Storage URI of executable file.
executionTimeout: Optional. Amount of time executable has to complete.
Default is 10 minutes. Cluster creation fails with an explanatory error
message (the name of the executable that caused the error and the
      exceeded timeout period) if the executable is not completed at the end
      of the timeout period.
"""
executableFile = _messages.StringField(1)
executionTimeout = _messages.StringField(2)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as Delete, the
response is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other
methods, the response should have the type XxxResponse, where Xxx is the
original method name. For example, if the original method name is
TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
Fields:
done: If the value is false, it means the operation is still in progress.
If true, the operation is completed, and either error or response is
available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the name should have the format of operations/some/unique/name.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as Delete, the response
is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other
methods, the response should have the type XxxResponse, where Xxx is the
original method name. For example, if the original method name is
TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as Delete, the response
is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other methods,
the response should have the type XxxResponse, where Xxx is the original
method name. For example, if the original method name is TakeSnapshot(),
the inferred response type is TakeSnapshotResponse.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class OrderedJob(_messages.Message):
r"""A job executed by the workflow.
Messages:
LabelsValue: Optional. The labels to associate with this job.Label keys
must be between 1 and 63 characters long, and must conform to the
following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be
between 1 and 63 characters long, and must conform to the following
regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can
be associated with a given job.
Fields:
hadoopJob: Job is a Hadoop job.
hiveJob: Job is a Hive job.
labels: Optional. The labels to associate with this job.Label keys must be
between 1 and 63 characters long, and must conform to the following
regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
63 characters long, and must conform to the following regular
expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be
associated with a given job.
pigJob: Job is a Pig job.
prerequisiteStepIds: Optional. The optional list of prerequisite job
step_ids. If not specified, the job will start at the beginning of
workflow.
prestoJob: Job is a Presto job.
pysparkJob: Job is a Pyspark job.
scheduling: Optional. Job scheduling configuration.
sparkJob: Job is a Spark job.
sparkRJob: Job is a SparkR job.
sparkSqlJob: Job is a SparkSql job.
stepId: Required. The step id. The id must be unique among all jobs within
the template.The step id is used as prefix for job id, as job goog-
dataproc-workflow-step-id label, and in prerequisiteStepIds field from
other steps.The id must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). Cannot begin or end with underscore or
hyphen. Must consist of between 3 and 50 characters.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this job.Label keys must be
between 1 and 63 characters long, and must conform to the following
regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and
63 characters long, and must conform to the following regular expression:
\p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels can be associated with a
given job.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
hadoopJob = _messages.MessageField('HadoopJob', 1)
hiveJob = _messages.MessageField('HiveJob', 2)
labels = _messages.MessageField('LabelsValue', 3)
pigJob = _messages.MessageField('PigJob', 4)
prerequisiteStepIds = _messages.StringField(5, repeated=True)
prestoJob = _messages.MessageField('PrestoJob', 6)
pysparkJob = _messages.MessageField('PySparkJob', 7)
scheduling = _messages.MessageField('JobScheduling', 8)
sparkJob = _messages.MessageField('SparkJob', 9)
sparkRJob = _messages.MessageField('SparkRJob', 10)
sparkSqlJob = _messages.MessageField('SparkSqlJob', 11)
stepId = _messages.StringField(12)
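
# Illustrative sketch, not part of the generated API surface: an OrderedJob
# that wires the Hive job sketch from earlier into a workflow step depending
# on a hypothetical earlier step.
def _example_ordered_job():
  return OrderedJob(
      stepId='run-hive-queries',              # hypothetical step id
      prerequisiteStepIds=['prepare-data'],   # hypothetical prerequisite step
      hiveJob=_example_hive_job(),
  )
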
class ParameterValidation(_messages.Message):
r"""Configuration for parameter validation.
Fields:
regex: Validation based on regular expressions.
values: Validation based on a list of allowed values.
"""
regex = _messages.MessageField('RegexValidation', 1)
values = _messages.MessageField('ValueValidation', 2)
class PigJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/)
queries on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Pig. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
in user code.
ScriptVariablesValue: Optional. Mapping of query variable names to values
(equivalent to the Pig command: name=[value]).
Fields:
continueOnFailure: Optional. Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATH of
the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
loggingConfig: Optional. The runtime log config for job execution.
properties: Optional. A mapping of property names to values, used to
configure Pig. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
in user code.
queryFileUri: The HCFS URI of the script that contains the Pig queries.
queryList: A list of queries.
scriptVariables: Optional. Mapping of query variable names to values
(equivalent to the Pig command: name=[value]).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Pig. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in
user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
r"""Optional. Mapping of query variable names to values (equivalent to the
Pig command: name=[value]).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
continueOnFailure = _messages.BooleanField(1)
jarFileUris = _messages.StringField(2, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 3)
properties = _messages.MessageField('PropertiesValue', 4)
queryFileUri = _messages.StringField(5)
queryList = _messages.MessageField('QueryList', 6)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 7)
class Policy(_messages.Message):
r"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources.A Policy
consists of a list of bindings. A binding binds a list of members to a role,
where the members can be user accounts, Google groups, Google domains, and
service accounts. A role is a named list of permissions defined by IAM.JSON
Example { "bindings": [ { "role": "roles/owner",
"members": [ "user:<EMAIL>",
"group:<EMAIL>", "domain:google.com",
"serviceAccount:<EMAIL>" ] },
{ "role": "roles/viewer", "members": ["user:<EMAIL>"]
} ] } YAML Example bindings: - members: - user:<EMAIL> -
group:<EMAIL> - domain:google.com - serviceAccount:my-<EMAIL>-
<EMAIL> role: roles/owner - members: -
user:<EMAIL> role: roles/viewer For a description of IAM and its
features, see the IAM developer's guide (https://cloud.google.com/iam/docs).
Fields:
bindings: Associates a list of members to a role. bindings with no members
will result in an error.
etag: etag is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the etag in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An etag is returned in the response to getIamPolicy, and
systems are expected to put that etag in the request to setIamPolicy to
ensure that their change will be applied to the same version of the
policy.If no etag is provided in the call to setIamPolicy, then the
existing policy is overwritten blindly.
version: Deprecated.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
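
# Illustrative sketch, not part of the generated API surface: a Policy
# mirroring the docstring's example above, with one owner binding and one
# viewer binding. The Binding message is generated elsewhere in this module
# and is assumed to expose 'role' and 'members'; the member strings are
# placeholders.
def _example_policy():
  return Policy(bindings=[
      Binding(role='roles/owner', members=['domain:google.com']),
      Binding(role='roles/viewer', members=['user:alice@example.com']),
  ])
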
class PrestoJob(_messages.Message):
r"""A Cloud Dataproc job for running Presto (https://prestosql.io/) queries
Messages:
PropertiesValue: Optional. A mapping of property names to values. Used to
set Presto session properties (https://prestodb.io/docs/current/sql/set-
session.html) Equivalent to using the --session flag in the Presto CLI
Fields:
clientTags: Optional. Presto client tags to attach to this query
continueOnFailure: Optional. Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
loggingConfig: Optional. The runtime log config for job execution.
outputFormat: Optional. The format in which query output will be
displayed. See the Presto documentation for supported output formats
properties: Optional. A mapping of property names to values. Used to set
Presto session properties (https://prestodb.io/docs/current/sql/set-
session.html) Equivalent to using the --session flag in the Presto CLI
queryFileUri: The HCFS URI of the script that contains SQL queries.
queryList: A list of queries.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values. Used to set Presto
session properties (https://prestodb.io/docs/current/sql/set-session.html)
Equivalent to using the --session flag in the Presto CLI
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clientTags = _messages.StringField(1, repeated=True)
continueOnFailure = _messages.BooleanField(2)
loggingConfig = _messages.MessageField('LoggingConfig', 3)
outputFormat = _messages.StringField(4)
properties = _messages.MessageField('PropertiesValue', 5)
queryFileUri = _messages.StringField(6)
queryList = _messages.MessageField('QueryList', 7)
class PySparkJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache PySpark
(https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
applications on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure PySpark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of .jar, .tar, .tar.gz, .tgz, and .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional. HCFS URIs of files to be copied to the working
directory of Python drivers and distributed tasks. Useful for naively
parallel tasks.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
the Python driver and tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainPythonFileUri: Required. The HCFS URI of the main Python file to use
as the driver. Must be a .py file.
properties: Optional. A mapping of property names to values, used to
configure PySpark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
pythonFileUris: Optional. HCFS file URIs of Python files to pass to the
PySpark framework. Supported file types: .py, .egg, and .zip.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
PySpark. Properties that conflict with values set by the Cloud Dataproc
API may be overwritten. Can include properties set in /etc/spark/conf
/spark-defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainPythonFileUri = _messages.StringField(6)
properties = _messages.MessageField('PropertiesValue', 7)
pythonFileUris = _messages.StringField(8, repeated=True)
class QueryList(_messages.Message):
r"""A list of queries to run on a cluster.
Fields:
queries: Required. The queries to execute. You do not need to terminate a
query with a semicolon. Multiple queries can be specified in one string
by separating each with a semicolon. Here is an example of an Cloud
Dataproc API snippet that uses a QueryList to specify a HiveJob:
"hiveJob": { "queryList": { "queries": [ "query1",
"query2", "query3;query4", ] } }
"""
queries = _messages.StringField(1, repeated=True)
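# Illustrative sketch: the message-construction equivalent of the inline-query
# JSON snippet in the QueryList docstring above. The query strings are
# placeholders.
def _example_query_list():
  return QueryList(queries=['query1', 'query2', 'query3;query4'])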
class RegexValidation(_messages.Message):
r"""Validation based on regular expressions.
Fields:
regexes: Required. RE2 regular expressions used to validate the
parameter's value. The value must match the regex in its entirety
(substring matches are not sufficient).
"""
regexes = _messages.StringField(1, repeated=True)
class SecurityConfig(_messages.Message):
r"""Security related configuration, including encryption, Kerberos, etc.
Fields:
kerberosConfig: Kerberos related configuration.
"""
kerberosConfig = _messages.MessageField('KerberosConfig', 1)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for SetIamPolicy method.
Fields:
policy: REQUIRED: The complete policy to be applied to the resource. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
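# Illustrative sketch of the read-modify-write flow described in the Policy
# docstring: take the Policy returned by getIamPolicy (with its etag), add a
# binding, and wrap it in a SetIamPolicyRequest. It assumes the Binding
# message defined earlier in this module exposes `role` and `members` fields;
# the member string is a placeholder.
def _example_set_iam_policy_request(current_policy):
  # Keeping the etag from getIamPolicy lets the server reject concurrent edits.
  current_policy.bindings.append(
      Binding(role='roles/viewer', members=['user:example-member']))
  return SetIamPolicyRequest(policy=current_policy)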
class SoftwareConfig(_messages.Message):
r"""Specifies the selection and config of software inside the cluster.
Enums:
OptionalComponentsValueListEntryValuesEnum:
Messages:
PropertiesValue: Optional. The properties to set on daemon config
files.Property keys are specified in prefix:property format, such as
core:fs.defaultFS. The following are supported prefixes and their
mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml
distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml
mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf
yarn: yarn-site.xml. For more information, see Cluster properties.
Fields:
imageVersion: Optional. The version of software inside the cluster. It
must be one of the supported Cloud Dataproc Versions, such as "1.2"
(including a subminor version, such as "1.2.29"), or the "preview"
version. If unspecified, it defaults to the latest version.
optionalComponents: The set of optional components to activate on the
cluster.
properties: Optional. The properties to set on daemon config
files.Property keys are specified in prefix:property format, such as
core:fs.defaultFS. The following are supported prefixes and their
mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml
distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml
mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf
yarn: yarn-site.xml. For more information, see Cluster properties.
"""
class OptionalComponentsValueListEntryValuesEnum(_messages.Enum):
r"""OptionalComponentsValueListEntryValuesEnum enum type.
Values:
COMPONENT_UNSPECIFIED: <no description>
JUPYTER: <no description>
HIVE_WEBHCAT: <no description>
ZEPPELIN: <no description>
ANACONDA: <no description>
PRESTO: <no description>
KERBEROS: <no description>
"""
COMPONENT_UNSPECIFIED = 0
JUPYTER = 1
HIVE_WEBHCAT = 2
ZEPPELIN = 3
ANACONDA = 4
PRESTO = 5
KERBEROS = 6
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. The properties to set on daemon config files.Property keys
are specified in prefix:property format, such as core:fs.defaultFS. The
following are supported prefixes and their mappings: capacity-scheduler:
capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml
hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig:
pig.properties spark: spark-defaults.conf yarn: yarn-site.xml. For more
information, see Cluster properties.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
imageVersion = _messages.StringField(1)
optionalComponents = _messages.EnumField('OptionalComponentsValueListEntryValuesEnum', 2, repeated=True)
properties = _messages.MessageField('PropertiesValue', 3)
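# Illustrative sketch: attaching optional components (values of the enum
# defined above) and an image version to a SoftwareConfig. The version string
# is a placeholder.
def _example_software_config():
  return SoftwareConfig(
      imageVersion='1.4',
      optionalComponents=[
          SoftwareConfig.OptionalComponentsValueListEntryValuesEnum.JUPYTER,
          SoftwareConfig.OptionalComponentsValueListEntryValuesEnum.ANACONDA,
      ])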
class SparkJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
applications on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Spark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of Spark drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, and .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional. HCFS URIs of files to be copied to the working
directory of Spark drivers and distributed tasks. Useful for naively
parallel tasks.
jarFileUris: Optional. HCFS URIs of jar files to add to the CLASSPATHs of
the Spark driver and tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file that contains
the class must be in the default CLASSPATH or specified in
jar_file_uris.
mainJarFileUri: The HCFS URI of the jar file that contains the main class.
properties: Optional. A mapping of property names to values, used to
configure Spark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Spark. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/spark/conf/spark-
defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
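# Illustrative sketch: building a minimal SparkJob, including the map-valued
# `properties` field, which is represented as a PropertiesValue message that
# holds repeated AdditionalProperty (key/value) entries. The URIs, class name,
# and property value are placeholders.
def _example_spark_job():
  props = SparkJob.PropertiesValue(additionalProperties=[
      SparkJob.PropertiesValue.AdditionalProperty(
          key='spark.executor.memory', value='4g'),
  ])
  return SparkJob(
      mainClass='com.example.SparkApp',
      jarFileUris=['gs://my-bucket/spark-app.jar'],
      args=['--input', 'gs://my-bucket/input'],
      properties=props)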
class SparkRJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache SparkR
(https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure SparkR. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional. HCFS URIs of archives to be extracted in the
working directory of Spark drivers and tasks. Supported file types:
.jar, .tar, .tar.gz, .tgz, and .zip.
args: Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional. HCFS URIs of files to be copied to the working
directory of R drivers and distributed tasks. Useful for naively
parallel tasks.
loggingConfig: Optional. The runtime log config for job execution.
mainRFileUri: Required. The HCFS URI of the main R file to use as the
driver. Must be a .R file.
properties: Optional. A mapping of property names to values, used to
configure SparkR. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
SparkR. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/spark/conf/spark-
defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 4)
mainRFileUri = _messages.StringField(5)
properties = _messages.MessageField('PropertiesValue', 6)
class SparkSqlJob(_messages.Message):
r"""A Cloud Dataproc job for running Apache Spark SQL
(http://spark.apache.org/sql/) queries.
Messages:
PropertiesValue: Optional. A mapping of property names to values, used to
configure Spark SQL's SparkConf. Properties that conflict with values
set by the Cloud Dataproc API may be overwritten.
ScriptVariablesValue: Optional. Mapping of query variable names to values
(equivalent to the Spark SQL command: SET name="value";).
Fields:
jarFileUris: Optional. HCFS URIs of jar files to be added to the Spark
CLASSPATH.
loggingConfig: Optional. The runtime log config for job execution.
properties: Optional. A mapping of property names to values, used to
configure Spark SQL's SparkConf. Properties that conflict with values
set by the Cloud Dataproc API may be overwritten.
queryFileUri: The HCFS URI of the script that contains SQL queries.
queryList: A list of queries.
scriptVariables: Optional. Mapping of query variable names to values
(equivalent to the Spark SQL command: SET name="value";).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
r"""Optional. A mapping of property names to values, used to configure
Spark SQL's SparkConf. Properties that conflict with values set by the
Cloud Dataproc API may be overwritten.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
r"""Optional. Mapping of query variable names to values (equivalent to the
Spark SQL command: SET name="value";).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
jarFileUris = _messages.StringField(1, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 2)
properties = _messages.MessageField('PropertiesValue', 3)
queryFileUri = _messages.StringField(4)
queryList = _messages.MessageField('QueryList', 5)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The Status type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by gRPC (https://github.com/grpc). The error model is designed to be:
simple to use and understand for most users, and flexible enough to meet
unexpected needs.
Overview: The Status message contains three pieces of data: error code,
error message, and error details. The error code should be an enum value of
google.rpc.Code, but it may accept additional error codes if needed. The
error message should be a developer-facing English message that helps
developers understand and resolve the error. If a localized user-facing
error message is needed, put the localized message in the error details or
localize it in the client. The optional error details may contain arbitrary
information about the error. There is a predefined set of error detail types
in the package google.rpc that can be used for common error conditions.
Language mapping: The Status message is the logical representation of the
error model, but it is not necessarily the actual wire format. When the
Status message is exposed in different client libraries and different wire
protocols, it can be mapped differently. For example, it will likely be
mapped to some exceptions in Java, but more likely mapped to some error
codes in C.
Other uses: The error model and the Status message can be used in a variety
of environments, either with or without APIs, to provide a consistent
developer experience across different environments. Example uses of this
error model include: Partial errors. If a service needs to return partial
errors to the client, it may embed the Status in the normal response to
indicate the partial errors. Workflow errors. A typical workflow has
multiple steps. Each step may have a Status message for error reporting.
Batch operations. If a client uses batch request and batch response, the
Status message should be used directly inside batch response, one for each
error sub-response. Asynchronous operations. If an API call embeds
asynchronous operation results in its response, the status of those
operations should be represented directly using the Status message.
Logging. If some API errors are stored in logs, the message Status could be
used directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class SubmitJobRequest(_messages.Message):
r"""A request to submit a job.
Fields:
job: Required. The job resource.
requestId: Optional. A unique id used to identify the request. If the
server receives two SubmitJobRequest requests with the same id, then the
second request will be ignored and the first Job created and stored in
the backend is returned. It is recommended to always set this value to a
UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The
id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
and hyphens (-). The maximum length is 40 characters.
"""
job = _messages.MessageField('Job', 1)
requestId = _messages.StringField(2)
class TemplateParameter(_messages.Message):
r"""A configurable parameter that replaces one or more fields in the
template. Parameterizable fields: - Labels - File uris - Job properties -
Job arguments - Script variables - Main class (in HadoopJob and SparkJob) -
Zone (in ClusterSelector)
Fields:
description: Optional. Brief description of the parameter. Must not exceed
1024 characters.
fields: Required. Paths to all fields that the parameter replaces. A field
is allowed to appear in at most one parameter's list of field paths. A
field path is similar in syntax to a google.protobuf.FieldMask. For
example, a field path that references the zone field of a workflow
template's cluster selector would be specified as
placement.clusterSelector.zone. Also, field paths can reference fields
using the following syntax: Values in maps can be referenced by key:
labels['key'] placement.clusterSelector.clusterLabels['key']
placement.managedCluster.labels['key']
placement.clusterSelector.clusterLabels['key'] jobs['step-id'].labels['key']
Jobs in the jobs list can be referenced by step-id:
jobs['step-id'].hadoopJob.mainJarFileUri
jobs['step-id'].hiveJob.queryFileUri
jobs['step-id'].pySparkJob.mainPythonFileUri
jobs['step-id'].hadoopJob.jarFileUris[0]
jobs['step-id'].hadoopJob.archiveUris[0]
jobs['step-id'].hadoopJob.fileUris[0]
jobs['step-id'].pySparkJob.pythonFileUris[0]
Items in repeated fields can be referenced by a zero-based index:
jobs['step-id'].sparkJob.args[0]
Other examples: jobs['step-id'].hadoopJob.properties['key']
jobs['step-id'].hadoopJob.args[0]
jobs['step-id'].hiveJob.scriptVariables['key']
jobs['step-id'].hadoopJob.mainJarFileUri placement.clusterSelector.zone
It may not be possible to parameterize maps and repeated fields in their
entirety since only individual map values and individual items in repeated
fields can be referenced. For example, the following field paths are
invalid: placement.clusterSelector.clusterLabels
jobs['step-id'].sparkJob.args
name: Required. Parameter name. The parameter name is used as the key, and
paired with the parameter value, which are passed to the template when
the template is instantiated. The name must contain only capital letters
(A-Z), numbers (0-9), and underscores (_), and must not start with a
number. The maximum length is 40 characters.
validation: Optional. Validation rules to be applied to this parameter's
value.
"""
description = _messages.StringField(1)
fields = _messages.StringField(2, repeated=True)
name = _messages.StringField(3)
validation = _messages.MessageField('ParameterValidation', 4)
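# Illustrative sketch: a TemplateParameter that substitutes the zone of a
# workflow template's cluster selector, mirroring the field-path example in
# the docstring above. Validation is omitted here; ParameterValidation is
# defined elsewhere in this module.
def _example_template_parameter():
  return TemplateParameter(
      name='ZONE',
      fields=['placement.clusterSelector.zone'],
      description='Zone in which the managed cluster is created.')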
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for TestIamPermissions method.
Fields:
permissions: The set of permissions to check for the resource. Permissions
with wildcards (such as '*' or 'storage.*') are not allowed. For more
information see IAM Overview
(https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for TestIamPermissions method.
Fields:
permissions: A subset of TestPermissionsRequest.permissions that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class ValueValidation(_messages.Message):
r"""Validation based on a list of allowed values.
Fields:
values: Required. List of allowed values for the parameter.
"""
values = _messages.StringField(1, repeated=True)
class WorkflowGraph(_messages.Message):
r"""The workflow graph.
Fields:
nodes: Output only. The workflow nodes.
"""
nodes = _messages.MessageField('WorkflowNode', 1, repeated=True)
class WorkflowMetadata(_messages.Message):
r"""A Cloud Dataproc workflow template resource.
Enums:
StateValueValuesEnum: Output only. The workflow state.
Messages:
ParametersValue: Map from parameter names to values that were used for
those parameters.
Fields:
clusterName: Output only. The name of the target cluster.
clusterUuid: Output only. The UUID of target cluster.
createCluster: Output only. The create cluster operation metadata.
deleteCluster: Output only. The delete cluster operation metadata.
endTime: Output only. Workflow end time.
graph: Output only. The workflow graph.
parameters: Map from parameter names to values that were used for those
parameters.
startTime: Output only. Workflow start time.
state: Output only. The workflow state.
template: Output only. The "resource name" of the template.
version: Output only. The version of template at the time of workflow
instantiation.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The workflow state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is running.
DONE: The operation is done; either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
r"""Map from parameter names to values that were used for those
parameters.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
createCluster = _messages.MessageField('ClusterOperation', 3)
deleteCluster = _messages.MessageField('ClusterOperation', 4)
endTime = _messages.StringField(5)
graph = _messages.MessageField('WorkflowGraph', 6)
parameters = _messages.MessageField('ParametersValue', 7)
startTime = _messages.StringField(8)
state = _messages.EnumField('StateValueValuesEnum', 9)
template = _messages.StringField(10)
version = _messages.IntegerField(11, variant=_messages.Variant.INT32)
class WorkflowNode(_messages.Message):
r"""The workflow node.
Enums:
StateValueValuesEnum: Output only. The node state.
Fields:
error: Output only. The error detail.
jobId: Output only. The job id; populated after the node enters RUNNING
state.
prerequisiteStepIds: Output only. Node's prerequisite nodes.
state: Output only. The node state.
stepId: Output only. The name of the node.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The node state.
Values:
NODE_STATUS_UNSPECIFIED: State is unspecified.
BLOCKED: The node is awaiting prerequisite node to finish.
RUNNABLE: The node is runnable but not running.
RUNNING: The node is running.
COMPLETED: The node completed successfully.
FAILED: The node failed. A node can be marked FAILED because its
ancestor or peer failed.
"""
NODE_STATUS_UNSPECIFIED = 0
BLOCKED = 1
RUNNABLE = 2
RUNNING = 3
COMPLETED = 4
FAILED = 5
error = _messages.StringField(1)
jobId = _messages.StringField(2)
prerequisiteStepIds = _messages.StringField(3, repeated=True)
state = _messages.EnumField('StateValueValuesEnum', 4)
stepId = _messages.StringField(5)
class WorkflowTemplate(_messages.Message):
r"""A Cloud Dataproc workflow template resource.
Messages:
LabelsValue: Optional. The labels to associate with this template. These
labels will be propagated to all jobs and clusters created by the
workflow instance.Label keys must contain 1 to 63 characters, and must
conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values
may be empty, but, if present, must contain 1 to 63 characters, and must
conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than
32 labels can be associated with a template.
Fields:
createTime: Output only. The time template was created.
id: Required. The template id. The id must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end
with underscore or hyphen. Must consist of between 3 and 50 characters.
jobs: Required. The Directed Acyclic Graph of Jobs to submit.
labels: Optional. The labels to associate with this template. These labels
will be propagated to all jobs and clusters created by the workflow
instance.Label keys must contain 1 to 63 characters, and must conform to
RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be
empty, but, if present, must contain 1 to 63 characters, and must
conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than
32 labels can be associated with a template.
name: Output only. The "resource name" of the template, as described in
https://cloud.google.com/apis/design/resource_names of the form
projects/{project_id}/regions/{region}/workflowTemplates/{template_id}
parameters: Optional. Template parameters whose values are substituted
into the template. Values for parameters must be provided when the
template is instantiated.
placement: Required. WorkflowTemplate scheduling information.
updateTime: Output only. The time template was last updated.
version: Optional. Used to perform a consistent read-modify-write.This
field should be left blank for a CreateWorkflowTemplate request. It is
required for an UpdateWorkflowTemplate request, and must match the
current server version. A typical update template flow would fetch the
current template with a GetWorkflowTemplate request, which will return
the current template with the version field filled in with the current
server version. The user updates other fields in the template, then
returns it as part of the UpdateWorkflowTemplate request.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. The labels to associate with this template. These labels
will be propagated to all jobs and clusters created by the workflow
instance.Label keys must contain 1 to 63 characters, and must conform to
RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt).Label values may be empty,
but, if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt).No more than 32 labels can be
associated with a template.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
createTime = _messages.StringField(1)
id = _messages.StringField(2)
jobs = _messages.MessageField('OrderedJob', 3, repeated=True)
labels = _messages.MessageField('LabelsValue', 4)
name = _messages.StringField(5)
parameters = _messages.MessageField('TemplateParameter', 6, repeated=True)
placement = _messages.MessageField('WorkflowTemplatePlacement', 7)
updateTime = _messages.StringField(8)
version = _messages.IntegerField(9, variant=_messages.Variant.INT32)
class WorkflowTemplatePlacement(_messages.Message):
r"""Specifies workflow execution target.Either managed_cluster or
cluster_selector is required.
Fields:
clusterSelector: Optional. A selector that chooses target cluster for jobs
based on metadata.The selector is evaluated at the time each job is
submitted.
managedCluster: Optional. A cluster that is managed by the workflow.
"""
clusterSelector = _messages.MessageField('ClusterSelector', 1)
managedCluster = _messages.MessageField('ManagedCluster', 2)
class YarnApplication(_messages.Message):
r"""A YARN application created by a job. Application information is a subset
of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.
Beta Feature: This report is available for testing purposes only. It may
be changed before final release.
Enums:
StateValueValuesEnum: Required. The application state.
Fields:
name: Required. The application name.
progress: Required. The numerical progress of the application, from 1 to
100.
state: Required. The application state.
trackingUrl: Optional. The HTTP URL of the ApplicationMaster,
HistoryServer, or TimelineServer that provides application-specific
information. The URL uses the internal hostname, and requires a proxy
server for resolution and, possibly, access.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Required. The application state.
Values:
STATE_UNSPECIFIED: Status is unspecified.
NEW: Status is NEW.
NEW_SAVING: Status is NEW_SAVING.
SUBMITTED: Status is SUBMITTED.
ACCEPTED: Status is ACCEPTED.
RUNNING: Status is RUNNING.
FINISHED: Status is FINISHED.
FAILED: Status is FAILED.
KILLED: Status is KILLED.
"""
STATE_UNSPECIFIED = 0
NEW = 1
NEW_SAVING = 2
SUBMITTED = 3
ACCEPTED = 4
RUNNING = 5
FINISHED = 6
FAILED = 7
KILLED = 8
name = _messages.StringField(1)
progress = _messages.FloatField(2, variant=_messages.Variant.FLOAT)
state = _messages.EnumField('StateValueValuesEnum', 3)
trackingUrl = _messages.StringField(4)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
import models
import util.validation
from database import db_txn
from linkr import db
from util.exception import *
@db_txn
def add_link(alias, outgoing_url, password=None, user_id=None, require_recaptcha=False):
"""
Add a new link to the database after performing necessary input validation.
:param alias: The link alias.
:param outgoing_url: The associated outgoing URL.
:param password: Plain-text password associated with this link, if applicable.
:param user_id: ID of the user to associate with this link, if applicable.
:param require_recaptcha: True to require ReCAPTCHA for accessing this link; False otherwise.
:return: An instance of models.Link representing the new entry.
:raises InvalidAliasException: If the alias is invalid.
:raises ReservedAliasException: If the alias is reserved.
:raises InvalidURLException: If the outgoing URL is invalid.
:raises UnavailableAliasException: If the alias already exists in the database.
"""
if not util.validation.is_alias_valid(alias):
raise InvalidAliasException('Alias `{alias}` is not URL safe'.format(alias=alias))
if util.validation.is_alias_reserved(alias):
raise ReservedAliasException('Alias `{alias}` is reserved'.format(alias=alias))
if not util.validation.is_url_valid(outgoing_url):
raise InvalidURLException('URL `{url}` is not a valid URL'.format(url=outgoing_url))
if models.Link.query.filter_by(alias=alias).scalar():
raise UnavailableAliasException('Alias `{alias}` already exists'.format(alias=alias))
new_link = models.Link(
alias=alias,
outgoing_url=outgoing_url,
password=password,
user_id=user_id,
require_recaptcha=require_recaptcha,
)
db.session.add(new_link)
return new_link
@db_txn
def edit_link(link_id, alias=None, outgoing_url=None):
"""
Edit an existing link's details.
:param link_id: The ID of the link to edit.
:param alias: The new alias of the link, or None to leave it unchanged.
:param outgoing_url: The new outgoing URL of the link, or None to leave it unchanged.
:return: The models.Link instance representing the modified link object.
:raises InvalidAliasException: If the alias is invalid.
:raises InvalidURLException: If the outgoing URL is invalid.
:raises NonexistentLinkException: If no link exists with the provided link ID.
"""
to_modify = get_link_by_id(link_id)
if not to_modify:
raise NonexistentLinkException('No link exists with link ID `{link_id}`'.format(
link_id=link_id,
))
if alias and not util.validation.is_alias_valid(alias):
raise InvalidAliasException('Alias `{alias}` is not URL safe'.format(alias=alias))
if alias and util.validation.is_alias_reserved(alias):
raise ReservedAliasException('Alias `{alias}` is reserved'.format(alias=alias))
if outgoing_url and not util.validation.is_url_valid(outgoing_url):
raise InvalidURLException('URL `{url}` is not a valid URL'.format(url=outgoing_url))
to_modify.edit(alias=alias, outgoing_url=outgoing_url)
db.session.add(to_modify)
return to_modify
@db_txn
def update_link_password(link_id, password):
"""
Update a link's password. This method allows adding a password to a previously
non-password-protected link, changing the password on a password-protected link, and removing
the password from a password-protected link.
:param link_id: ID of the link for which the password should be updated.
:param password: The new password to set.
:return: The models.Link instance representing the modified Link object.
:raises NonexistentLinkException: If no link exists with the provided link ID.
"""
to_modify = get_link_by_id(link_id)
if not to_modify:
raise NonexistentLinkException('No link exists with link ID `{link_id}`'.format(
link_id=link_id,
))
to_modify.update_password(password)
db.session.add(to_modify)
return to_modify
@db_txn
def delete_link(link_id):
"""
Delete a link from the database, if it exists.
:param link_id: The link ID to delete.
:return: The models.Link instance representing the deleted entry.
:raises NonexistentLinkException: If the link ID does not exist.
"""
to_delete = models.Link.query.filter_by(link_id=link_id)
if not to_delete.scalar():
raise NonexistentLinkException('Link ID `{link_id}` does not exist.'.format(
link_id=link_id,
))
to_delete.delete(synchronize_session='fetch')
return to_delete
@db_txn
def add_link_hit(link_id, remote_ip, referer, user_agent):
"""
Add a new link hit.
:param link_id: ID of the accessed link.
:param remote_ip: The remote IP address of the client.
:param referer: The referer of the hit.
:param user_agent: The client's user agent string.
:return: An instance of models.LinkHit representing the added entry.
:raises NonexistentLinkException: If the link ID does not exist.
"""
associated_link = models.Link.query.filter_by(link_id=link_id)
if not associated_link.scalar():
raise NonexistentLinkException('Link ID `{link_id}` does not exist.'.format(
link_id=link_id,
))
new_link_hit = models.LinkHit(
link_id=link_id,
remote_ip=remote_ip,
referer=referer,
user_agent=user_agent,
)
db.session.add(new_link_hit)
return new_link_hit
def get_link_hits_by_id(link_id, page_num=0, num_per_page=100):
"""
Retrieve paginated listing of link hits for a particular link ID.
:param link_id: The link ID whose hits should be retrieved.
:param page_num: The page number to use in the pagination, zero-indexed.
:param num_per_page: The number of hits to retrieve per page.
:return: A list of models.LinkHit instances describing link hits, ordered by timestamp (most
recent first).
"""
return models.LinkHit.query.filter_by(
link_id=link_id,
).order_by(
models.LinkHit.hit_id.desc()
).offset(
page_num * num_per_page
).limit(
num_per_page
).all()
def get_link_by_id(link_id):
"""
Retrieve a link by its ID.
:param link_id: The ID of the link.
:return: The models.Link entry, or None if nonexistent.
"""
return models.Link.query.filter_by(link_id=link_id).first()
def get_link_by_alias(alias):
"""
Retrieve a link by its alias.
:param alias: The alias of the link.
:return: The models.Link entry, or None if nonexistent.
"""
return models.Link.query.filter_by(alias=alias).first()
def get_links_like_alias(alias, page_num=0, num_per_page=100):
"""
Retrieve links whose aliases contain the input.
:param alias: A substring of an actual alias.
:param page_num: The page number to use in the pagination, zero-indexed.
:param num_per_page: The number of links to retrieve per page.
:return: All models.Link instances whose alias is a superstring of the input.
"""
return models.Link.query.filter(
models.Link.alias.like('%{alias}%'.format(alias=alias))
).offset(
page_num * num_per_page
).limit(
num_per_page
).all()
def get_links_for_user(user_id, page_num=0, num_per_page=100):
"""
Retrieve a paginated listing of all links created by a user.
:param user_id: The ID of the user for which links should be retrieved.
:param page_num: The page number to use in the pagination, zero-indexed.
:param num_per_page: The number of links to retrieve per page.
:return: A list of models.Link objects describing the links created by the specified user.
:raises NonexistentUserException: If no user exists with the provided user ID.
"""
if not models.User.query.filter_by(user_id=user_id).scalar():
raise NonexistentUserException('No user exists with user_id `{user_id}`'.format(
user_id=user_id,
))
return models.Link.query.filter_by(
user_id=user_id
).order_by(
models.Link.link_id.asc()
).offset(
page_num * num_per_page
).limit(
num_per_page
).all()
def get_recent_links(page_num=0, num_per_page=100):
"""
Retrieve paginated listing of recently created links.
:param page_num: The page number to use in the pagination, zero-indexed.
:param num_per_page: The number of links to retrieve per page.
:return: A list of models.Link instances describing recent links, ordered by timestamp (most
recent first).
"""
return models.Link.query.order_by(
models.Link.link_id.desc()
).offset(
page_num * num_per_page
).limit(
num_per_page
).all()
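# Illustrative usage sketch tying the helpers above together: create a link,
# record a hit against it, resolve it by alias, and page through its hits.
# It assumes the linkr application (Flask app and SQLAlchemy session) is
# configured and an application context is active; the alias, URL, and client
# values are placeholders.
def _example_link_workflow():
    new_link = add_link(alias='docs', outgoing_url='https://example.com/docs')
    add_link_hit(
        link_id=new_link.link_id,
        remote_ip='127.0.0.1',
        referer=None,
        user_agent='example-agent/1.0',
    )
    # Resolution by alias is what a redirect handler would typically do.
    resolved = get_link_by_alias('docs')
    return get_link_hits_by_id(resolved.link_id, page_num=0, num_per_page=10)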
|
database/link.py
|
import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
class PairFM(nn.Module):
def __init__(self,
user_num,
item_num,
factors=84,
epochs=20,
lr=0.001,
reg_1=0.,
reg_2=0.,
loss_type='BPR',
gpuid='0',
early_stop=True):
"""
Pair-wise FM Recommender Class
Parameters
----------
user_num : int, the number of users
item_num : int, the number of items
factors : int, the number of latent factors
epochs : int, number of training epochs
lr : float, learning rate
reg_1 : float, first-order regularization term
reg_2 : float, second-order regularization term
loss_type : str, loss function type
gpuid : str, GPU ID
early_stop : bool, whether to activate early stop mechanism
"""
super(PairFM, self).__init__()
os.environ['CUDA_VISIBLE_DEVICES'] = gpuid
cudnn.benchmark = True
self.epochs = epochs
self.lr = lr
self.reg_1 = reg_1
self.reg_2 = reg_2
self.embed_user = nn.Embedding(user_num, factors)
self.embed_item = nn.Embedding(item_num, factors)
self.u_bias = nn.Embedding(user_num, 1)
self.i_bias = nn.Embedding(item_num, 1)
self.bias_ = nn.Parameter(torch.tensor([0.0]))
# init weight
nn.init.normal_(self.embed_user.weight, std=0.01)
nn.init.normal_(self.embed_item.weight, std=0.01)
nn.init.constant_(self.u_bias.weight, 0.0)
nn.init.constant_(self.i_bias.weight, 0.0)
self.loss_type = loss_type
self.early_stop = early_stop
def forward(self, u, i, j):
user = self.embed_user(u)
item_i = self.embed_item(i)
item_j = self.embed_item(j)
# inner product part
pred_i = (user * item_i).sum(dim=-1, keepdim=True)
pred_j = (user * item_j).sum(dim=-1, keepdim=True)
# add bias
pred_i += self.u_bias(u) + self.i_bias(i) + self.bias_
pred_j += self.u_bias(u) + self.i_bias(j) + self.bias_
return pred_i.view(-1), pred_j.view(-1)
def fit(self, train_loader):
if torch.cuda.is_available():
self.cuda()
else:
self.cpu()
optimizer = optim.SGD(self.parameters(), lr=self.lr)
last_loss = 0.
for epoch in range(1, self.epochs + 1):
self.train()
current_loss = 0.
# set progress bar display
pbar = tqdm(train_loader)
pbar.set_description(f'[Epoch {epoch:03d}]')
for user, item_i, item_j, label in pbar:
if torch.cuda.is_available():
user = user.cuda()
item_i = item_i.cuda()
item_j = item_j.cuda()
label = label.cuda()
else:
user = user.cpu()
item_i = item_i.cpu()
item_j = item_j.cpu()
label = label.cpu()
self.zero_grad()
pred_i, pred_j = self.forward(user, item_i, item_j)
if self.loss_type == 'BPR':
loss = -(pred_i - pred_j).sigmoid().log().sum()
elif self.loss_type == 'HL':
loss = torch.clamp(1 - (pred_i - pred_j) * label, min=0).sum()
elif self.loss_type == 'TL':
loss = (pred_j - pred_i).sigmoid().mean() + pred_j.pow(2).sigmoid().mean()
else:
raise ValueError(f'Invalid loss type: {self.loss_type}')
loss += self.reg_1 * (self.embed_item.weight.norm(p=1) + self.embed_user.weight.norm(p=1))
loss += self.reg_2 * (self.embed_item.weight.norm() + self.embed_user.weight.norm())
if torch.isnan(loss):
raise ValueError('Loss is NaN or Inf: the current settings do not fit this recommender')
loss.backward()
optimizer.step()
pbar.set_postfix(loss=loss.item())
current_loss += loss.item()
self.eval()
delta_loss = float(current_loss - last_loss)
if (abs(delta_loss) < 1e-5) and self.early_stop:
print('Early stopping criterion satisfied')
break
else:
last_loss = current_loss
def predict(self, u, i):
pred_i, _ = self.forward(u, i, i)
return pred_i.cpu()
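# Illustrative usage sketch: train PairFM on a tiny synthetic batch of
# (user, positive item, negative item, label) tuples and score a few pairs.
# The data shapes and the label = 1 convention (only used by the 'HL' loss)
# are assumptions for this demo; real pairs would come from a negative sampler
# over implicit-feedback data.
def _example_pairfm_run():
    from torch.utils.data import DataLoader, TensorDataset
    n_users, n_items, n_samples = 10, 20, 256
    users = torch.randint(0, n_users, (n_samples,))
    pos_items = torch.randint(0, n_items, (n_samples,))
    neg_items = torch.randint(0, n_items, (n_samples,))
    labels = torch.ones(n_samples)
    loader = DataLoader(TensorDataset(users, pos_items, neg_items, labels),
                        batch_size=32, shuffle=True)
    model = PairFM(n_users, n_items, factors=16, epochs=2, lr=0.01)
    model.fit(loader)
    # predict() expects tensors on the same device the model ended up on.
    device = next(model.parameters()).device
    return model.predict(users[:5].to(device), pos_items[:5].to(device))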
|
daisy/model/pair/FMRecommender.py
|
| 0.908193 | 0.314051 |
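A minimal usage sketch for the PairFM recommender above (not part of the original repo): it assumes the PairFM class is in scope and that the DataLoader yields (user, item_i, item_j, label) index batches, which is inferred from fit(); all sizes and names below are illustrative.
# Hedged sketch, illustrative only: synthetic ids stand in for a real interaction log.
import torch
from torch.utils.data import DataLoader, TensorDataset

user_num, item_num, n_samples = 100, 500, 1024
users = torch.randint(0, user_num, (n_samples,))      # user indices
pos_items = torch.randint(0, item_num, (n_samples,))  # observed (positive) items
neg_items = torch.randint(0, item_num, (n_samples,))  # sampled negative items
labels = torch.ones(n_samples)                        # only consumed by the 'HL' hinge loss

train_loader = DataLoader(TensorDataset(users, pos_items, neg_items, labels),
                          batch_size=256, shuffle=True)

model = PairFM(user_num, item_num, factors=16, epochs=2, lr=0.01, loss_type='BPR')
model.fit(train_loader)
# predict() scores user/item pairs via the positive branch of forward();
# on a CUDA machine the index tensors would need to be moved to the GPU first.
scores = model.predict(users[:5], pos_items[:5])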
import os
import unittest
from armi.nuclearDataIO.cccc import isotxs
from armi.utils import plotting
from armi.reactor.tests import test_reactors
from armi.tests import ISOAA_PATH, TEST_ROOT
from armi.reactor.flags import Flags
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestPlotting(unittest.TestCase):
"""
Test and demonstrate some plotting capabilities of ARMI.
Notes
-----
These tests don't do a great job of making sure the plot appears correctly,
but they do check that the lines of code run, and that an image is produced, and
demonstrate how they are meant to be called.
"""
# Change to False when you want to inspect the plots. Change back please.
removeFiles = True
@classmethod
def setUpClass(cls):
cls.o, cls.r = test_reactors.loadTestReactor()
def test_plotDepthMap(self): # indirectly tests plot face map
# set some params to visualize
for i, b in enumerate(self.o.r.core.getBlocks()):
b.p.percentBu = i / 100
fName = plotting.plotBlockDepthMap(
self.r.core, param="percentBu", fName="depthMapPlot.png", depthIndex=2
)
self._checkExists(fName)
def test_plotAssemblyTypes(self):
plotting.plotAssemblyTypes(
self.r.core.parent.blueprints, "coreAssemblyTypes1.png"
)
self._checkExists("coreAssemblyTypes1.png")
def test_plotBlockFlux(self):
try:
xslib = isotxs.readBinary(ISOAA_PATH)
self.r.core.lib = xslib
blockList = self.r.core.getBlocks()
for _, b in enumerate(blockList):
b.p.mgFlux = range(33)
plotting.plotBlockFlux(self.r.core, fName="flux.png", bList=blockList)
self.assertTrue(os.path.exists("flux.png"))
plotting.plotBlockFlux(
self.r.core, fName="peak.png", bList=blockList, peak=True
)
self.assertTrue(os.path.exists("peak.png"))
plotting.plotBlockFlux(
self.r.core,
fName="bList2.png",
bList=blockList,
bList2=blockList,
)
self.assertTrue(os.path.exists("bList2.png"))
# can't test adjoint at the moment, testBlock doesn't like to .getMgFlux(adjoint=True)
finally:
os.remove("flux.txt") # secondarily created during the call.
os.remove("flux.png") # created during the call.
os.remove("peak.txt") # csecondarily reated during the call.
os.remove("peak.png") # created during the call.
os.remove("bList2.txt") # secondarily created during the call.
os.remove("bList2.png") # created during the call.
def test_plotHexBlock(self):
with TemporaryDirectoryChanger():
first_fuel_block = self.r.core.getFirstBlock(Flags.FUEL)
first_fuel_block.autoCreateSpatialGrids()
plotting.plotBlockDiagram(first_fuel_block, "blockDiagram23.svg", True)
self.assertTrue(os.path.exists("blockDiagram23.svg"))
def test_plotCartesianBlock(self):
from armi import settings
from armi.reactor import blueprints, reactors
with TemporaryDirectoryChanger():
cs = settings.Settings(
os.path.join(TEST_ROOT, "tutorials", "c5g7-settings.yaml")
)
blueprint = blueprints.loadFromCs(cs)
_ = reactors.factory(cs, blueprint)
for name, bDesign in blueprint.blockDesigns.items():
b = bDesign.construct(cs, blueprint, 0, 1, 1, "AA", {})
plotting.plotBlockDiagram(b, "{}.svg".format(name), True)
self.assertTrue(os.path.exists("uo2.svg"))
self.assertTrue(os.path.exists("mox.svg"))
def _checkExists(self, fName):
self.assertTrue(os.path.exists(fName))
if self.removeFiles:
os.remove(fName)
if __name__ == "__main__":
unittest.main()
|
armi/utils/tests/test_plotting.py
|
| 0.626696 | 0.48621 |
import os
import shutil
import unittest
import tempfile
from telemetry.core import util
from telemetry.internal.backends.chrome import crx_id
class CrxIdUnittest(unittest.TestCase):
CRX_ID_DIR = util.GetUnittestDataDir()
PACKED_CRX = os.path.join(CRX_ID_DIR,
'jebgalgnebhfojomionfpkfelancnnkf.crx')
PACKED_APP_ID = 'jebgalgnebhfojomionfpkfelancnnkf'
PACKED_HASH_BYTES = \
'{0x94, 0x16, 0x0b, 0x6d, 0x41, 0x75, 0xe9, 0xec,' \
' 0x8e, 0xd5, 0xfa, 0x54, 0xb0, 0xd2, 0xdd, 0xa5,' \
' 0x6e, 0x05, 0x6b, 0xe8, 0x73, 0x47, 0xf6, 0xc4,' \
' 0x11, 0x9f, 0xbc, 0xb3, 0x09, 0xb3, 0x5b, 0x40}'
UNPACKED_APP_ID = 'cbcdidchbppangcjoddlpdjlenngjldk'
UNPACKED_HASH_BYTES = \
'{0x21, 0x23, 0x83, 0x27, 0x1f, 0xf0, 0xd6, 0x29,' \
' 0xe3, 0x3b, 0xf3, 0x9b, 0x4d, 0xd6, 0x9b, 0x3a,' \
' 0xff, 0x7d, 0x6b, 0xc4, 0x78, 0x30, 0x47, 0xa6,' \
' 0x23, 0x12, 0x72, 0x84, 0x9b, 0x9a, 0xf6, 0x3c}'
def testPackedHashAppId(self):
""" Test the output generated for a canned, packed CRX. """
self.assertEqual(crx_id.GetCRXAppID(self.PACKED_CRX),
self.PACKED_APP_ID)
self.assertEqual(crx_id.GetCRXHash(self.PACKED_CRX),
self.PACKED_HASH_BYTES)
def testUnpackedHashAppId(self):
""" Test the output generated for a canned, unpacked extension. """
unpacked_test_manifest_path = os.path.join(
self.CRX_ID_DIR, 'manifest_with_key.json')
temp_unpacked_crx = tempfile.mkdtemp()
shutil.copy2(unpacked_test_manifest_path,
os.path.join(temp_unpacked_crx, 'manifest.json'))
self.assertEqual(crx_id.GetCRXAppID(temp_unpacked_crx),
self.UNPACKED_APP_ID)
self.assertEqual(crx_id.GetCRXHash(temp_unpacked_crx),
self.UNPACKED_HASH_BYTES)
self.assertTrue(crx_id.HasPublicKey(temp_unpacked_crx))
shutil.rmtree(temp_unpacked_crx)
def testFromFilePath(self):
""" Test calculation of extension id from file paths. """
self.assertEqual(crx_id.GetCRXAppID('/tmp/temp_extension',
from_file_path=True),
'ajbbicncdkdlchpjplgjaglppbcbmaji')
def testFromWindowsPath(self):
self.assertEqual(crx_id.GetCRXAppID(r'D:\Documents\chrome\test_extension',
from_file_path=True,
is_win_path=True),
'fegemedmbnhglnecjgbdhekaghkccplm')
# Test drive letter normalization.
k_win_path_id = 'aiinlcdagjihibappcdnnhcccdokjlaf'
self.assertEqual(crx_id.GetCRXAppID(r'c:\temp_extension',
from_file_path=True,
is_win_path=True),
k_win_path_id)
self.assertEqual(crx_id.GetCRXAppID(r'C:\temp_extension',
from_file_path=True,
is_win_path=True),
k_win_path_id)
|
telemetry/telemetry/internal/backends/chrome/crx_id_unittest.py
|
| 0.255715 | 0.243238 |
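A tiny sketch reusing only the GetCRXAppID call pattern already exercised by testFromFilePath above; the path is an arbitrary illustrative example, not a real extension.
# Hedged sketch: with from_file_path=True the app id is derived from the path itself.
from telemetry.internal.backends.chrome import crx_id

app_id = crx_id.GetCRXAppID('/tmp/temp_extension', from_file_path=True)
print(app_id)  # 32-character id in the a-p alphabet, e.g. 'ajbbicncdkdlchpjplgjaglppbcbmaji'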
import base64
import json
import random
from pathlib import Path
import imagehash
import numpy as np
from raymon.profiling.extractors import SimpleExtractor
class FixedSubpatchSimilarity(SimpleExtractor):
_attrs = ["patch", "refs"]
    _patch_keys = ["x0", "y0", "x1", "y1"]
def __init__(self, patch, refs=None, nrefs=10, idfr=None):
"""[summary]
Args:
patch ([int], optional): [description]. The x0, y0, x1, y1 of the patch to look at.
refs ([np.array], optional): [description]. References of what the patch should look like
"""
self._nrefs = None
self._patch = None
self._refs = None
self._idfr = None
self.patch = patch
self.nrefs = nrefs
self.refs = refs
self.idfr = idfr
"""
PROPERTIES
"""
@property
def patch(self):
return self._patch
@patch.setter
def patch(self, value):
if isinstance(value, dict):
self._patch = {key: value[key] for key in self._patch_keys}
elif isinstance(value, list) and len(value) == 4:
self._patch = {key: value[i] for i, key in enumerate(self._patch_keys)}
else:
raise ValueError(f"patch must be a dict or list, not {type(value)}")
# make sure the correct keys are there
print(f"Patch set to: {self._patch} for {self}")
@property
def refs(self):
return self._refs
@refs.setter
def refs(self, value):
if value is None:
self._refs = None
return
if not (isinstance(value, list) and len(value) == self.nrefs):
raise ValueError(f"refs should be a list of length {self.nrefs}")
parsed_refs = []
for ref in value:
if isinstance(ref, imagehash.ImageHash):
parsed_refs.append(ref)
elif isinstance(ref, str):
parsed_refs.append(imagehash.hex_to_hash(ref))
else:
raise ValueError(f"refs should either be str or ImageHash, not {type(ref)}")
self._refs = parsed_refs
@property
def nrefs(self):
return self._nrefs
@nrefs.setter
def nrefs(self, value):
value = int(value)
if not (isinstance(value, int) and value > 0):
self._nrefs = None
raise ValueError(f"nrefs should be a an int > 0")
self._nrefs = value
@property
def idfr(self):
return self._idfr
@idfr.setter
def idfr(self, value):
self._idfr = str(value)
"""Feature extractor"""
def extract(self, data):
phash = self._extract(data)
dist = min(abs(ref - phash) for ref in self.refs)
return dist
def _extract(self, data):
patch = [self.patch["x0"], self.patch["y0"], self.patch["x1"], self.patch["y1"]]
crop = data.crop(box=patch)
phash = imagehash.phash(crop)
return phash
"""Serializable interface """
def to_jcr(self):
data = {
"patch": self.patch,
"refs": [str(ref) for ref in self.refs] if self.refs is not None else None,
"nrefs": self.nrefs,
}
state = {"class": self.class2str(), "state": data}
return state
@classmethod
def from_jcr(cls, jcr):
patch, refs, nrefs, idfr = None, None, None, None
if "patch" in jcr:
patch = jcr["patch"]
if "nrefs" in jcr:
nrefs = jcr["nrefs"]
if "refs" in jcr:
refs = jcr["refs"]
if "idfr" in jcr:
refs = jcr["idfr"]
return cls(patch=patch, refs=refs, nrefs=nrefs, idfr=idfr)
"""Buildable interface"""
def build(self, data):
refs = []
chosen_samples = random.choices(data, k=self.nrefs)
for sample in chosen_samples:
ref = self._extract(sample)
refs.append(ref)
self.refs = refs
def is_built(self):
return self.refs is not None and len(self.refs) == self.nrefs
def __str__(self):
return f"{self.class2str()} ({self.idfr})"
|
raymon/profiling/extractors/vision/similarity.py
|
| 0.737158 | 0.224491 |
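A hedged sketch of how the FixedSubpatchSimilarity extractor above might be built and queried; the synthetic PIL images, patch coordinates, and nrefs value are illustrative assumptions, and the raymon, imagehash and Pillow packages are assumed to be installed so the SimpleExtractor base class is available.
# Illustrative only: random images stand in for real profiling data.
import numpy as np
from PIL import Image

rng = np.random.default_rng(0)
images = [Image.fromarray(rng.integers(0, 255, (128, 128, 3), dtype=np.uint8))
          for _ in range(20)]

extractor = FixedSubpatchSimilarity(patch=[0, 0, 64, 64], nrefs=5, idfr="logo-check")
extractor.build(images)              # samples nrefs images and stores their patch phashes
assert extractor.is_built()

dist = extractor.extract(images[0])  # hamming distance to the closest reference hash
state = extractor.to_jcr()           # JSON-compatible dict with patch, refs and nrefs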
from dataclasses import dataclass
from datetime import date, timedelta, datetime
from typing import Iterable, Union, List
import pandas as pd
import numpy as np
@dataclass
class DateRange:
start_date: date
end_date: date
def __init__(self, start_date, end_date):
self.start_date = safe_convert_to_date(start_date)
self.end_date = safe_convert_to_date(end_date)
def intersection(self, other):
new_start = max(self.start_date, other.start_date)
new_end = min(self.end_date, other.end_date)
if new_start < new_end:
return DateRange(new_start, new_end)
    def date_range(self) -> np.ndarray:
return pd.date_range(self.start_date, self.end_date, freq="D").date
    def total_days(self) -> float:
return np.timedelta64(
self.end_date - self.start_date).astype(
'timedelta64[D]').astype(np.float32)
def split_into_years(self) -> List:
duration = self.end_date - self.start_date
num_years = duration / timedelta(days=365)
if num_years <= 1:
return [self]
else:
end_date = self.end_date
new_date_ranges = []
for year_back in range(np.ceil(num_years).astype(int)):
start_date = end_date - timedelta(days=365)
if start_date < self.start_date:
start_date = self.start_date
new_date_ranges.append(DateRange(start_date, end_date))
end_date = start_date
return new_date_ranges
def get_date_range_list(dates: Iterable[date]) -> List[DateRange]:
if not dates:
return []
dates = np.array(dates)
dates = np.sort(dates)
dates_diff = np.diff(dates)
location_of_gaps = np.where(dates_diff > timedelta(days=1))[0]
index_of_last_date = len(dates) - 1
location_of_gaps = list(location_of_gaps)
location_of_gaps.append(index_of_last_date)
start_i = 0
date_range_list = []
for end_i in location_of_gaps:
date_range = DateRange(
start_date=dates[start_i],
end_date=dates[end_i])
date_range_list.append(date_range)
start_i = end_i + 1
return date_range_list
def safe_convert_to_date(dt: Union[datetime, date, str]) -> date:
if isinstance(dt, str):
dt = pd.Timestamp(dt)
if isinstance(dt, datetime):
return dt.date()
if isinstance(dt, date):
return dt
def merge_date_ranges_to_years(
date_ranges: Iterable[DateRange]) -> List[DateRange]:
"""
Args:
date_ranges: List of DateRanges, in ascending chronological order.
Returns:
List of DateRanges, each representing a year, in descending order.
"""
if not date_ranges:
return []
# Split multi-year date ranges
date_ranges_split = []
for date_range in date_ranges[::-1]:
date_ranges_split.extend(date_range.split_into_years())
years_to_download = []
for date_range in date_ranges_split:
if years_to_download:
intersection = date_range.intersection(years_to_download[-1])
if intersection == date_range:
# date_range falls within the last year to retrieve,
# so we can ignore this date_range
continue
elif intersection is None:
# No overlap
date_to = date_range.end_date
else:
# Overlap
date_to = intersection.start_date
else:
date_to = date_range.end_date
date_from = date_to - timedelta(days=365)
years_to_download.append(DateRange(date_from, date_to))
return years_to_download
|
pvoutput/daterange.py
|
| 0.844537 | 0.314656 |
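A brief sketch exercising the date-range helpers above with made-up dates; it assumes DateRange, get_date_range_list and merge_date_ranges_to_years are in scope from the module.
# Illustrative only: the dates are arbitrary.
from datetime import date, timedelta

# Two runs of consecutive days separated by a gap -> two DateRanges.
dates = [date(2019, 1, 1) + timedelta(days=i) for i in range(5)]
dates += [date(2019, 3, 1) + timedelta(days=i) for i in range(3)]
ranges = get_date_range_list(dates)   # [2019-01-01..2019-01-05, 2019-03-01..2019-03-03]

# Strings and datetimes are normalised to plain dates on construction.
dr = DateRange("2018-06-01", date(2020, 6, 1))
print(dr.total_days())                # 731.0 (spans the 2020 leap day)
print(dr.split_into_years())          # broken into at most year-long chunks, newest first

# Merge ascending ranges into descending, non-overlapping one-year download windows.
years = merge_date_ranges_to_years(ranges)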
import os
import mock
from oslo_serialization import jsonutils
from watcher.common import context
from watcher.common import exception
from watcher.common import nova_helper
from watcher.common import service as watcher_service
from watcher.decision_engine.model import element
from watcher.decision_engine.model.notification import nova as novanotification
from watcher.tests import base as base_test
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model.notification import fake_managers
class NotificationTestCase(base_test.TestCase):
@staticmethod
def load_message(filename):
cwd = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(cwd, "data")
with open(os.path.join(data_folder, filename), 'rb') as json_file:
json_data = jsonutils.load(json_file)
return json_data
class TestReceiveNovaNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
FAKE_NOTIFICATIONS = {
'instance.create.end': 'instance-create-end.json',
'instance.lock': 'instance-lock.json',
'instance.unlock': 'instance-unlock.json',
'instance.pause.end': 'instance-pause-end.json',
'instance.power_off.end': 'instance-power_off-end.json',
'instance.power_on.end': 'instance-power_on-end.json',
'instance.resize_confirm.end': 'instance-resize_confirm-end.json',
'instance.restore.end': 'instance-restore-end.json',
'instance.resume.end': 'instance-resume-end.json',
'instance.shelve.end': 'instance-shelve-end.json',
'instance.shutdown.end': 'instance-shutdown-end.json',
'instance.suspend.end': 'instance-suspend-end.json',
'instance.unpause.end': 'instance-unpause-end.json',
'instance.unrescue.end': 'instance-unrescue-end.json',
'instance.unshelve.end': 'instance-unshelve-end.json',
'instance.rebuild.end': 'instance-rebuild-end.json',
'instance.rescue.end': 'instance-rescue-end.json',
'instance.update': 'instance-update.json',
'instance.live_migration_force_complete.end':
'instance-live_migration_force_complete-end.json',
'instance.live_migration_post_dest.end':
'instance-live_migration_post_dest-end.json',
'instance.delete.end': 'instance-delete-end.json',
'instance.soft_delete.end': 'instance-soft_delete-end.json',
'service.create': 'service-create.json',
'service.delete': 'service-delete.json',
'service.update': 'service-update.json',
}
def setUp(self):
super(TestReceiveNovaNotifications, self).setUp()
p_from_dict = mock.patch.object(context.RequestContext, 'from_dict')
m_from_dict = p_from_dict.start()
m_from_dict.return_value = self.context
self.addCleanup(p_from_dict.stop)
p_heartbeat = mock.patch.object(
watcher_service.ServiceHeartbeat, "send_beat")
self.m_heartbeat = p_heartbeat.start()
self.addCleanup(p_heartbeat.stop)
@mock.patch.object(novanotification.VersionedNotification, 'info')
def test_receive_nova_notifications(self, m_info):
de_service = watcher_service.Service(fake_managers.FakeManager)
n_dicts = novanotification.VersionedNotification.notification_mapping
for n_type in n_dicts.keys():
n_json = self.FAKE_NOTIFICATIONS[n_type]
message = self.load_message(n_json)
expected_message = message['payload']
publisher_id = message['publisher_id']
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_with(
self.context, publisher_id, n_type,
expected_message, self.FAKE_METADATA)
class TestNovaNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
def setUp(self):
super(TestNovaNotifications, self).setUp()
# fake cluster
self.fake_cdmc = faker_cluster_state.FakerModelCollector()
def test_nova_service_update(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
node0_uuid = 'Node_0'
node0 = compute_model.get_node_by_uuid(node0_uuid)
message = self.load_message('scenario3_service-update-disabled.json')
self.assertEqual('hostname_0', node0.hostname)
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual('Node_0', node0.hostname)
self.assertEqual(element.ServiceState.OFFLINE.value, node0.state)
self.assertEqual(element.ServiceState.DISABLED.value, node0.status)
message = self.load_message('scenario3_service-update-enabled.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual('Node_0', node0.hostname)
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
@mock.patch.object(nova_helper, "NovaHelper")
def test_nova_service_create(self, m_nova_helper_cls):
m_get_compute_node_by_hostname = mock.Mock(
side_effect=lambda uuid: mock.Mock(
name='m_get_compute_node_by_hostname',
id=3,
hypervisor_hostname="host2",
state='up',
status='enabled',
uuid=uuid,
memory_mb=7777,
vcpus=42,
free_disk_gb=974,
local_gb=1337))
m_nova_helper_cls.return_value = mock.Mock(
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
name='m_nova_helper')
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
new_node_uuid = 'host2'
self.assertRaises(
exception.ComputeNodeNotFound,
compute_model.get_node_by_uuid, new_node_uuid)
message = self.load_message('service-create.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
new_node = compute_model.get_node_by_uuid(new_node_uuid)
self.assertEqual('host2', new_node.hostname)
self.assertEqual(element.ServiceState.ONLINE.value, new_node.state)
self.assertEqual(element.ServiceState.ENABLED.value, new_node.status)
def test_nova_service_delete(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
node0_uuid = 'Node_0'
# Before
self.assertTrue(compute_model.get_node_by_uuid(node0_uuid))
message = self.load_message('service-delete.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# After
self.assertRaises(
exception.ComputeNodeNotFound,
compute_model.get_node_by_uuid, node0_uuid)
def test_nova_instance_update(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-update.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
@mock.patch.object(nova_helper, "NovaHelper")
def test_nova_instance_update_notfound_still_creates(
self, m_nova_helper_cls):
m_get_compute_node_by_hostname = mock.Mock(
side_effect=lambda uuid: mock.Mock(
name='m_get_compute_node_by_hostname',
id=3,
hypervisor_hostname="Node_2",
state='up',
status='enabled',
uuid=uuid,
memory_mb=7777,
vcpus=42,
free_disk_gb=974,
local_gb=1337))
m_nova_helper_cls.return_value = mock.Mock(
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
name='m_nova_helper')
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
message = self.load_message('scenario3_notfound_instance-update.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
self.assertEqual(1, instance0.vcpus)
self.assertEqual(1, instance0.disk_capacity)
self.assertEqual(512, instance0.memory)
m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
node_2 = compute_model.get_node_by_uuid('Node_2')
self.assertEqual(7777, node_2.memory)
self.assertEqual(42, node_2.vcpus)
self.assertEqual(974, node_2.disk)
self.assertEqual(1337, node_2.disk_capacity)
@mock.patch.object(nova_helper, "NovaHelper")
def test_instance_update_node_notfound_set_unmapped(
self, m_nova_helper_cls):
m_get_compute_node_by_hostname = mock.Mock(
side_effect=exception.ComputeNodeNotFound(name="TEST"))
m_nova_helper_cls.return_value = mock.Mock(
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
name='m_nova_helper')
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
message = self.load_message(
'scenario3_notfound_instance-update.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
self.assertEqual(1, instance0.vcpus)
self.assertEqual(1, instance0.disk)
self.assertEqual(1, instance0.disk_capacity)
self.assertEqual(512, instance0.memory)
m_get_compute_node_by_hostname.assert_any_call('Node_2')
self.assertRaises(
exception.ComputeNodeNotFound,
compute_model.get_node_by_uuid, 'Node_2')
def test_nova_instance_create(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2'
self.assertRaises(
exception.InstanceNotFound,
compute_model.get_instance_by_uuid, instance0_uuid)
message = self.load_message('instance-create-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
self.assertEqual(1, instance0.vcpus)
self.assertEqual(1, instance0.disk_capacity)
self.assertEqual(512, instance0.memory)
def test_nova_instance_delete_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
# Before
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
message = self.load_message('instance-delete-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# After
self.assertRaises(
exception.InstanceNotFound,
compute_model.get_instance_by_uuid, instance0_uuid)
def test_nova_instance_soft_delete_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
# Before
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
message = self.load_message('instance-soft_delete-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# After
self.assertRaises(
exception.InstanceNotFound,
compute_model.get_instance_by_uuid, instance0_uuid)
def test_live_migrated_force_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message(
'instance-live_migration_force_complete-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_live_migrated_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message(
'instance-live_migration_post_dest-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_lock(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-lock.json')
self.assertFalse(instance0.locked)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertTrue(instance0.locked)
message = self.load_message('instance-unlock.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertFalse(instance0.locked)
def test_nova_instance_pause(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-pause-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
message = self.load_message('instance-unpause-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_power_on_off(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-power_off-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.STOPPED.value, instance0.state)
message = self.load_message('instance-power_on-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_instance_rebuild_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message('instance-rebuild-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_rescue(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-rescue-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.RESCUED.value, instance0.state)
message = self.load_message('instance-unrescue-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_instance_resize_confirm_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message(
'instance-resize_confirm-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_restore_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-restore-end.json')
instance0.state = element.InstanceState.ERROR.value
self.assertEqual(element.InstanceState.ERROR.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_resume_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-resume-end.json')
instance0.state = element.InstanceState.ERROR.value
self.assertEqual(element.InstanceState.ERROR.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_shelve(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-shelve-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.SHELVED.value, instance0.state)
message = self.load_message('instance-unshelve-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_shutdown_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-shutdown-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.STOPPED.value, instance0.state)
def test_nova_instance_suspend_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-suspend-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(
element.InstanceState.SUSPENDED.value, instance0.state)
|
python-watcher-2.0.0/watcher/tests/decision_engine/model/notification/test_nova_notifications.py
|
import os
import mock
from oslo_serialization import jsonutils
from watcher.common import context
from watcher.common import exception
from watcher.common import nova_helper
from watcher.common import service as watcher_service
from watcher.decision_engine.model import element
from watcher.decision_engine.model.notification import nova as novanotification
from watcher.tests import base as base_test
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model.notification import fake_managers
class NotificationTestCase(base_test.TestCase):
@staticmethod
def load_message(filename):
cwd = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(cwd, "data")
with open(os.path.join(data_folder, filename), 'rb') as json_file:
json_data = jsonutils.load(json_file)
return json_data
class TestReceiveNovaNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
FAKE_NOTIFICATIONS = {
'instance.create.end': 'instance-create-end.json',
'instance.lock': 'instance-lock.json',
'instance.unlock': 'instance-unlock.json',
'instance.pause.end': 'instance-pause-end.json',
'instance.power_off.end': 'instance-power_off-end.json',
'instance.power_on.end': 'instance-power_on-end.json',
'instance.resize_confirm.end': 'instance-resize_confirm-end.json',
'instance.restore.end': 'instance-restore-end.json',
'instance.resume.end': 'instance-resume-end.json',
'instance.shelve.end': 'instance-shelve-end.json',
'instance.shutdown.end': 'instance-shutdown-end.json',
'instance.suspend.end': 'instance-suspend-end.json',
'instance.unpause.end': 'instance-unpause-end.json',
'instance.unrescue.end': 'instance-unrescue-end.json',
'instance.unshelve.end': 'instance-unshelve-end.json',
'instance.rebuild.end': 'instance-rebuild-end.json',
'instance.rescue.end': 'instance-rescue-end.json',
'instance.update': 'instance-update.json',
'instance.live_migration_force_complete.end':
'instance-live_migration_force_complete-end.json',
'instance.live_migration_post_dest.end':
'instance-live_migration_post_dest-end.json',
'instance.delete.end': 'instance-delete-end.json',
'instance.soft_delete.end': 'instance-soft_delete-end.json',
'service.create': 'service-create.json',
'service.delete': 'service-delete.json',
'service.update': 'service-update.json',
}
def setUp(self):
super(TestReceiveNovaNotifications, self).setUp()
p_from_dict = mock.patch.object(context.RequestContext, 'from_dict')
m_from_dict = p_from_dict.start()
m_from_dict.return_value = self.context
self.addCleanup(p_from_dict.stop)
p_heartbeat = mock.patch.object(
watcher_service.ServiceHeartbeat, "send_beat")
self.m_heartbeat = p_heartbeat.start()
self.addCleanup(p_heartbeat.stop)
@mock.patch.object(novanotification.VersionedNotification, 'info')
def test_receive_nova_notifications(self, m_info):
de_service = watcher_service.Service(fake_managers.FakeManager)
n_dicts = novanotification.VersionedNotification.notification_mapping
for n_type in n_dicts.keys():
n_json = self.FAKE_NOTIFICATIONS[n_type]
message = self.load_message(n_json)
expected_message = message['payload']
publisher_id = message['publisher_id']
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_with(
self.context, publisher_id, n_type,
expected_message, self.FAKE_METADATA)
class TestNovaNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
def setUp(self):
super(TestNovaNotifications, self).setUp()
# fake cluster
self.fake_cdmc = faker_cluster_state.FakerModelCollector()
def test_nova_service_update(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
node0_uuid = 'Node_0'
node0 = compute_model.get_node_by_uuid(node0_uuid)
message = self.load_message('scenario3_service-update-disabled.json')
self.assertEqual('hostname_0', node0.hostname)
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual('Node_0', node0.hostname)
self.assertEqual(element.ServiceState.OFFLINE.value, node0.state)
self.assertEqual(element.ServiceState.DISABLED.value, node0.status)
message = self.load_message('scenario3_service-update-enabled.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual('Node_0', node0.hostname)
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
@mock.patch.object(nova_helper, "NovaHelper")
def test_nova_service_create(self, m_nova_helper_cls):
m_get_compute_node_by_hostname = mock.Mock(
side_effect=lambda uuid: mock.Mock(
name='m_get_compute_node_by_hostname',
id=3,
hypervisor_hostname="host2",
state='up',
status='enabled',
uuid=uuid,
memory_mb=7777,
vcpus=42,
free_disk_gb=974,
local_gb=1337))
m_nova_helper_cls.return_value = mock.Mock(
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
name='m_nova_helper')
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
new_node_uuid = 'host2'
self.assertRaises(
exception.ComputeNodeNotFound,
compute_model.get_node_by_uuid, new_node_uuid)
message = self.load_message('service-create.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
new_node = compute_model.get_node_by_uuid(new_node_uuid)
self.assertEqual('host2', new_node.hostname)
self.assertEqual(element.ServiceState.ONLINE.value, new_node.state)
self.assertEqual(element.ServiceState.ENABLED.value, new_node.status)
def test_nova_service_delete(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
node0_uuid = 'Node_0'
# Before
self.assertTrue(compute_model.get_node_by_uuid(node0_uuid))
message = self.load_message('service-delete.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# After
self.assertRaises(
exception.ComputeNodeNotFound,
compute_model.get_node_by_uuid, node0_uuid)
def test_nova_instance_update(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-update.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
@mock.patch.object(nova_helper, "NovaHelper")
def test_nova_instance_update_notfound_still_creates(
self, m_nova_helper_cls):
m_get_compute_node_by_hostname = mock.Mock(
side_effect=lambda uuid: mock.Mock(
name='m_get_compute_node_by_hostname',
id=3,
hypervisor_hostname="Node_2",
state='up',
status='enabled',
uuid=uuid,
memory_mb=7777,
vcpus=42,
free_disk_gb=974,
local_gb=1337))
m_nova_helper_cls.return_value = mock.Mock(
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
name='m_nova_helper')
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
message = self.load_message('scenario3_notfound_instance-update.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
self.assertEqual(1, instance0.vcpus)
self.assertEqual(1, instance0.disk_capacity)
self.assertEqual(512, instance0.memory)
m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
node_2 = compute_model.get_node_by_uuid('Node_2')
self.assertEqual(7777, node_2.memory)
self.assertEqual(42, node_2.vcpus)
self.assertEqual(974, node_2.disk)
self.assertEqual(1337, node_2.disk_capacity)
@mock.patch.object(nova_helper, "NovaHelper")
def test_instance_update_node_notfound_set_unmapped(
self, m_nova_helper_cls):
m_get_compute_node_by_hostname = mock.Mock(
side_effect=exception.ComputeNodeNotFound(name="TEST"))
m_nova_helper_cls.return_value = mock.Mock(
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
name='m_nova_helper')
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
message = self.load_message(
'scenario3_notfound_instance-update.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
self.assertEqual(1, instance0.vcpus)
self.assertEqual(1, instance0.disk)
self.assertEqual(1, instance0.disk_capacity)
self.assertEqual(512, instance0.memory)
m_get_compute_node_by_hostname.assert_any_call('Node_2')
self.assertRaises(
exception.ComputeNodeNotFound,
compute_model.get_node_by_uuid, 'Node_2')
def test_nova_instance_create(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2'
self.assertRaises(
exception.InstanceNotFound,
compute_model.get_instance_by_uuid, instance0_uuid)
message = self.load_message('instance-create-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
self.assertEqual(1, instance0.vcpus)
self.assertEqual(1, instance0.disk_capacity)
self.assertEqual(512, instance0.memory)
def test_nova_instance_delete_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
# Before
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
message = self.load_message('instance-delete-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# After
self.assertRaises(
exception.InstanceNotFound,
compute_model.get_instance_by_uuid, instance0_uuid)
def test_nova_instance_soft_delete_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
# Before
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
message = self.load_message('instance-soft_delete-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# After
self.assertRaises(
exception.InstanceNotFound,
compute_model.get_instance_by_uuid, instance0_uuid)
def test_live_migrated_force_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message(
'instance-live_migration_force_complete-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_live_migrated_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message(
'instance-live_migration_post_dest-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_lock(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-lock.json')
self.assertFalse(instance0.locked)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertTrue(instance0.locked)
message = self.load_message('instance-unlock.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertFalse(instance0.locked)
def test_nova_instance_pause(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-pause-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
message = self.load_message('instance-unpause-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_power_on_off(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-power_off-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.STOPPED.value, instance0.state)
message = self.load_message('instance-power_on-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_instance_rebuild_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message('instance-rebuild-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_rescue(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-rescue-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.RESCUED.value, instance0.state)
message = self.load_message('instance-unrescue-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_instance_resize_confirm_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_0', node.uuid)
message = self.load_message(
'instance-resize_confirm-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
self.assertEqual('Node_1', node.uuid)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_restore_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-restore-end.json')
instance0.state = element.InstanceState.ERROR.value
self.assertEqual(element.InstanceState.ERROR.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_resume_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-resume-end.json')
instance0.state = element.InstanceState.ERROR.value
self.assertEqual(element.InstanceState.ERROR.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_shelve(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-shelve-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.SHELVED.value, instance0.state)
message = self.load_message('instance-unshelve-end.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
def test_nova_instance_shutdown_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-shutdown-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(element.InstanceState.STOPPED.value, instance0.state)
def test_nova_instance_suspend_end(self):
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
self.fake_cdmc.cluster_data_model = compute_model
handler = novanotification.VersionedNotification(self.fake_cdmc)
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
message = self.load_message('instance-suspend-end.json')
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
self.assertEqual(
element.InstanceState.SUSPENDED.value, instance0.state)
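# Illustrative sketch, not used by the tests above: every test repeats the same
# load-message / handler.info / assert pattern, so a small module-level helper
# like the one below could feed a loaded notification into a handler in one
# call. The helper name is an assumption for the example, not part of Watcher.
def _feed_notification(handler, ctxt, message, metadata):
    handler.info(
        ctxt=ctxt,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=metadata,
    )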
| 0.496338 | 0.096748 |
import pandas as pd
import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn import metrics
from utils.constants import Maps
# Pandas options for better printing
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.width', 1000)
# Read in our scored maps generated in the map_score script
map_scores = pd.read_csv('results/scored_maps.csv')
# Limit our results to 2021 maps
map_scores = map_scores[map_scores['season'] == 2021]
# Duplicate and mirror our map scores so that we can get a row for each team on attack and defense on each map
map_scores_swapped = map_scores.copy(deep=True)
map_scores_swapped['team_one_score'] = map_scores['team_two_score']
map_scores_swapped['team_two_score'] = map_scores['team_one_score']
map_scores_swapped['team_one_name'] = map_scores['team_two_name']
map_scores_swapped['team_two_name'] = map_scores['team_one_name']
map_scores = pd.concat([map_scores_swapped, map_scores])
map_scores = map_scores.dropna()
players_per_map = pd.read_csv('results/players_per_map.csv')
full_frame = map_scores \
.merge(players_per_map, left_on=['match_id', 'map_name', 'team_one_name'],
right_on=['match_id', 'map_name', 'team']) \
.merge(players_per_map, left_on=['match_id', 'map_name', 'team_two_name'],
right_on=['match_id', 'map_name', 'team'], suffixes=('_team_one', '_team_two'))
full_frame = full_frame[
['team_one_score', 'player1_team_one', 'player2_team_one', 'player3_team_one', 'player4_team_one',
'player5_team_one', 'player6_team_one', 'player1_team_two', 'player2_team_two', 'player3_team_two',
'player4_team_two', 'player5_team_two', 'player6_team_two']]
players = list(full_frame[
['player1_team_one', 'player2_team_one', 'player3_team_one', 'player4_team_one', 'player5_team_one',
'player6_team_one', 'player1_team_two', 'player2_team_two', 'player3_team_two', 'player4_team_two',
'player5_team_two', 'player6_team_two']].stack().unique())
players.sort()
# Convert an input row into a row of our sparse matrix
def map_players(row_in, players):
t1_players = [row_in[0], row_in[1], row_in[2], row_in[3], row_in[4], row_in[5]]
t2_players = [row_in[6], row_in[7], row_in[8], row_in[9], row_in[10], row_in[11]]
row_out = np.zeros([len(players) * 2])
for p in t1_players:
row_out[players.index(p)] = 1
for p in t2_players:
row_out[players.index(p) + len(players)] = -1
return row_out
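# Illustrative sketch (never called): with a hypothetical four-player pool and
# a roster that reuses two names per team purely for brevity, map_players
# yields a +1/-1 indicator row. The names below are made up for the example.
def _demo_map_players():
    demo_players = sorted(['a1', 'a2', 'b1', 'b2'])
    demo_row = ['a1', 'a2', 'a1', 'a2', 'a1', 'a2',   # team one slots
                'b1', 'b2', 'b1', 'b2', 'b1', 'b2']   # team two slots
    # -> array([ 1.,  1.,  0.,  0.,  0.,  0., -1., -1.]): +1 in the attack
    #    columns of team one's players, -1 in the defend columns of team two's.
    return map_players(demo_row, demo_players)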
# Take in our input data, convert the player columns into our design matrix and the map scores into our target column
def extract_X_Y(frame):
stints_x_base = frame[['player1_team_one', 'player2_team_one', 'player3_team_one', 'player4_team_one', 'player5_team_one',
'player6_team_one', 'player1_team_two', 'player2_team_two', 'player3_team_two', 'player4_team_two',
'player5_team_two', 'player6_team_two']].values
stint_X_rows = np.apply_along_axis(map_players, 1, stints_x_base, players)
stint_Y_rows = frame[['team_one_score']].values
return stint_X_rows, stint_Y_rows
# Convert lambda value to alpha needed for ridge CV
def lambda_to_alpha(lambda_value, samples):
return (lambda_value * samples) / 2.0
# Convert RidgeCV alpha back into a lambda value
def alpha_to_lambda(alpha_value, samples):
return (alpha_value * 2.0) / samples
# Calculate Regularized Map Score Added
def calculate_rmts(stint_X_rows, stint_Y_rows):
# We will perform cross validation across a number of different lambdas
lambdas = [.01, 0.025, .05, 0.075, .1, .125, .15, .175, .2, .225, .25]
# convert the lambdas into alpha values
alphas = [lambda_to_alpha(l, stint_X_rows.shape[0]) for l in lambdas]
# Create our ridge CV model
clf = RidgeCV(alphas=alphas, cv=5, fit_intercept=True, normalize=False)
# Fit our data
model = clf.fit(stint_X_rows, stint_Y_rows)
    # extract our players and their coefficients and combine them into a single matrix (num_players x 3)
team_arr = np.transpose(np.array(players).reshape(1, len(players)))
coef_array_attack = np.transpose(model.coef_[:, 0:len(players)])
coef_array_def = np.transpose(model.coef_[:, len(players):])
team_coef_arr = np.concatenate([team_arr, coef_array_attack, coef_array_def], axis=1)
# build a dataframe from our matrix
rmts = pd.DataFrame(team_coef_arr)
intercept = model.intercept_[0]
    # Name the attack and defend rating columns
attack_str = 'rmsa attack'
defend_str = 'rmsa defend'
rmts.columns = ['player', attack_str, defend_str]
rmts[attack_str] = rmts[attack_str].astype(float)
rmts[defend_str] = rmts[defend_str].astype(float)
# Calculate a total RMSA
rmts['rmsa'] = rmts[attack_str] + rmts[defend_str]
rmts['intercept'] = intercept
# Generate a couple of error statistics
lambda_picked = alpha_to_lambda(model.alpha_, stint_X_rows.shape[0])
print('r^2: ', model.score(stint_X_rows, stint_Y_rows))
print('lambda: ', lambda_picked)
print('intercept: ', intercept)
pred = model.predict(stint_X_rows)
print('MAE: ', metrics.mean_absolute_error(stint_Y_rows, pred))
print('MSE: ', metrics.mean_squared_error(stint_Y_rows, pred))
rmts = rmts.sort_values(by='rmsa', ascending=False)
return rmts
x, y = extract_X_Y(full_frame)
rmsa = calculate_rmts(x, y)
rmsa['rank'] = rmsa['rmsa'].rank(ascending=False)
rmsa = rmsa[['rank', 'player', 'rmsa', 'rmsa attack', 'rmsa defend']]
print(rmsa)
rmsa.to_csv('results/player_rating.csv', index=False)
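# Illustrative sketch (never called): the design matrix above encodes a map as
# intercept + team one's attack ratings minus team two's defend ratings, so a
# hypothetical matchup can be scored from the frame returned by calculate_rmts
# (before the column subset above drops the 'intercept' column). Lineups here
# are plain lists of player names.
def _predict_team_one_score(ratings, team_one_lineup, team_two_lineup):
    attack = ratings.set_index('player')['rmsa attack']
    defend = ratings.set_index('player')['rmsa defend']
    intercept = ratings['intercept'].iloc[0]
    return (intercept
            + attack.loc[team_one_lineup].sum()
            - defend.loc[team_two_lineup].sum())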
|
player_rating.py
|
| 0.804291 | 0.371906 |
import sqlite3
from models.sl_logger import SlLogger
logger = SlLogger.get_logger(__name__)
class UserModel:
def __init__(self, _id, username, password, pswd_hint):
self.id = _id
self.username = username
self.password = password
self.pswd_hint = pswd_hint
@classmethod
def create_connection(cls, db_file):
connection = None
try:
connection = sqlite3.connect(db_file)
logger.info('Connection to ' + db_file + ' is successful')
except Exception as e:
logger.error(e)
return connection
@classmethod
def find_by_username(cls, username):
        connection = UserModel.create_connection('data.db')
        user = None
        if connection:
cursor = connection.cursor()
query = "SELECT * FROM users WHERE username=?"
logger.info("Querying users table in data.db on username field")
result = cursor.execute(query, (username,))
row = result.fetchone()
if row:
user = cls(*row)
else:
user = None
connection.close()
return user
@classmethod
def find_by_id(cls, _id):
        connection = UserModel.create_connection('data.db')
        user = None
        if connection:
cursor = connection.cursor()
query = "SELECT * FROM users WHERE id=?"
logger.info("Querying users table in data.db on id field")
result = cursor.execute(query, (_id,))
row = result.fetchone()
if row:
user = cls(*row)
else:
user = None
connection.close()
return user
@classmethod
def get_password_by_username(cls, username, password):
        # NOTE: `password` is the name of the column to read (e.g. 'password'
        # or 'pswd_hint'); it is spliced into the query text below, so it must
        # never come from untrusted input. Only the username is bound safely.
        connection = UserModel.create_connection('data.db')
        if connection:
cursor = connection.cursor()
query = "SELECT " + password + " FROM users WHERE username=?"
logger.info("Querying users table in data.db on username field to get " + password)
result = cursor.execute(query, (username,))
            row = result.fetchone()
            connection.close()
            return row[0] if row else None
@classmethod
def user_exists(cls, username):
        connection = UserModel.create_connection('data.db')
        row = 0
        if connection:
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM users WHERE username=?"
logger.info("Querying users table in data.db to check if username exists")
result = cursor.execute(query, (username,))
row = result.fetchone()[0]
connection.close()
return row
@classmethod
def update_password(cls, username, newPassword):
connection = UserModel.create_connection('data.db')
if connection:
cursor = connection.cursor()
query = "UPDATE users SET password=? WHERE username=?"
logger.info("Updating user table with new hashed password")
cursor.execute(query, (newPassword, username))
connection.commit()
connection.close()
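# Illustrative sketch, guarded so it never runs on import: the column order
# assumed by cls(*row) above implies a users table shaped like
# (id, username, password, pswd_hint). The table name and column order follow
# the queries above; the column types and demo username are assumptions.
if __name__ == '__main__':
    conn = UserModel.create_connection('data.db')
    if conn:
        conn.execute(
            "CREATE TABLE IF NOT EXISTS users "
            "(id INTEGER PRIMARY KEY, username TEXT UNIQUE, "
            "password TEXT, pswd_hint TEXT)")
        conn.commit()
        conn.close()
    demo_user = UserModel.find_by_username('demo')
    print(demo_user.username if demo_user else 'no such user')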
|
models/user.py
|
| 0.376623 | 0.063628 |
import psycopg2
def read_db(product_id, track_order):
try:
connection = psycopg2.connect(database = 'tracklist', user = 'allex', password = '',
host = '172.16.17.32',
port = '5432')
    except psycopg2.Error as err:
        print('An error occurred while trying to connect to the database:', err)
        return None
    else:
        print('Connection to the database was successful!')
cursor = connection.cursor()
    # Let the driver bind the values so quoting is handled safely.
    cursor.execute(
        """
        select link_sample, cd, track_order, song_title, artists, duration
        from test.track_list
        where product_id = %s and track_order = %s;
        """,
        (product_id, track_order))
record = cursor.fetchone()
if record is not None:
print('record found: ', record)
return record
else:
print('record not found')
return None
def update_local_server(product_code, product_id, table_string):
try:
connection2 = psycopg2.connect(database = 'tracklist', user = 'allex', password = '',
host = '172.16.17.32',
port = '5432')
    except psycopg2.Error as err:
        print('An error occurred while trying to connect to the database:', err)
        return
    else:
        print('Connection to the database was successful!')
    cursor = connection2.cursor()
    # Bound parameters let the driver quote and escape all three values,
    # including any single quotes embedded in the description text.
    cursor.execute(
        """
        insert into test.import_table(product_code, product_id, description)
        values (%s, %s, %s);
        """,
        (product_code, product_id, table_string))
connection2.commit()
connection2.close()
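# Illustrative usage sketch, guarded so it never runs on import: both helpers
# need a reachable PostgreSQL server with the test schema; the product id and
# track order below are placeholders, not real catalogue values.
if __name__ == '__main__':
    sample = read_db('PROD-0001', '1')
    if sample:
        link_sample, cd, track_order, song_title, artists, duration = sample
        print(song_title, '-', artists, '(', duration, ')')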
|
read_db.py
|
| 0.244183 | 0.094093 |
from __future__ import print_function, division
from warnings import warn
import pandas as pd
import numpy as np
import pickle
import copy
from nilmtk.utils import find_nearest
from nilmtk.feature_detectors import cluster
from nilmtk.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
class CombinatorialOptimisation(Disaggregator):
"""1 dimensional combinatorial optimisation NILM algorithm.
Attributes
----------
model : list of dicts
Each dict has these keys:
states : list of ints (the power (Watts) used in different states)
training_metadata : ElecMeter or MeterGroup object used for training
this set of states. We need this information because we
need the appliance type (and perhaps some other metadata)
for each model.
state_combinations : 2D array
Each column is an appliance.
Each row is a possible combination of power demand values e.g.
[[0, 0, 0, 0],
[0, 0, 0, 100],
[0, 0, 50, 0],
[0, 0, 50, 100], ...]
MIN_CHUNK_LENGTH : int
"""
def __init__(self):
self.model = []
self.state_combinations = None
self.MIN_CHUNK_LENGTH = 100
self.MODEL_NAME = 'CO'
def train(self, metergroup, num_states_dict=None, **load_kwargs):
"""Train using 1D CO. Places the learnt model in the `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
num_states_dict : dict
**load_kwargs : keyword arguments passed to `meter.power_series()`
Notes
-----
* only uses first chunk for each meter (TODO: handle all chunks).
"""
if num_states_dict is None:
num_states_dict = {}
if self.model:
raise RuntimeError(
"This implementation of Combinatorial Optimisation"
" does not support multiple calls to `train`.")
num_meters = len(metergroup.meters)
if num_meters > 12:
max_num_clusters = 2
else:
max_num_clusters = 3
for i, meter in enumerate(metergroup.submeters().meters):
print("Training model for submeter '{}'".format(meter))
power_series = meter.power_series(**load_kwargs)
chunk = next(power_series)
num_total_states = num_states_dict.get(meter)
if num_total_states is not None:
num_on_states = num_total_states - 1
else:
num_on_states = None
self.train_on_chunk(chunk, meter, max_num_clusters, num_on_states)
# Check to see if there are any more chunks.
# TODO handle multiple chunks per appliance.
try:
next(power_series)
except StopIteration:
pass
else:
warn("The current implementation of CombinatorialOptimisation"
" can only handle a single chunk. But there are multiple"
" chunks available. So have only trained on the"
" first chunk!")
print("Done training!")
def train_on_chunk(self, chunk, meter, max_num_clusters, num_on_states):
# Check if we've already trained on this meter
meters_in_model = [d['training_metadata'] for d in self.model]
if meter in meters_in_model:
raise RuntimeError(
"Meter {} is already in model!"
" Can't train twice on the same meter!"
.format(meter))
states = cluster(chunk, max_num_clusters, num_on_states)
self.model.append({
'states': states,
'training_metadata': meter})
def _set_state_combinations_if_necessary(self):
"""Get centroids"""
# If we import sklearn at the top of the file then auto doc fails.
if (self.state_combinations is None or
self.state_combinations.shape[1] != len(self.model)):
from sklearn.utils.extmath import cartesian
centroids = [model['states'] for model in self.model]
self.state_combinations = cartesian(centroids)
    def disaggregate(self, mains, output_datastore, **load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
sample_period : number, optional
The desired sample period in seconds. Set to 60 by default.
sections : TimeFrameGroup, optional
Set to mains.good_sections() by default.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
# Check that chunk is sensible size
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
appliance_powers = self.disaggregate_chunk(chunk)
for i, model in enumerate(self.model):
appliance_power = appliance_powers.iloc[:, i]
if len(appliance_power) == 0:
continue
data_is_available = True
cols = pd.MultiIndex.from_tuples([chunk.name])
meter_instance = model['training_metadata'].instance()
df = pd.DataFrame(
appliance_power.values, index=appliance_power.index,
columns=cols)
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, df)
# Copy mains data to disag output
mains_df = pd.DataFrame(chunk, columns=cols)
output_datastore.append(key=mains_data_location, value=mains_df)
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
meters=[d['training_metadata'] for d in self.model]
)
def disaggregate_chunk(self, mains):
"""In-memory disaggregation.
Parameters
----------
mains : pd.Series
Returns
-------
appliance_powers : pd.DataFrame where each column represents a
disaggregated appliance. Column names are the integer index
into `self.model` for the appliance in question.
"""
if not self.model:
raise RuntimeError(
"The model needs to be instantiated before"
" calling `disaggregate`. The model"
" can be instantiated by running `train`.")
if len(mains) < self.MIN_CHUNK_LENGTH:
raise RuntimeError("Chunk is too short.")
# Because CombinatorialOptimisation could have been trained using
# either train() or train_on_chunk(), we must
# set state_combinations here.
self._set_state_combinations_if_necessary()
"""
# Add vampire power to the model
if vampire_power is None:
vampire_power = get_vampire_power(mains)
if vampire_power > 0:
print("Including vampire_power = {} watts to model..."
.format(vampire_power))
n_rows = self.state_combinations.shape[0]
vampire_power_array = np.zeros((n_rows, 1)) + vampire_power
state_combinations = np.hstack(
(self.state_combinations, vampire_power_array))
else:
state_combinations = self.state_combinations
"""
state_combinations = self.state_combinations
summed_power_of_each_combination = np.sum(state_combinations, axis=1)
# summed_power_of_each_combination is now an array where each
# value is the total power demand for each combination of states.
# Start disaggregation
indices_of_state_combinations, residual_power = find_nearest(
summed_power_of_each_combination, mains.values)
appliance_powers_dict = {}
for i, model in enumerate(self.model):
print("Estimating power demand for '{}'"
.format(model['training_metadata']))
predicted_power = state_combinations[
indices_of_state_combinations, i].flatten()
column = pd.Series(predicted_power, index=mains.index, name=i)
appliance_powers_dict[self.model[i]['training_metadata']] = column
appliance_powers = pd.DataFrame(appliance_powers_dict, dtype='float32')
return appliance_powers
def import_model(self, filename):
with open(filename, 'rb') as in_file:
imported_model = pickle.load(in_file)
self.model = imported_model.model
# Recreate datastores from filenames
for pair in self.model:
store_filename = pair['training_metadata'].store
pair['training_metadata'].store = HDFDataStore(store_filename)
self.state_combinations = imported_model.state_combinations
self.MIN_CHUNK_LENGTH = imported_model.MIN_CHUNK_LENGTH
def export_model(self, filename):
# Can't pickle datastore, so convert to filenames
original_stores = []
for pair in self.model:
original_store = pair['training_metadata'].store
original_stores.append(original_store)
pair['training_metadata'].store = original_store.store.filename
try:
with open(filename, 'wb') as out_file:
pickle.dump(self, out_file)
finally:
# Restore the stores even if the pickling fails
for original_store, pair in zip(original_stores, self.model):
pair['training_metadata'].store = original_store
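# Minimal numpy sketch of the combinatorial optimisation idea documented above,
# guarded so it never runs on import: enumerate every combination of the
# per-appliance power states, sum each combination, and pick the one whose
# total is closest to the observed mains reading. The state values below are
# made up; in the class above they come from clustering in train_on_chunk.
if __name__ == '__main__':
    import itertools
    kettle_states = [0, 2000]
    fridge_states = [0, 90, 150]
    combos = np.array(list(itertools.product(kettle_states, fridge_states)))
    totals = combos.sum(axis=1)
    mains_reading = 2080.0
    best = combos[np.argmin(np.abs(totals - mains_reading))]
    print("estimated per-appliance demand:", best)  # -> [2000   90]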
|
nilmtk/disaggregate/combinatorial_optimisation.py
|
| 0.739705 | 0.482307 |
import contextlib
import tensorflow as tf
import tensorflow_gan as tfgan
sn_gettr = tfgan.features.spectral_normalization_custom_getter
def snconv2d(input_, output_dim, k_h=3, k_w=3, d_h=2, d_w=2, training=True,
name='snconv2d'):
"""Creates a 2d conv-layer with Spectral Norm applied to the weights.
Args:
input_: 4D input tensor (batch size, height, width, channel).
output_dim: Number of features in the output layer.
k_h: The height of the convolutional kernel.
k_w: The width of the convolutional kernel.
d_h: The height stride of the convolutional kernel.
d_w: The width stride of the convolutional kernel.
training: If `True`, add the spectral norm assign ops.
name: The name of the variable scope.
Returns:
conv: The normalized tensor.
"""
with tf.compat.v1.variable_scope(
name, custom_getter=sn_gettr(training=training)):
return tf.layers.conv2d(
input_,
filters=output_dim,
kernel_size=(k_h, k_w),
strides=(d_h, d_w),
padding='same',
activation=None,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.compat.v1.initializers.zeros(),
name=name)
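# Minimal numpy sketch (never called) of what the spectral-normalization
# custom getter used above does to a weight tensor: estimate the largest
# singular value with a few power-iteration steps and divide the weights by
# it, which caps the layer's Lipschitz constant at roughly one. The persistent
# left singular vector `u` is an assumption of the sketch; the real
# implementation carries it as an extra variable.
def _spectral_normalize_sketch(w, u, num_iters=1):
  import numpy as np
  w2d = w.reshape(-1, w.shape[-1])  # collapse to (fan_in, fan_out)
  for _ in range(num_iters):
    v = w2d.T @ u
    v /= np.linalg.norm(v) + 1e-12
    u = w2d @ v
    u /= np.linalg.norm(u) + 1e-12
  sigma = float(u @ w2d @ v)  # estimate of the largest singular value
  return w / sigma, u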
def snlinear(x, output_size, bias_start=0.0, training=True, name='snlinear'):
"""Creates a linear layer with Spectral Normalization applied.
Args:
x: 2D input tensor (batch size, features).
output_size: Integer number of features in output of layer.
bias_start: Float to which bias parameters are initialized.
training: If `True`, add the spectral norm assign ops.
name: Optional, variable scope to put the layer's parameters into.
Returns:
The normalized output tensor of the linear layer.
"""
with tf.compat.v1.variable_scope(
name, custom_getter=sn_gettr(training=training)):
return tf.layers.dense(
x,
output_size,
activation=None,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.compat.v1.initializers.constant(bias_start))
def sn_embedding(x, number_classes, embedding_size, training=True,
name='snembedding'):
"""Creates an embedding lookup with Spectral Normalization applied.
Args:
x: 1D input tensor (batch size, ).
number_classes: The number of classes.
    embedding_size: The length of the embedding vector for each class.
training: If `True`, add the spectral norm assign ops.
    name: Optional, variable scope to put the layer's parameters into.
Returns:
The output tensor (batch size, embedding_size).
"""
with tf.compat.v1.variable_scope(name):
embedding_map = tf.compat.v1.get_variable(
name='embedding_map',
shape=[number_classes, embedding_size],
initializer=tf.contrib.layers.xavier_initializer())
embedding_map_bar_transpose = tfgan.features.spectral_normalize(
tf.transpose(a=embedding_map), training=training)
embedding_map_bar = tf.transpose(a=embedding_map_bar_transpose)
return tf.nn.embedding_lookup(params=embedding_map_bar, ids=x)
class ConditionalBatchNorm(object):
"""Conditional Batch Normalization.
  The same as normal Batch Normalization, but with a different (gamma, beta)
  pair for each possible category, so each class gets its own scale and shift
  parameters.
"""
# TODO(augustusodena) Merge conditional batch norm with batch norm.
# TODO(augustusodena) Use more sophisticated FilM layer here.
# TODO(augustusodena) Why does this need to be a class?
def __init__(self, num_categories, name='conditional_batch_norm'):
"""Inits the object.
This is just a setter.
Args:
num_categories: Integer number of classes (and gamma, beta pairs).
name: String name to be used for scoping.
Returns:
Initialized object.
"""
with tf.compat.v1.variable_scope(name):
self.name = name
self.num_categories = num_categories
def __call__(self, inputs, labels):
"""Adds Conditional Batch norm to the TF Graph.
Args:
inputs: Tensor of inputs (e.g. images).
labels: Tensor of labels - same first dimension as inputs.
Returns:
Output tensor.
"""
inputs = tf.convert_to_tensor(value=inputs)
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
axis = [0, 1, 2]
shape = tf.TensorShape([self.num_categories]).concatenate(params_shape)
with tf.compat.v1.variable_scope(self.name):
self.gamma = tf.compat.v1.get_variable(
'gamma', shape, initializer=tf.compat.v1.initializers.ones())
self.beta = tf.compat.v1.get_variable(
'beta', shape, initializer=tf.compat.v1.initializers.zeros())
beta = tf.gather(self.beta, labels)
beta = tf.expand_dims(tf.expand_dims(beta, 1), 1)
gamma = tf.gather(self.gamma, labels)
gamma = tf.expand_dims(tf.expand_dims(gamma, 1), 1)
mean, variance = tf.nn.moments(x=inputs, axes=axis, keepdims=True)
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon=1e-5)
outputs.set_shape(inputs_shape)
return outputs
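# Illustrative sketch (not part of the original file): the core of conditional
# batch norm is gathering a per-class (gamma, beta) pair by label and applying
# it after normalization. This standalone example uses plain ops instead of the
# tf.compat.v1 variable machinery used by the class above; shapes are made up.
def _conditional_scale_shift_sketch():
  images = tf.random.normal([2, 4, 4, 3])  # (batch, height, width, channels)
  labels = tf.constant([0, 2])  # one class id per sample
  gamma_table = tf.ones([5, 3])  # 5 classes x 3 channels
  beta_table = tf.zeros([5, 3])
  gamma = tf.reshape(tf.gather(gamma_table, labels), [-1, 1, 1, 3])
  beta = tf.reshape(tf.gather(beta_table, labels), [-1, 1, 1, 3])
  mean, variance = tf.nn.moments(x=images, axes=[0, 1, 2], keepdims=True)
  return tf.nn.batch_normalization(
      images, mean, variance, beta, gamma, variance_epsilon=1e-5)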
class BatchNorm(object):
"""Batch Normalization.
This is just vanilla batch normalization.
"""
def __init__(self, name='batch_norm'):
"""Inits the object.
This is just a setter.
Args:
name: String name to be used for scoping.
Returns:
Initialized object.
"""
with tf.compat.v1.variable_scope(name):
self.name = name
def __call__(self, inputs):
"""Adds Batch Norm to the TF Graph.
Args:
inputs: Tensor of inputs (e.g. images).
Returns:
Output tensor.
"""
inputs = tf.convert_to_tensor(value=inputs)
inputs_shape = inputs.get_shape().as_list()
params_shape = inputs_shape[-1]
axis = [0, 1, 2]
shape = tf.TensorShape([params_shape])
with tf.compat.v1.variable_scope(self.name):
self.gamma = tf.compat.v1.get_variable(
'gamma', shape, initializer=tf.compat.v1.initializers.ones())
self.beta = tf.compat.v1.get_variable(
'beta', shape, initializer=tf.compat.v1.initializers.zeros())
beta = self.beta
gamma = self.gamma
mean, variance = tf.nn.moments(x=inputs, axes=axis, keepdims=True)
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon=1e-5)
outputs.set_shape(inputs_shape)
return outputs
def sn_conv1x1(x, output_dim, training=True, name='sn_conv1x1'):
"""Builds graph for a spectrally normalized 1 by 1 convolution.
This is used in the context of non-local networks to reduce channel count for
strictly computational reasons.
Args:
x: A 4-D tensorflow tensor.
output_dim: An integer representing desired channel count in the output.
training: If `True`, add the spectral norm assign ops.
name: String to pass to the variable scope context.
Returns:
A new volume with the same batch, height, and width as the input.
"""
with tf.compat.v1.variable_scope(
name, custom_getter=sn_gettr(training=training)):
w = tf.compat.v1.get_variable(
'weights', [1, 1, x.get_shape()[-1], output_dim],
initializer=tf.contrib.layers.xavier_initializer())
conv = tf.nn.conv2d(
input=x, filters=w, strides=[1, 1, 1, 1], padding='SAME')
return conv
def sn_non_local_block_sim(x, training=True, name='sn_nonlocal'):
"""Builds graph for the self-attention block.
This is one third of the tricks from the SAGAN paper.
Args:
x: A 4-D tensorflow tensor.
training: If `True`, add the spectral norm assign ops.
name: String to pass to the variable scope context.
Returns:
A new volume with self-attention having been applied.
"""
with tf.compat.v1.variable_scope(name):
_, h, w, num_channels = x.shape.as_list()
location_num = h * w
downsampled_num = location_num // 4
# theta path
theta = sn_conv1x1(x, num_channels // 8, training, 'sn_conv_theta')
theta = tf.reshape(
theta, [-1, location_num, num_channels // 8])
# phi path
phi = sn_conv1x1(x, num_channels // 8, training, 'sn_conv_phi')
phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
phi = tf.reshape(
phi, [-1, downsampled_num, num_channels // 8])
attn = tf.matmul(theta, phi, transpose_b=True)
attn = tf.nn.softmax(attn)
# g path
g = sn_conv1x1(x, num_channels // 2, training, 'sn_conv_g')
g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2)
g = tf.reshape(
g, [-1, downsampled_num, num_channels // 2])
attn_g = tf.matmul(attn, g)
attn_g = tf.reshape(attn_g, [-1, h, w, num_channels // 2])
sigma = tf.compat.v1.get_variable(
'sigma_ratio', [], initializer=tf.compat.v1.initializers.constant(0.0))
attn_g = sn_conv1x1(attn_g, num_channels, training, 'sn_conv_attn')
return x + sigma * attn_g
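# Shape walkthrough for the block above (B = batch, H x W spatial, C channels):
#   theta: (B, H*W, C/8)   phi: (B, H*W/4, C/8)   g: (B, H*W/4, C/2)
#   attn = softmax(theta @ phi^T): (B, H*W, H*W/4)
#   attn_g = attn @ g, reshaped to (B, H, W, C/2), then projected back to C
#   output = x + sigma * attn_g, with sigma a learned scalar initialized to 0.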
@contextlib.contextmanager
def variables_on_gpu0():
"""Put variables on GPU."""
old_fn = tf.compat.v1.get_variable
def new_fn(*args, **kwargs):
with tf.device('/gpu:0'):
return old_fn(*args, **kwargs)
tf.compat.v1.get_variable = new_fn
yield
tf.compat.v1.get_variable = old_fn
def avg_grads(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list is
over individual gradients. The inner list is over the gradient calculation
for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(input_tensor=grad, axis=0)
    # Keep in mind that the Variables are redundant because they are shared
    # across towers, so we just return the first tower's pointer to the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
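# Usage sketch (not part of the original file; the tensors are made up). Each
# inner list would normally come from optimizer.compute_gradients() on a
# different GPU tower, with all towers sharing the same variables.
def _avg_grads_sketch():
  shared_var = tf.Variable([1.0, 2.0], name='shared_var')
  tower_0 = [(tf.constant([0.1, 0.2]), shared_var)]
  tower_1 = [(tf.constant([0.3, 0.4]), shared_var)]
  averaged = avg_grads([tower_0, tower_1])
  # averaged == [(mean gradient [0.2, 0.3], shared_var)]
  return averaged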
|
galaxy2galaxy/layers/spectral_ops.py
|
| 0.917342 | 0.671982 |
from __future__ import annotations
import json
import os
import re
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Collection, Optional
from dotenv import load_dotenv
from .logger import set_log
from .util import datetime_parser, open_query, postgres_date
log = set_log(__name__)
PostData = dict[str, Collection[str]]
# key_map = {"outer1": {"inner11", "inner12}, "outer2": {"inner21"}}
KeyMap = dict[str, set[str]]
ListInnerResponse = dict[str, list[dict[str, dict[str, str]]]]
DictInnerResponse = dict[str, dict[str, Any]]
DuneRecord = dict[str, str]
# pylint: disable=too-few-public-methods
# TODO - use namedtuple for MetaData and QueryResults
class MetaData:
"""The standard information returned from the Dune API as `query_results`"""
id: str
job_id: str
error: Optional[str]
runtime: int
generated_at: datetime
columns: list[str]
def __init__(self, obj: str):
"""
Constructor method
:param obj: input should have the following form
Example input:
{
'id': '3158cc2c-5ed1-4779-b523-eeb9c3b34b21',
'job_id': '093e440d-66ce-4c00-81ec-2406f0403bc0',
'error': None,
'runtime': 0,
'generated_at': '2022-03-19T07:11:37.344998+00:00',
'columns': ['number', 'size', 'time', 'block_hash', 'tx_fees'],
'__typename': 'query_results'
}
"""
self.__dict__ = json.loads(obj, object_hook=datetime_parser)
class QueryResults:
"""Class containing the Data results of a Dune Select Query"""
meta: Optional[MetaData]
data: list[DuneRecord]
def __init__(self, data: ListInnerResponse):
assert data.keys() == {
"query_results",
"get_result_by_job_id",
"query_errors",
}, f"invalid keys {data.keys()}"
assert len(data["query_results"]) == 1, f"Unexpected query_results {data}"
        # Could wrap the meta conversion in a try-except, since we don't really need it,
        # but I can't think of a broad enough exception that won't trip up the linter.
self.meta = MetaData(json.dumps(data["query_results"][0]))
self.data = [rec["data"] for rec in data["get_result_by_job_id"]]
class Network(Enum):
"""Enum for supported EVM networks"""
SOLANA = 1
MAINNET = 4
GCHAIN = 6
POLYGON = 7
OPTIMISM_V1 = 8
BINANCE = 9
OPTIMISM_V2 = 10
def __str__(self) -> str:
        result = super().__str__()
if self == Network.SOLANA:
result = "Solana"
elif self == Network.MAINNET:
result = "Ethereum Mainnet"
elif self == Network.GCHAIN:
result = "Gnosis Chain"
elif self == Network.POLYGON:
result = "Polygon"
elif self == Network.OPTIMISM_V1:
result = "Optimism (OVM 1.0)"
elif self == Network.OPTIMISM_V2:
result = "Optimism (OVM 2.0)"
elif self == Network.BINANCE:
result = "Binance Smart Chain"
return result
@classmethod
def from_string(cls, network_str: str) -> Network:
"""
        Attempts to parse a network name from a string.
        Raises ValueError if there is no match.
"""
patterns = {
r"(.*)mainnet": cls.MAINNET,
r"g(.*)chain": cls.GCHAIN,
r"solana": cls.SOLANA,
r"poly": cls.POLYGON,
r"optimism(.*)1": cls.OPTIMISM_V1,
r"optimism(.*)2": cls.OPTIMISM_V2,
r"bsc": cls.BINANCE,
r"binance": cls.BINANCE,
}
for pattern, network in patterns.items():
if re.match(pattern, network_str, re.IGNORECASE):
return network
raise ValueError(f"could not parse Network from '{network_str}'")
class ParameterType(Enum):
"""
    Enum of the three distinct Dune parameter types
"""
TEXT = "text"
NUMBER = "number"
DATE = "datetime"
@classmethod
def from_string(cls, type_str: str) -> ParameterType:
"""
        Attempts to parse a ParameterType from a string.
        Raises ValueError if there is no match.
"""
patterns = {
r"text": cls.TEXT,
r"number": cls.NUMBER,
r"date": cls.DATE,
}
        for pattern, param_type in patterns.items():
            if re.match(pattern, type_str, re.IGNORECASE):
                return param_type
        raise ValueError(f"could not parse ParameterType from '{type_str}'")
class QueryParameter:
"""Class whose instances are Dune Compatible Query Parameters"""
def __init__(
self,
name: str,
parameter_type: ParameterType,
value: Any,
):
self.key: str = name
self.type: ParameterType = parameter_type
self.value = value
def __eq__(self, other: object) -> bool:
if not isinstance(other, QueryParameter):
return NotImplemented
return all(
[
self.key == other.key,
self.value == other.value,
self.type.value == other.type.value,
]
)
@classmethod
def text_type(cls, name: str, value: str) -> QueryParameter:
"""Constructs a Query parameter of type text"""
return cls(name, ParameterType.TEXT, value)
@classmethod
def number_type(cls, name: str, value: int | float) -> QueryParameter:
"""Constructs a Query parameter of type number"""
return cls(name, ParameterType.NUMBER, value)
@classmethod
def date_type(cls, name: str, value: datetime | str) -> QueryParameter:
"""
Constructs a Query parameter of type date.
        For convenience, we accept either a datetime object or a string.
"""
if isinstance(value, str):
value = postgres_date(value)
return cls(name, ParameterType.DATE, value)
def _value_str(self) -> str:
if self.type in (ParameterType.TEXT, ParameterType.NUMBER):
return str(self.value)
if self.type == ParameterType.DATE:
# This is the postgres string format of timestamptz
return str(self.value.strftime("%Y-%m-%d %H:%M:%S"))
raise TypeError(f"Type {self.type} not recognized!")
def to_dict(self) -> dict[str, str]:
"""Converts QueryParameter into string json format accepted by Dune API"""
results = {
"key": self.key,
"type": self.type.value,
"value": self._value_str(),
}
return results
@classmethod
def from_dict(cls, obj: dict[str, Any]) -> QueryParameter:
"""
Constructs Query Parameters from json.
        TODO - this could probably be done similarly to the __init__ method of MetaData
"""
name, value = obj["key"], obj["value"]
p_type = ParameterType.from_string(obj["type"])
if p_type == ParameterType.DATE:
return cls.date_type(name, value)
if p_type == ParameterType.TEXT:
assert isinstance(value, str)
return cls.text_type(name, value)
if p_type == ParameterType.NUMBER:
if isinstance(value, str):
value = float(value) if "." in value else int(value)
return cls.number_type(name, value)
raise ValueError(f"Could not parse Query parameter from {obj}")
def __str__(self) -> str:
return (
f"QueryParameter("
f"name: {self.key}, "
f"value: {self.value}, "
f"type: {self.type.value})"
)
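# Usage sketch (hypothetical values, not from the original file): parameters
# round-trip through the Dune wire format via to_dict()/from_dict().
#   amount = QueryParameter.number_type("Amount", 10)
#   amount.to_dict() == {"key": "Amount", "type": "number", "value": "10"}
#   QueryParameter.from_dict(amount.to_dict()) == amount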
@dataclass
class Post:
"""Holds query json and response validation details"""
data: PostData
key_map: KeyMap
@dataclass
class DashboardTile:
"""
    A slightly different arrangement of data that is essentially equivalent to a Query.
    Acts as an intermediary type when composing queries from json.
"""
name: str
description: str
select_file: str
query_id: int
network: Network
parameters: list[QueryParameter]
base_file: Optional[str]
@classmethod
def from_dict(cls, obj: dict[str, Any], path: str) -> DashboardTile:
"""Constructs Record from Dune Data as string dict"""
return cls(
name=obj.get("name", "untitled"),
description=obj.get("description", ""),
select_file="/".join([path, obj["query_file"]]),
network=Network.from_string(obj["network"]),
query_id=int(obj["id"]),
parameters=[QueryParameter.from_dict(p) for p in obj.get("parameters", [])],
base_file=obj.get("requires"),
)
def build_query(self) -> str:
"""Constructs a query from base file and select file attributes"""
if self.base_file is not None:
components = map(open_query, [self.base_file, self.select_file])
return "\n".join(list(components))
return open_query(self.select_file)
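# Example (hypothetical names and id) of the json shape from_dict() expects:
#   DashboardTile.from_dict(
#       {
#           "name": "Block Stats",
#           "network": "mainnet",
#           "id": 123456,
#           "query_file": "block_stats.sql",
#           "requires": "queries/base.sql",  # optional shared prelude
#       },
#       path="queries",
#   )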
@dataclass
class DuneQuery:
"""Contains all the relevant data necessary to initiate a Dune Query"""
name: str
description: str
raw_sql: str
network: Network
parameters: list[QueryParameter]
query_id: int
def __hash__(self) -> int:
return hash(self.query_id)
def __eq__(self, other: object) -> bool:
if not isinstance(other, DuneQuery):
return NotImplemented
equality_conditions = [
self.name == other.name,
self.description == other.description,
self.raw_sql == other.raw_sql,
self.network.value == other.network.value,
self.query_id == other.query_id,
self.parameters == other.parameters,
]
log.debug(f"Equality Conditions: {equality_conditions}")
return all(equality_conditions)
@classmethod
def from_environment(
cls,
raw_sql: str,
network: Network,
description: str = "",
parameters: Optional[list[QueryParameter]] = None,
name: Optional[str] = None,
) -> DuneQuery:
"""Constructs a query using the Universal Query ID provided in env file."""
load_dotenv()
return cls(
raw_sql=raw_sql,
description=description,
network=network,
parameters=parameters if parameters is not None else [],
name=name if name else "untitled",
query_id=int(os.environ["DUNE_QUERY_ID"]),
)
@classmethod
def from_tile(cls, tile: DashboardTile) -> DuneQuery:
"""Constructs Dune Query from DashboardTile object"""
return cls(
name=tile.name,
description=tile.description,
raw_sql=tile.build_query(),
network=tile.network,
parameters=tile.parameters,
query_id=tile.query_id,
)
def _request_parameters(self) -> list[dict[str, str]]:
return [p.to_dict() for p in self.parameters]
def upsert_query_post(self) -> Post:
"""Returns json data for a post of type UpsertQuery"""
object_data: dict[str, Any] = {
"id": self.query_id,
"schedule": None,
"dataset_id": self.network.value,
"name": self.name,
"query": self.raw_sql,
"user_id": 84,
"description": self.description,
"is_archived": False,
"is_temp": False,
"tags": [],
"parameters": self._request_parameters(),
"visualizations": {
"data": [],
"on_conflict": {
"constraint": "visualizations_pkey",
"update_columns": ["name", "options"],
},
},
}
key_map = {
"insert_queries_one": {
"id",
"dataset_id",
"name",
"description",
"query",
"is_private",
"is_temp",
"is_archived",
"created_at",
"updated_at",
"schedule",
"tags",
"parameters",
"visualizations",
"forked_query",
"user",
"query_favorite_count_all",
"favorite_queries",
}
}
return Post(
data={
"operationName": "UpsertQuery",
"variables": {
"object": object_data,
"on_conflict": {
"constraint": "queries_pkey",
"update_columns": [
"dataset_id",
"name",
"description",
"query",
"schedule",
"is_archived",
"is_temp",
"tags",
"parameters",
],
},
"session_id": 0, # must be an int, but value is irrelevant
},
"query": """
mutation UpsertQuery(
$session_id: Int!
$object: queries_insert_input!
$on_conflict: queries_on_conflict!
$favs_last_24h: Boolean! = false
$favs_last_7d: Boolean! = false
$favs_last_30d: Boolean! = false
$favs_all_time: Boolean! = true
) {
insert_queries_one(object: $object, on_conflict: $on_conflict) {
...Query
favorite_queries(where: { user_id: { _eq: $session_id } }, limit: 1) {
created_at
}
}
}
fragment Query on queries {
...BaseQuery
...QueryVisualizations
...QueryForked
...QueryUsers
...QueryFavorites
}
fragment BaseQuery on queries {
id
dataset_id
name
description
query
is_private
is_temp
is_archived
created_at
updated_at
schedule
tags
parameters
}
fragment QueryVisualizations on queries {
visualizations {
id
type
name
options
created_at
}
}
fragment QueryForked on queries {
forked_query {
id
name
user {
name
}
}
}
fragment QueryUsers on queries {
user {
...User
}
}
fragment User on users {
id
name
profile_image_url
}
fragment QueryFavorites on queries {
query_favorite_count_all @include(if: $favs_all_time) {
favorite_count
}
query_favorite_count_last_24h @include(if: $favs_last_24h) {
favorite_count
}
query_favorite_count_last_7d @include(if: $favs_last_7d) {
favorite_count
}
query_favorite_count_last_30d @include(if: $favs_last_30d) {
favorite_count
}
}
""",
},
key_map=key_map,
)
@staticmethod
def find_result_by_job(job_id: str) -> Post:
"""Returns json data for a post of type FindResultDataByResult"""
query = """
query FindResultDataByJob($job_id: uuid!) {
query_results(where: {job_id: {_eq: $job_id}, error: {_is_null: true}}) {
id
job_id
runtime
generated_at
columns
}
query_errors(where: {job_id: {_eq: $job_id}}) {
id
job_id
runtime
message
metadata
type
generated_at
}
get_result_by_job_id(args: {want_job_id: $job_id}) {
data
}
}
"""
return Post(
data={
"operationName": "FindResultDataByJob",
"variables": {"job_id": job_id},
"query": query,
},
key_map={
"query_results": {
"id",
"job_id",
"runtime",
"generated_at",
"columns",
},
"query_errors": {
"id",
"job_id",
"runtime",
"message",
"metadata",
"type",
"generated_at",
},
"get_result_by_job_id": {"data"},
},
)
@staticmethod
def get_queue_position(job_id: str) -> Post:
"""Returns json data for a post of type GetQueuePosition
This is meant to determine when query execution has completed.
"""
query = """
query GetQueuePosition($job_id: uuid!) {
view_queue_positions(where: {id: {_eq: $job_id}}) {
pos
}
jobs_by_pk(id: $job_id) {
id
user_id
category
created_at
locked_until
}
}
"""
return Post(
data={
"operationName": "GetQueuePosition",
"variables": {"job_id": job_id},
"query": query,
},
key_map={"data": {"view_queue_positions", "jobs_by_pk"}},
)
def execute_query_post(self) -> Post:
"""Returns json data for a post of type ExecuteQuery"""
query = """
mutation ExecuteQuery($query_id: Int!, $parameters: [Parameter!]!) {
execute_query(query_id: $query_id, parameters: $parameters) {
job_id
}
}
"""
return Post(
data={
"operationName": "ExecuteQuery",
"variables": {
"query_id": self.query_id,
"parameters": [p.to_dict() for p in self.parameters],
},
"query": query,
},
key_map={"execute_query": {"job_id"}},
)
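# End-to-end sketch (hypothetical ids and sql, not part of the original file):
# build a query and the request payloads a client would POST to Dune.
#   query = DuneQuery.from_environment(
#       raw_sql="SELECT 1",
#       network=Network.MAINNET,
#       parameters=[QueryParameter.number_type("Limit", 10)],
#       name="example",
#   )  # expects DUNE_QUERY_ID in the environment / .env file
#   upsert = query.upsert_query_post()    # create or update the query definition
#   execute = query.execute_query_post()  # start execution; response has job_id
#   status = DuneQuery.get_queue_position("some-job-uuid")
#   results = DuneQuery.find_result_by_job("some-job-uuid")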
|
src/duneapi/types.py
|
| 0.654122 | 0.265577 |
import asyncio
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot import *
from userbot.cmdhelp import CmdHelp
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "ν2.ο"
@bot.on(admin_cmd(pattern="think$"))
@bot.on(sudo_cmd(pattern="think$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 28)
event = await edit_or_reply(event, "ThInKiNg...")
animation_chars = [
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING... 🤔",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 36])
@bot.on(admin_cmd(pattern=f"snakes$", outgoing=True))
@bot.on(sudo_cmd(pattern="snakes$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 27)
event = await edit_or_reply(event, "Sssss...Snake...")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◻️◼️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 27])
@bot.on(admin_cmd(pattern=f"humans$", outgoing=True))
@bot.on(sudo_cmd(pattern="humans$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 16)
event = await edit_or_reply(event, "Tarzan The Man!!")
animation_chars = [
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛🚗\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛🚗⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛🚗⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛🚗⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛🚗⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛🚗⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n🚗⬛⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜😊⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬛⬜⬛\n⬛⬛⬜⬛⬛⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬛⬛⬛⬛⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬛😊⬛⬜⬛\n⬛⬛⬜⬜⬜⬛⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬛⬛⬛⬛⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬛⬜⬜⬜⬛⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜😊⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 16])
@bot.on(admin_cmd(pattern=f"mcs$", outgoing=True))
@bot.on(sudo_cmd(pattern="mcs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 28)
event = await edit_or_reply(event, "Huh??")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◻️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 28])
@bot.on(admin_cmd(pattern="viruses$"))
@bot.on(sudo_cmd(pattern="viruses$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 30)
event = await edit_or_reply(event, "Giving this sir/miss Virus!!")
animation_chars = [
"🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️🔴🔵🌕♓♎⛎◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🔴🔵🌕♓♎⛎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️\n◼️◼️◼️◼️\n◼️◼️◼️◼️\n◼️◼️◼️◼️",
"◼️◼️◼️\n◼️◼️◼️\n◼️◼️◼️",
"◼️◼️\n◼️◼️",
"◼️",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 30])
@bot.on(admin_cmd(pattern=f"raping$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"raping$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 30)
event = await edit_or_reply(event, "Rappppeeeee.....Shhh!!!")
animation_chars = [
"**r**",
"**ra**",
"**rap**",
"**rape**",
"**rape_**",
"**rape_t**",
"**rape_tr**",
"**rape_tra**",
"**rape_trai**",
"**rape_train**",
"**ape_train🚅**",
"**pe_train🚅🚃🚃**",
"**e_train🚅🚃🚃🚃**",
"**_train🚅🚃🚃🚃🚃**",
"**train🚅🚃🚃🚃🚃🚃**",
"**rain🚅🚃🚃🚃🚃🚃🚃**",
"**ain🚅🚃🚃🚃🚃🚃🚃🚃**",
"**in🚅🚃🚃🚃🚃🚃🚃🚃🚃**",
"**n🚅🚃🚃🚃🚃🚃🚃🚃🚃🚃**",
"🚅🚃🚃🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃",
"🚃🚃🚃",
"🚃🚃",
"🚃",
"**rApEd**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 30])
@bot.on(admin_cmd(pattern=f"nikl$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"nikl$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 6)
event = await edit_or_reply(event, "Nikal Lavde Lassun..")
animation_chars = [
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⢳⡀⠀⡏⠀⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Nikal ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀⠀__⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⠀⢳⡀⠀⡏⠀⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Lavde ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀|__|⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀ ⠀⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀⠀⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Pehli ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀(P)⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀ ⠀⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Fursat ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀⠀__ ⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Meeee ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀|__| ⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⠀⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Nikal ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀lodu⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@bot.on(admin_cmd(pattern=f"musics$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"musics$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1.5
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Booty..Booty..Booty shake...Kan chil gye na?😂")
animation_chars = [
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:00** ▱▱▱▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `▶️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀ **⠀Device: Nokia 1100**",
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:01** ▰▱▱▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀ **⠀Device: Nokia 1100**",
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀ **Now Playing: Shape of u**\n\n**00:02** ▰▰▱▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:03** ▰▰▰▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:04** ▰▰▰▰▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:05** ▰▰▰▰▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:06** ▰▰▰▰▰▰▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:07** ▰▰▰▰▰▰▰▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:08** ▰▰▰▰▰▰▰▰▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:09** ▰▰▰▰▰▰▰▰▰▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:10** ▰▰▰▰▰▰▰▰▰▰ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏺️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
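# .squeeze: fills a bordered box with ░ blocks, adding one per second.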
@bot.on(admin_cmd(pattern=f"squeeze$", outgoing=True))
@bot.on(sudo_cmd(pattern="squeeze$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await edit_or_reply(
event, "╔═══════════════════╗ \n \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n \t░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ \t░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(6)
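# .loadings, .squares, .ups, .rounds, .hearts: simple 20-step loops that cycle through four glyphs.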
@bot.on(admin_cmd(pattern=f"loadings$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"loadings$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "▯")
animation_chars = ["▮", "▯", "▬", "▭", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"squares$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"squares$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "◨")
animation_chars = ["◧", "◨", "◧", "◨", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"ups$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"ups$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "╻")
animation_chars = ["╹", "╻", "╹", "╻", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"rounds$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"rounds$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "Round...")
animation_chars = ["⚫", "⬤", "●", "∘", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"hearts$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"hearts$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "❤️")
animation_chars = ["🖤", "❤️", "🖤", "❤️", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
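# .anime: cycles face emojis and ends on a repo plug (20 edits, one per second).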
@bot.on(admin_cmd(pattern=f"anime$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"anime$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "😢")
animation_chars = [
"😁",
"😧",
"😡",
"😢",
"**Repo of eviral**",
"😁",
"😧",
"😡",
"😢",
"__**[Good to see you Guys....]**__(github.com/Teameviral/FIREX)",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
@bot.on(admin_cmd(pattern=f"fnls$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"fnls$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 6)
event = await edit_or_reply(event, "Hey There....")
animation_chars = ["😁🏿", "😁🏾", "😁🏽", "😁🏼", "😁", "**Good to see you Firand....**"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@bot.on(admin_cmd(pattern=f"monkeys$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"monkeys$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 12)
event = await edit_or_reply(event, "Hey There....")
animation_chars = ["🐵", "🙉", "🙈", "🙊", "🖕🐵🖕", "**Good to see you Firand....**"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
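# .herbers: fake server-stats dashboard, 10 frames edited 2 s apart.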
@bot.on(admin_cmd(pattern=f"herbers$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"herbers$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 10)
event = await edit_or_reply(event, "Power On......")
animation_chars = [
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 10%\n\n ●○○○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 5.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.13GB\n **🔹used:** 33.77GB\n **🔹total:** 60.0GB\n \n ●●●●●●●○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 158.98GB\n **🔹recv:** 146.27GB\n **🔹sent_packets:** 84518799\n **🔹recv_packets:** 159720314\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 30%\n\n ●●●○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 20.4%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 7.18GB\n **🔹used:** 28.26GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 146.27GB\n **🔹recv:** 124.33GB\n **🔹sent_packets:** 54635686\n **🔹recv_packets:** 143565654\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 60%\n\n ●●●●●●○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 60.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 6.52GB\n **🔹used:** 35.78GB\n **🔹total:** 60.0GB\n \n ●●●○○○○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 124.33GB\n **🔹recv:** 162.48GB\n **🔹sent_packets:** 25655655\n **🔹recv_packets:** 165289456\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 100%\n\n ●●●●●●●●●●\n\n **🔹cpu core**\n **🔹core_usage:** 100.0%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 9.81GB\n **🔹used:** 30.11GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 162.48GB\n **🔹recv:** 175.75GB\n **🔹sent_packets:** 56565435\n **🔹recv_packets:** 135345655\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 70%\n\n ●●●●●●●○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 80.4%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 5.76GB\n **🔹used:** 29.35GB\n **🔹total:** 60.0GB\n \n ●●●●●●●○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 175.75GB\n **🔹recv:** 118.55GB\n **🔹sent_packets:** 36547698\n **🔹recv_packets:** 185466554\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 60%\n\n ●●●●●●○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 62.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.23GB\n **🔹used:** 33.32GB\n **🔹total:** 60.0GB\n \n ●●●●●●○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 118.55GB\n **🔹recv:** 168.65GB\n **🔹sent_packets:** 24786554\n **🔹recv_packets:** 156745865\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 30%\n\n ●●●○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 30.6%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 9.75GB\n **🔹used:** 36.54GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 168.65GB\n **🔹recv:** 128.35GB\n **🔹sent_packets:** 56565435\n **🔹recv_packets:** 1475823589\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 10%\n\n ●○○○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 10.2%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 10.20GB\n **🔹used:** 25.40GB\n **🔹total:** 60.0GB\n \n ●●●●●●○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 128.35GB\n **🔹recv:** 108.31GB\n **🔹sent_packets:** 54635686\n **🔹recv_packets:** 157865426\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 100%\n\n ●●●●●●●●●●\n\n **🔹cpu core**\n\n **🔹core_usage:** 100.0%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 5.25GB\n **🔹used:** 31.14GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 108.31GB\n **🔹recv:** 167.17GB\n **🔹sent_packets:** 84518799\n **🔹recv_packets:** 124575356\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 70%\n\n ●●●●●●●○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 76.2%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.01GB\n **🔹used:** 33.27GB\n **🔹total:** 60.0GB\n \n ●●●○○○○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 167.17GB\n **🔹recv:** 158.98GB\n **🔹sent_packets:** 36547698\n **🔹recv_packets:** 165455856\n\n\n**===================**\n",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
@bot.on(admin_cmd(pattern=f"hands$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"hands$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 13)
event = await edit_or_reply(event, "🖐️")
animation_chars = [
"👈",
"👉",
"☝️",
"👆",
"🖕",
"👇",
"✌️",
"🤞",
"🖖",
"🤘",
"🤙",
"🖐️",
"👌",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 13])
@bot.on(admin_cmd(pattern=f"gsgs$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"gsgs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 12)
event = await edit_or_reply(event, "ContDown....")
animation_chars = [
"🔟",
"9️⃣",
"8️⃣",
"7️⃣",
"6️⃣",
"5️⃣",
"4️⃣",
"3️⃣",
"2️⃣",
"1️⃣",
"0️⃣",
"🆘",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
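# .thearts: coloured-heart loop, 54 edits 0.3 s apart (edits this fast can run into Telegram flood waits).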
@bot.on(admin_cmd(pattern=r"thearts$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"thearts$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 54)
event = await edit_or_reply(event, "🖤")
animation_chars = [
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
@bot.on(admin_cmd(pattern="unoobs$"))
@bot.on(sudo_cmd(pattern="unoobs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 9)
event = await edit_or_reply(event, "U nOoB")
animation_chars = [
"EvErYbOdY",
"iZ",
"BiGGeSt",
"NoOoB",
"uNtiL",
"YoU",
"aRriVe",
"😈",
"EvErYbOdY iZ BiGGeSt NoOoB uNtiL YoU aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 9])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern="menoobs$"))
@bot.on(sudo_cmd(pattern="menoobs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 9)
event = await edit_or_reply(event, "mE nOoB")
animation_chars = [
"EvErYbOdY",
"iZ",
"BiGGeSt",
"NoOoB",
"uNtiL",
"i",
"aRriVe",
"😈",
"EvErYbOdY iZ BiGGeSt NoOoB uNtiL i aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 9])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern="upros$"))
@bot.on(sudo_cmd(pattern="upros$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 8)
event = await edit_or_reply(event, "U pRo")
animation_chars = [
"EvErYbOdY",
"iZ",
"PeRu",
"uNtiL",
"YoU",
"aRriVe",
"😈",
"EvErYbOdY iZ PeRu uNtiL YoU aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 8])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern="mepros$"))
@bot.on(sudo_cmd(pattern="mepros$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 8)
event = await edit_or_reply(event, "mE pRo")
animation_chars = [
"EvErYbOdY",
"iZ",
"PeRu",
"uNtiL",
"i",
"aRriVe",
"😈",
"EvErYbOdY iZ PeRu uNtiL i aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 8])
await asyncio.sleep(animation_interval)
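# .quickheals, .sqhs and .medkit: fake antivirus scans with a growing progress bar.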
@bot.on(admin_cmd(pattern=f"quickheals$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"quickheals$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "QuickHeaml")
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nResult: No Virus Found...`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"sqhs$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"sqhs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "SQH")
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nResult: No Virus Found...`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"medkit$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"medkit$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Injecting Virus")
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nResult: ⚠️Virus Found⚠️\nMore Info: Torzan, Spyware, Adware`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
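# .macoss, .window, .linuxs and .stocks: fake OS-boot progress bars that end in a PC spec sheet.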
@bot.on(admin_cmd(pattern=f"macoss$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"macoss$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Now My Phone Become Mac(Apple Laptop)")
animation_chars = [
"`Connecting To Hackintosh...`",
"`Initiating Hackintosh Login.`",
"`Loading Hackintosh... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Hackintosh... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Hackintosh`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"window$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"window$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Now My Phone Become Windows")
animation_chars = [
"`Connecting To Windows 10...`",
"`Initiating Windows 10 Login.`",
"`Loading Windows 10... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Windows 10... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Windows 10`\n\n**My PC Specs:**\n\n **CPU:** __3.4GHz ryzen 9 5950x (16-core,32 threads 64MB cache, up to 4.9GHz)__\n\n**Graphics:** __Nvidia GeForce RTX 3090 OC (24GB GDDR6X)__\n\n**RAM:** __64GB DDR4 (4000MHz)__\n\n**Screen:** __17.3-inch, UHD (3840 x 2160) 144Hz Hdr G-Sync__\n\n**Storage:** __512GB nvme gen 4 SSD, 5 TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.1, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), 2 HDMI2.0, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"linuxs$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"linuxs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Now My Phone Become Linux")
animation_chars = [
"`Connecting To Linux...`",
"`Initiating Linux Login.`",
"`Loading Linux... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Linux... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Linux`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"stocks$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"stocks$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Stocks")
animation_chars = [
"`Connecting To Symbian OS...`",
"`Initiating Symbian OS Login.`",
"`Loading Symbian OS... 0%\n█████████████████████████ `",
"`Loading Symbian OS... 3%\n█████████████████████▒▒▒▒ `",
"`Loading Symbian OS... 9%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 23%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 39%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 69%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 89%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 100%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Symbian OS`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"oss$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"oss$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 7)
event = await edit_or_reply(event, "OS")
animation_chars = [
"`Scanning OS...`",
"`Scanning OS......`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n☑️ `.macos`\n☑️ `.windows`\n☑️ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n☑️ `.windows`\n☑️ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n✅ `.windows`\n☑️ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n✅ `.windows`\n✅ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n✅ `.windows`\n✅ `.linux`\n✅ `.stock`\n\nDeveloped By: @Eviral",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 7])
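# Register every animation command in the plugin's help menu.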
CmdHelp("animation").add_command("eye", None, "Use And see").add_command(
"think", None, "Use and see"
).add_command("snakes", None, "Use and see").add_command(
"humans", None, "Use and see"
).add_command(
"mcs", None, "Use and see"
).add_command(
"viruses", None, "Use and see"
).add_command(
"raping", None, "Use and see"
).add_command(
"nikl", None, "Use and see"
).add_command(
"musics", None, "Use and see"
).add_command(
"squeeze", None, "Use and see"
).add_command(
"loadings", None, "use and see"
).add_command(
"squares", None, "Use and see"
).add_command(
"ups", None, "Use and see"
).add_command(
"rounds", None, "use and see"
).add_command(
"hearts", None, "Use And See"
).add_command(
"animes", None, "Use And See"
).add_command(
"fmls", None, "Use And See"
).add_command(
"monkeys", None, "Use and see"
).add_command(
"herbers", None, "Use and see"
).add_command(
"hands", None, "Use and see"
).add_command(
"gsgs", None, "Use and see"
).add_command(
"unoobs", None, "Use and see"
).add_command(
"menoobs", None, "Use and see"
).add_command(
"upros", None, "Use and see"
).add_command(
"mepros", None, "Use and see"
).add_command(
"thearts", None, "Use and see"
).add_command(
"quickheal", None, "use and see"
).add_command(
"sqhs", None, "Use and see"
).add_command(
"medkit", None, "Use and see"
).add_command(
"macos", None, "use and see"
).add_command(
"window", None, "Use And See"
).add_command(
"linuxs", None, "Use And See"
).add_command(
"stocks", None, "Use And See"
).add_command(
"oss", None, "Use And See"
).add_type(
"Official"
).add_info(
"Its Very Useful Module too much just use these u must(might) have fun"
).add_warning(
"Harmless Module✅"
).add_type(
"Addons"
).add()
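# Illustrative sketch (not part of the original plugin): most handlers above repeat
# the same frame loop, which could be factored into a single helper like this one.
async def _play_animation(event, frames, interval):
    # Edit the trigger message through each frame, pausing `interval` seconds between edits.
    for frame in frames:
        await asyncio.sleep(interval)
        await event.edit(frame)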
|
userbot/plugins/Xtra_Plugin/animations.py
|
import asyncio
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot import *
from userbot.cmdhelp import CmdHelp
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "ν2.ο"
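# Animation plugin: each handler below replays a text animation by repeatedly editing the message that triggered it.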
@bot.on(admin_cmd(pattern="think$"))
@bot.on(sudo_cmd(pattern="think$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
    animation_ttl = range(0, 36)
event = await edit_or_reply(event, "ThInKiNg...")
animation_chars = [
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING",
"THI&K#N₹",
"T+IN@I?G",
"¿H$NK∆NG",
"¶H×NK&N*",
"NGITHKIN",
"T+I#K@₹G",
"THINKING... 🤔",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 36])
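# .snakes: 27-frame animation drawn on a 5x5 emoji grid, one edit per second.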
@bot.on(admin_cmd(pattern=f"snakes$", outgoing=True))
@bot.on(sudo_cmd(pattern="snakes$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 27)
event = await edit_or_reply(event, "Sssss...Snake...")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◼️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◼️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◼️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️\n◻️◻️◻️◻️◻️",
"◻️◻️◻️◻️◻️\n◻️◼️◻️◼️◻️\n◻️◻️◻️◻️◻️\n◻️◼️◼️◼️◻️\n◻️◻️◻️◻️◻️",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 27])
@bot.on(admin_cmd(pattern=f"humans$", outgoing=True))
@bot.on(sudo_cmd(pattern="humans$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 16)
event = await edit_or_reply(event, "Tarzan The Man!!")
animation_chars = [
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛🚗\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛🚗⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛🚗⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛🚗⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛🚗⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛🚗⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n🚗⬛⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜😊⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬛⬜⬛\n⬛⬛⬜⬛⬛⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬛⬛⬛⬛⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬛😊⬛⬜⬛\n⬛⬛⬜⬜⬜⬛⬛\n⬛⬛⬛⬜⬛⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬛⬛⬛⬛⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛😊⬛⬛⬛\n⬛⬛⬜⬜⬜⬛⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n⬛⬛⬜⬛⬜⬛⬛\n🔲🔲🔲🔲🔲🔲🔲",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬛⬛⬛⬛⬛⬛⬛\n⬜⬜⬜😊⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n🔲🔲🔲🔲🔲🔲🔲",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 16])
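# .mcs: a single white tile wanders around a 5x5 black grid (28 frames, one per second).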
@bot.on(admin_cmd(pattern=f"mcs$", outgoing=True))
@bot.on(sudo_cmd(pattern="mcs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 28)
event = await edit_or_reply(event, "Huh??")
animation_chars = [
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◻️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◻️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◻️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◻️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◻️◼️◻️◼️\n◼️◼️◼️◼️◼️\n◼️◻️◻️◻️◼️\n◼️◼️◼️◼️◼️",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 28])
@bot.on(admin_cmd(pattern="viruses$"))
@bot.on(sudo_cmd(pattern="viruses$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 30)
event = await edit_or_reply(event, "Giving this sir/miss Virus!!")
animation_chars = [
"🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️🔴🔵🌕♓♎⛎◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️🔴🔵🌕♓♎⛎🔴🔵🌕♓♎⛎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🔴🔵🌕♓♎⛎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️◼️◼️",
"◼️◼️◼️◼️\n◼️◼️◼️◼️\n◼️◼️◼️◼️\n◼️◼️◼️◼️",
"◼️◼️◼️\n◼️◼️◼️\n◼️◼️◼️",
"◼️◼️\n◼️◼️",
"◼️",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 30])
@bot.on(admin_cmd(pattern=f"raping$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"raping$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 30)
event = await edit_or_reply(event, "Rappppeeeee.....Shhh!!!")
animation_chars = [
"**r**",
"**ra**",
"**rap**",
"**rape**",
"**rape_**",
"**rape_t**",
"**rape_tr**",
"**rape_tra**",
"**rape_trai**",
"**rape_train**",
"**ape_train🚅**",
"**pe_train🚅🚃🚃**",
"**e_train🚅🚃🚃🚃**",
"**_train🚅🚃🚃🚃🚃**",
"**train🚅🚃🚃🚃🚃🚃**",
"**rain🚅🚃🚃🚃🚃🚃🚃**",
"**ain🚅🚃🚃🚃🚃🚃🚃🚃**",
"**in🚅🚃🚃🚃🚃🚃🚃🚃🚃**",
"**n🚅🚃🚃🚃🚃🚃🚃🚃🚃🚃**",
"🚅🚃🚃🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃🚃",
"🚃🚃🚃🚃",
"🚃🚃🚃",
"🚃🚃",
"🚃",
"**rApEd**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 30])
@bot.on(admin_cmd(pattern=f"nikl$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"nikl$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 6)
event = await edit_or_reply(event, "Nikal Lavde Lassun..")
animation_chars = [
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⢳⡀⠀⡏⠀⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Nikal ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀⠀__⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⠀⢳⡀⠀⡏⠀⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Lavde ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀|__|⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀ ⠀⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀⠀⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Pehli ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀(P)⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀ ⠀⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Fursat ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀⠀__ ⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Meeee ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀|__| ⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
"`⠀⠀⠀⣠⣶⡾⠏⠉⠙⠳⢦⡀⠀⠀⠀⢠⠞⠉⠙⠲⡀⠀\n ⠀⣴⠿⠏⠀⠀⠀⠀⠀ ⠀⢳⡀⠀⡏⠀⠀ ⠀⢷\n⢠⣟⣋⡀⢀⣀⣀⡀⠀⣀⡀⣧⠀⢸⠀ ⠀ ⡇\n⢸⣯⡭⠁⠸⣛⣟⠆⡴⣻⡲⣿ ⣸ Nikal ⡇\n ⣟⣿⡭⠀⠀⠀⠀⠀⢱⠀ ⣿ ⢹⠀ ⡇\n ⠙⢿⣯⠄⠀⠀lodu⠀⠀⡿ ⠀⡇⠀⠀⠀⠀ ⡼\n⠀⠀⠀⠹⣶⠆⠀⠀⠀⠀⠀⡴⠃⠀ ⠘⠤⣄⣠⠞⠀\n⠀⠀⠀⠀⢸⣷⡦⢤⡤⢤⣞⣁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n⠀⢀⣤⣴⣿⣏⠁⠀⠀⠸⣏⢯⣷⣖⣦⡀⠀⠀⠀⠀⠀⠀\n⢀⣾⣽⣿⣿⣿⣿⠛⢲⣶⣾⢉⡷⣿⣿⠵⣿⠀⠀⠀⠀⠀⠀\n⣼⣿⠍⠉⣿⡭⠉⠙⢺⣇⣼⡏⠀⠀ ⠀⣄⢸⠀⠀⠀⠀⠀⠀`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@bot.on(admin_cmd(pattern=f"musics$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"musics$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1.5
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Booty..Booty..Booty shake...Kan chil gye na?😂")
animation_chars = [
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:00** ▱▱▱▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `▶️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀ **⠀Device: Nokia 1100**",
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:01** ▰▱▱▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀ **⠀Device: Nokia 1100**",
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀ **Now Playing: Shape of u**\n\n**00:02** ▰▰▱▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤⬤ 81% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:03** ▰▰▰▱▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:04** ▰▰▰▰▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:05** ▰▰▰▰▱▱▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:06** ▰▰▰▰▰▰▱▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:07** ▰▰▰▰▰▰▰▱▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:08** ▰▰▰▰▰▰▰▰▱▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:09** ▰▰▰▰▰▰▰▰▰▱ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏸️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
"⬤⬤◯ 80% ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀`✖️`\n\n⠀⠀⠀⠀⠀[MadBoi Music Player](tg://user?id=1732236209)\n\n⠀⠀⠀⠀**Now Playing: Shape of u**\n\n**00:10** ▰▰▰▰▰▰▰▰▰▰ **00:10**\n\n⠀⠀⠀⠀⠀`🔂` `⏮️` `⏪️` `⏺️` `⏩️` `⏭️`\n\n**⠀Next Song:** __Alan Walker - Alone.__\n\n⠀⠀⠀⠀**⠀Device: Nokia 1100**",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"squeeze$", outgoing=True))
@bot.on(sudo_cmd(pattern="squeeze$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await edit_or_reply(
event, "╔═══════════════════╗ \n \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n \t░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ \t░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit("╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝")
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(1)
await event.edit(
"╔═══════════════════╗ \n ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ \n╚═══════════════════╝"
)
await asyncio.sleep(6)
@bot.on(admin_cmd(pattern=f"loadings$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"loadings$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "▯")
animation_chars = ["▮", "▯", "▬", "▭", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"squares$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"squares$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "◨")
animation_chars = ["◧", "◨", "◧", "◨", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"ups$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"ups$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "╻")
animation_chars = ["╹", "╻", "╹", "╻", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"rounds$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"rounds$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "Round...")
animation_chars = ["⚫", "⬤", "●", "∘", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"hearts$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"hearts$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "❤️")
animation_chars = ["🖤", "❤️", "🖤", "❤️", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@bot.on(admin_cmd(pattern=f"anime$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"anime$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 20)
event = await edit_or_reply(event, "😢")
animation_chars = [
"😁",
"😧",
"😡",
"😢",
"**Repo of eviral**",
"😁",
"😧",
"😡",
"😢",
"__**[Good to see you Guys....]**__(github.com/Teameviral/FIREX)",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
@bot.on(admin_cmd(pattern=f"fnls$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"fnls$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 6)
event = await edit_or_reply(event, "Hey There....")
animation_chars = ["😁🏿", "😁🏾", "😁🏽", "😁🏼", "😁", "**Good to see you Firand....**"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@bot.on(admin_cmd(pattern=f"monkeys$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"monkeys$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 12)
event = await edit_or_reply(event, "Hey There....")
animation_chars = ["🐵", "🙉", "🙈", "🙊", "🖕🐵🖕", "**Good to see you Firand....**"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@bot.on(admin_cmd(pattern=f"herbers$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"herbers$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 10)
event = await edit_or_reply(event, "Power On......")
animation_chars = [
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 10%\n\n ●○○○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 5.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.13GB\n **🔹used:** 33.77GB\n **🔹total:** 60.0GB\n \n ●●●●●●●○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 158.98GB\n **🔹recv:** 146.27GB\n **🔹sent_packets:** 84518799\n **🔹recv_packets:** 159720314\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 30%\n\n ●●●○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 20.4%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 7.18GB\n **🔹used:** 28.26GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 146.27GB\n **🔹recv:** 124.33GB\n **🔹sent_packets:** 54635686\n **🔹recv_packets:** 143565654\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 60%\n\n ●●●●●●○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 60.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 6.52GB\n **🔹used:** 35.78GB\n **🔹total:** 60.0GB\n \n ●●●○○○○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 124.33GB\n **🔹recv:** 162.48GB\n **🔹sent_packets:** 25655655\n **🔹recv_packets:** 165289456\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 100%\n\n ●●●●●●●●●●\n\n **🔹cpu core**\n **🔹core_usage:** 100.0%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 9.81GB\n **🔹used:** 30.11GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 162.48GB\n **🔹recv:** 175.75GB\n **🔹sent_packets:** 56565435\n **🔹recv_packets:** 135345655\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 70%\n\n ●●●●●●●○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 80.4%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 5.76GB\n **🔹used:** 29.35GB\n **🔹total:** 60.0GB\n \n ●●●●●●●○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 175.75GB\n **🔹recv:** 118.55GB\n **🔹sent_packets:** 36547698\n **🔹recv_packets:** 185466554\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 60%\n\n ●●●●●●○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 62.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.23GB\n **🔹used:** 33.32GB\n **🔹total:** 60.0GB\n \n ●●●●●●○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 118.55GB\n **🔹recv:** 168.65GB\n **🔹sent_packets:** 24786554\n **🔹recv_packets:** 156745865\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 30%\n\n ●●●○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 30.6%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 9.75GB\n **🔹used:** 36.54GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 168.65GB\n **🔹recv:** 128.35GB\n **🔹sent_packets:** 56565435\n **🔹recv_packets:** 1475823589\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 10%\n\n ●○○○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 10.2%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 10.20GB\n **🔹used:** 25.40GB\n **🔹total:** 60.0GB\n \n ●●●●●●○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 128.35GB\n **🔹recv:** 108.31GB\n **🔹sent_packets:** 54635686\n **🔹recv_packets:** 157865426\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 100%\n\n ●●●●●●●●●●\n\n **🔹cpu core**\n\n **🔹core_usage:** 100.0%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 5.25GB\n **🔹used:** 31.14GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 108.31GB\n **🔹recv:** 167.17GB\n **🔹sent_packets:** 84518799\n **🔹recv_packets:** 124575356\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 70%\n\n ●●●●●●●○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 76.2%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.01GB\n **🔹used:** 33.27GB\n **🔹total:** 60.0GB\n \n ●●●○○○○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 167.17GB\n **🔹recv:** 158.98GB\n **🔹sent_packets:** 36547698\n **🔹recv_packets:** 165455856\n\n\n**===================**\n",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
@bot.on(admin_cmd(pattern=f"hands$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"hands$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 13)
event = await edit_or_reply(event, "🖐️")
animation_chars = [
"👈",
"👉",
"☝️",
"👆",
"🖕",
"👇",
"✌️",
"🤞",
"🖖",
"🤘",
"🤙",
"🖐️",
"👌",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 13])
@bot.on(admin_cmd(pattern=f"gsgs$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"gsgs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 12)
    event = await edit_or_reply(event, "CountDown....")
animation_chars = [
"🔟",
"9️⃣",
"8️⃣",
"7️⃣",
"6️⃣",
"5️⃣",
"4️⃣",
"3️⃣",
"2️⃣",
"1️⃣",
"0️⃣",
"🆘",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@bot.on(admin_cmd(pattern=r"thearts$", outgoing=True))
@bot.on(sudo_cmd(pattern=r"thearts$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 54)
event = await edit_or_reply(event, "🖤")
animation_chars = [
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
@bot.on(admin_cmd(pattern="unoobs$"))
@bot.on(sudo_cmd(pattern="unoobs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 9)
event = await edit_or_reply(event, "U nOoB")
animation_chars = [
"EvErYbOdY",
"iZ",
"BiGGeSt",
"NoOoB",
"uNtiL",
"YoU",
"aRriVe",
"😈",
"EvErYbOdY iZ BiGGeSt NoOoB uNtiL YoU aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 9])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern="menoobs$"))
@bot.on(sudo_cmd(pattern="menoobs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 9)
event = await edit_or_reply(event, "mE nOoB")
animation_chars = [
"EvErYbOdY",
"iZ",
"BiGGeSt",
"NoOoB",
"uNtiL",
"i",
"aRriVe",
"😈",
"EvErYbOdY iZ BiGGeSt NoOoB uNtiL i aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 9])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern="upros$"))
@bot.on(sudo_cmd(pattern="upros$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 8)
event = await edit_or_reply(event, "U pRo")
animation_chars = [
"EvErYbOdY",
"iZ",
"PeRu",
"uNtiL",
"YoU",
"aRriVe",
"😈",
"EvErYbOdY iZ PeRu uNtiL YoU aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 8])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern="mepros$"))
@bot.on(sudo_cmd(pattern="mepros$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 8)
event = await edit_or_reply(event, "mE pRo")
animation_chars = [
"EvErYbOdY",
"iZ",
"PeRu",
"uNtiL",
"i",
"aRriVe",
"😈",
"EvErYbOdY iZ PeRu uNtiL i aRriVe 😈",
]
for i in animation_ttl:
await event.edit(animation_chars[i % 8])
await asyncio.sleep(animation_interval)
@bot.on(admin_cmd(pattern=f"quickheals$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"quickheals$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 11)
    event = await edit_or_reply(event, "QuickHeal")
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription: Pru User\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nResult: No Virus Found...`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"sqhs$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"sqhs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "SQH")
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nResult: No Virus Found...`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"medkit$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"medkit$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Injecting Virus")
animation_chars = [
"`Downloading File..`",
"`File Downloaded....`",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 84%\n█████████████████████▒▒▒▒ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nFile Scanned... 100%\n█████████████████████████ `",
"`Quick Heal Total Security Checkup\n\n\nSubscription:` {DEFAULTUSER} `\nValid Until: 31/12/2099\n\nTask: 01 of 01 Files Scanned...\n\nResult: ⚠️Virus Found⚠️\nMore Info: Torzan, Spyware, Adware`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"macoss$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"macoss$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 11)
    event = await edit_or_reply(event, "Now My Phone Becomes a Mac (Apple Laptop)")
animation_chars = [
"`Connecting To Hackintosh...`",
"`Initiating Hackintosh Login.`",
"`Loading Hackintosh... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Hackintosh... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Hackintosh`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"window$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"window$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 11)
    event = await edit_or_reply(event, "Now My Phone Becomes Windows")
animation_chars = [
"`Connecting To Windows 10...`",
"`Initiating Windows 10 Login.`",
"`Loading Windows 10... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Windows 10... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Windows 10`\n\n**My PC Specs:**\n\n **CPU:** __3.4GHz ryzen 9 5950x (16-core,32 threads 64MB cache, up to 4.9GHz)__\n\n**Graphics:** __Nvidia GeForce RTX 3090 OC (24GB GDDR6X)__\n\n**RAM:** __64GB DDR4 (4000MHz)__\n\n**Screen:** __17.3-inch, UHD (3840 x 2160) 144Hz Hdr G-Sync__\n\n**Storage:** __512GB nvme gen 4 SSD, 5 TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.1, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), 2 HDMI2.0, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"linuxs$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"linuxs$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
    event = await edit_or_reply(event, "Now My Phone Becomes Linux")
animation_chars = [
"`Connecting To Linux...`",
"`Initiating Linux Login.`",
"`Loading Linux... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Linux... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Linux`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"stocks$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"stocks$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
event = await edit_or_reply(event, "Stocks")
animation_chars = [
"`Connecting To Symbian OS...`",
"`Initiating Symbian OS Login.`",
"`Loading Symbian OS... 0%\n█████████████████████████ `",
"`Loading Symbian OS... 3%\n█████████████████████▒▒▒▒ `",
"`Loading Symbian OS... 9%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 23%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 39%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 69%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 89%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 100%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Symbian OS`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@bot.on(admin_cmd(pattern=f"oss$", outgoing=True))
@bot.on(sudo_cmd(pattern=f"oss$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 7)
event = await edit_or_reply(event, "OS")
animation_chars = [
"`Scanning OS...`",
"`Scanning OS......`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n☑️ `.macos`\n☑️ `.windows`\n☑️ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n☑️ `.windows`\n☑️ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n✅ `.windows`\n☑️ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n✅ `.windows`\n✅ `.linux`\n☑️ `.stock`",
"__Current Loaded OS: Symbian OS__\n\n**To Boot Other OS, Use The Following Trigger:**\n✅ `.macos`\n✅ `.windows`\n✅ `.linux`\n✅ `.stock`\n\nDeveloped By: @Eviral",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 7])
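# Note: every animation handler above follows the same pattern -- acknowledge the
# command with edit_or_reply(), then repeatedly edit that same message every
# animation_interval seconds, cycling through animation_chars via
# i % len(animation_chars).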
CmdHelp("animation").add_command("eye", None, "Use And see").add_command(
"think", None, "Use and see"
).add_command("snakes", None, "Use and see").add_command(
"humans", None, "Use and see"
).add_command(
"mcs", None, "Use and see"
).add_command(
"viruses", None, "Use and see"
).add_command(
"raping", None, "Use and see"
).add_command(
"nikl", None, "Use and see"
).add_command(
"musics", None, "Use and see"
).add_command(
"squeeze", None, "Use and see"
).add_command(
"loadings", None, "use and see"
).add_command(
"squares", None, "Use and see"
).add_command(
"ups", None, "Use and see"
).add_command(
"rounds", None, "use and see"
).add_command(
"hearts", None, "Use And See"
).add_command(
"animes", None, "Use And See"
).add_command(
"fmls", None, "Use And See"
).add_command(
"monkeys", None, "Use and see"
).add_command(
"herbers", None, "Use and see"
).add_command(
"hands", None, "Use and see"
).add_command(
"gsgs", None, "Use and see"
).add_command(
"unoobs", None, "Use and see"
).add_command(
"menoobs", None, "Use and see"
).add_command(
"upros", None, "Use and see"
).add_command(
"mepros", None, "Use and see"
).add_command(
"thearts", None, "Use and see"
).add_command(
    "quickheals", None, "use and see"
).add_command(
"sqhs", None, "Use and see"
).add_command(
"medkit", None, "Use and see"
).add_command(
    "macoss", None, "use and see"
).add_command(
"window", None, "Use And See"
).add_command(
"linuxs", None, "Use And See"
).add_command(
"stocks", None, "Use And See"
).add_command(
"oss", None, "Use And See"
).add_type(
"Official"
).add_info(
    "A very useful module with lots of animation commands; just try them and have fun"
).add_warning(
"Harmless Module✅"
).add_type(
"Addons"
).add()
import unittest
from domain_scoring.domain_value_transformer import NaiveTransformer, SMALLER
from util.lists import all_pairs
class NaiveDomainValueTransformerTest(unittest.TestCase):
def setUp(self):
self.transformer = NaiveTransformer()
self.oracle = self._create_oracle(range(6))
def test_transform(self):
self.assertEqual([(1, 0.5), (2, 1.0)], self.transformer.transform([(1, 2)], [SMALLER]))
def test_spread_domain_value(self):
self.assertEqual(
[(1, 0.5), (2, 1.0)],
self.transformer._spread_domain_value([1, 2]))
self.assertEqual(
[(1, 0.25), (2, 0.5), (3, .75), (4, 1.0)],
self.transformer._spread_domain_value([1, 2, 3, 4]))
self.assertEqual(
[(1, 0.5), (2, 1.0)],
self.transformer._spread_domain_value([1, 2]))
self.assertEqual(
[(1, 1.0)],
self.transformer._spread_domain_value([1]))
def test_order_pairs(self):
self.assertEqual([0, 1, 2, 3, 4, 5], self.transformer._order_pairs(self.oracle, [5, 3, 1, 0, 2, 4]))
self.assertEqual([1, 2, 3, 4, 5], self.transformer._order_pairs(self.oracle, [5, 3, 1, 2, 4]))
def test_merge(self):
self.assertEqual([1, 2, 3, 4, 5], self.transformer._merge(self.oracle, [1, 2, 3], [4, 5]))
self.assertEqual([1, 2, 3], self.transformer._merge(self.oracle, [3], [1, 2]))
self.assertEqual([0, 1, 2, 3, 4, 5], self.transformer._merge(self.oracle, [1, 4, 5], [0, 2, 3]))
self.assertEqual([0, 1, 2, 3, 4, 5], self.transformer._merge(self.oracle, [], [0, 1, 2, 3, 4, 5]))
self.assertEqual([0, 1, 2, 3, 4, 5], self.transformer._merge(self.oracle, [0, 1, 2, 3, 4, 5], []))
def test_extract_metapaths(self):
pairs = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
self.assertEqual([1, 2, 3, 4], self.transformer._extract_metapaths(pairs))
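    # Helper: the oracle labels every ordered pair as SMALLER, so _order_pairs
    # and _merge are expected to return the elements in ascending order.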
def _create_oracle(self, elements):
all_elements = all_pairs(elements)
return dict(zip(all_elements, [SMALLER] * len(all_elements)))
if __name__ == '__main__':
unittest.main()
tests/domain_scoring/domain_value_transformer_test.py
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"CheckpointingNotSupported": "00_callbacks.ipynb",
"GradientCheckpointing": "00_callbacks.ipynb",
"Singleton": "00_utils.ipynb",
"str_to_type": "00_utils.ipynb",
"print_versions": "00_utils.ipynb",
"set_seed": "00_utils.ipynb",
"PreCalculatedLoss": "00_utils.ipynb",
"PreCalculatedCrossEntropyLoss": "00_utils.ipynb",
"PreCalculatedBCELoss": "00_utils.ipynb",
"PreCalculatedMSELoss": "00_utils.ipynb",
"MultiTargetLoss": "00_utils.ipynb",
"get_hf_objects": "01_text-utils.ipynb",
"BlurrText": "01_text-utils.ipynb",
"Preprocessor": "11_text-data-core.ipynb",
"ClassificationPreprocessor": "11_text-data-core.ipynb",
"TextInput": "11_text-data-core.ipynb",
"BatchTokenizeTransform": "11_text-data-core.ipynb",
"BatchDecodeTransform": "11_text-data-core.ipynb",
"blurr_sort_func": "11_text-data-core.ipynb",
"TextBlock": "11_text-data-core.ipynb",
"get_blurr_tfm": "11_text-data-core.ipynb",
"first_blurr_tfm": "11_text-data-core.ipynb",
"TextBatchCreator": "11_text-data-core.ipynb",
"TextDataLoader": "11_text-data-core.ipynb",
"preproc_hf_dataset": "11_text-data-core.ipynb",
"blurr_splitter": "11_text-modeling-core.ipynb",
"BaseModelWrapper": "11_text-modeling-core.ipynb",
"BaseModelCallback": "11_text-modeling-core.ipynb",
"Learner.blurr_predict": "11_text-modeling-core.ipynb",
"Learner.blurr_generate": "11_text-modeling-core.ipynb",
"Blearner": "11_text-modeling-core.ipynb",
"BlearnerForSequenceClassification": "11_text-modeling-core.ipynb",
"LMPreprocessor": "12_text-data-language-modeling.ipynb",
"LMType": "12_text-data-language-modeling.ipynb",
"BaseLMStrategy": "12_text-data-language-modeling.ipynb",
"CausalLMStrategy": "12_text-data-language-modeling.ipynb",
"BertMLMStrategy": "12_text-data-language-modeling.ipynb",
"CausalLMTextInput": "12_text-data-language-modeling.ipynb",
"MLMTextInput": "12_text-data-language-modeling.ipynb",
"LMBatchTokenizeTransform": "12_text-data-language-modeling.ipynb",
"LMMetricsCallback": "12_text-modeling-language-modeling.ipynb",
"Learner.blurr_fill_mask": "12_text-modeling-language-modeling.ipynb",
"BlearnerForLM": "12_text-modeling-language-modeling.ipynb",
"TokenClassPreprocessor": "13_text-data-token-classification.ipynb",
"BaseLabelingStrategy": "13_text-data-token-classification.ipynb",
"OnlyFirstTokenLabelingStrategy": "13_text-data-token-classification.ipynb",
"SameLabelLabelingStrategy": "13_text-data-token-classification.ipynb",
"BILabelingStrategy": "13_text-data-token-classification.ipynb",
"get_token_labels_from_input_ids": "13_text-data-token-classification.ipynb",
"get_word_labels_from_token_labels": "13_text-data-token-classification.ipynb",
"TokenTensorCategory": "13_text-data-token-classification.ipynb",
"TokenCategorize": "13_text-data-token-classification.ipynb",
"TokenCategoryBlock": "13_text-data-token-classification.ipynb",
"TokenClassTextInput": "13_text-data-token-classification.ipynb",
"TokenClassBatchTokenizeTransform": "13_text-data-token-classification.ipynb",
"calculate_token_class_metrics": "13_text-modeling-token-classification.ipynb",
"TokenClassMetricsCallback": "13_text-modeling-token-classification.ipynb",
"TokenAggregationStrategies": "13_text-modeling-token-classification.ipynb",
"Learner.blurr_predict_tokens": "13_text-modeling-token-classification.ipynb",
"BlearnerForTokenClassification": "13_text-modeling-token-classification.ipynb",
"QAPreprocessor": "14_text-data-question-answering.ipynb",
"QATextInput": "14_text-data-question-answering.ipynb",
"QABatchTokenizeTransform": "14_text-data-question-answering.ipynb",
"squad_metric": "14_text-modeling-question-answering.ipynb",
"QAModelCallback": "14_text-modeling-question-answering.ipynb",
"QAMetricsCallback": "14_text-modeling-question-answering.ipynb",
"compute_qa_metrics": "14_text-modeling-question-answering.ipynb",
"PreCalculatedQALoss": "14_text-modeling-question-answering.ipynb",
"Learner.blurr_predict_answers": "14_text-modeling-question-answering.ipynb",
"BlearnerForQuestionAnswering": "14_text-modeling-question-answering.ipynb",
"Seq2SeqPreprocessor": "20_text-data-seq2seq-core.ipynb",
"Seq2SeqTextInput": "20_text-data-seq2seq-core.ipynb",
"Seq2SeqBatchTokenizeTransform": "20_text-data-seq2seq-core.ipynb",
"Seq2SeqBatchDecodeTransform": "20_text-data-seq2seq-core.ipynb",
"default_text_gen_kwargs": "20_text-data-seq2seq-core.ipynb",
"Seq2SeqTextBlock": "20_text-data-seq2seq-core.ipynb",
"blurr_seq2seq_splitter": "20_text-modeling-seq2seq-core.ipynb",
"Seq2SeqMetricsCallback": "20_text-modeling-seq2seq-core.ipynb",
"SummarizationPreprocessor": "21_text-data-seq2seq-summarization.ipynb",
"Learner.blurr_summarize": "21_text-modeling-seq2seq-summarization.ipynb",
"BlearnerForSummarization": "21_text-modeling-seq2seq-summarization.ipynb",
"TranslationPreprocessor": "22_text-data-seq2seq-translation.ipynb",
"Learner.blurr_translate": "22_text-modeling-seq2seq-translation.ipynb",
"BlearnerForTranslation": "22_text-modeling-seq2seq-translation.ipynb"}
modules = ["callbacks.py",
"utils.py",
"text/callbacks.py",
"text/utils.py",
"text/data/core.py",
"text/modeling/core.py",
"text/data/language_modeling.py",
"text/modeling/language_modeling.py",
"text/data/token_classification.py",
"text/modeling/token_classification.py",
"text/data/question_answering.py",
"text/modeling/question_answering.py",
"text/data/seq2seq/core.py",
"text/modeling/seq2seq/core.py",
"text/data/seq2seq/summarization.py",
"text/modeling/seq2seq/summarization.py",
"text/data/seq2seq/translation.py",
"text/modeling/seq2seq/translation.py",
"examples/text/high_level_api.py",
"examples/text/glue.py",
"examples/text/glue_low_level_api.py",
"examples/text/multilabel_classification.py",
"examples/text/causal_lm_gpt2.py"]
doc_url = "https://ohmeow.github.io/blurr/"
git_url = "https://github.com/ohmeow/blurr/tree/master/"
def custom_doc_links(name): return None
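# Note: files like this are normally auto-generated by nbdev; "index" maps each
# exported symbol to the notebook that defines it, "modules" lists the generated
# library files, and custom_doc_links (returning None here) opts out of custom
# documentation links.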
blurr/_nbdev.py
import math
import pandas as pd
import os
class BaseGeoCalculations():
@staticmethod
def __getLength(lon_max, lon_min):
return lon_max - lon_min
@staticmethod
def __getWidth(lat_max, lat_min):
return lat_max - lat_min
@staticmethod
    # Returns a tuple with the side length and width in degrees.
def __getLengthWidth(lon_max, lon_min, lat_max, lat_min):
side_length = BaseGeoCalculations.__getLength(lon_max, lon_min)
side_width = BaseGeoCalculations.__getWidth(lat_max, lat_min)
return (side_length, side_width)
@staticmethod
def getCenter(lon_max, lon_min, lat_max, lat_min):
side_length, side_width = BaseGeoCalculations.__getLengthWidth(
lon_max, lon_min, lat_max, lat_min)
lon_center = side_length/2 + lon_min
        lat_center = side_width/2 + lat_min  # latitude center comes from the latitude span (side_width)
return {"lon_center": lon_center, "lat_center": lat_center}
@staticmethod
def addBuffer(lon_max, lon_min, lat_max, lat_min):
        # Calculates new vertices for the map, adding a buffer of 1/5 of each side
        side_length, side_width = BaseGeoCalculations.__getLengthWidth(
            lon_max, lon_min, lat_max, lat_min)
buffer_x = side_length/5
buffer_y = side_width/5
new_lon_max = lon_max + buffer_x
new_lon_min = lon_min - buffer_x
new_lat_max = lat_max + buffer_y
new_lat_min = lat_min - buffer_y
return {"lon_max": new_lon_max, "lon_min": new_lon_min,
"lat_max": new_lat_max, "lat_min": new_lat_min}
class MapParameters(BaseGeoCalculations):
directory = os.getcwd()
path_csv = os.path.join(directory, "ACExpansion", "utils", "AC_american_records_csv.csv")
data = pd.read_csv(path_csv)
def __init__(self, country):
super().__init__()
self.country = country
self.data_country = self.filter_data_by_country()
def filter_data_by_country(self):
data_country = MapParameters.data[
MapParameters.data.country==self.country]
return data_country
def getVertices(self):
lon_max = self.data_country["longitude"].max()
lon_min = self.data_country["longitude"].min()
lat_max = self.data_country["latitude"].max()
lat_min = self.data_country["latitude"].min()
return {"lon_max":lon_max, "lon_min":lon_min,
"lat_max":lat_max, "lat_min":lat_min}
def getMapParameters(self):
vertices = self.getVertices()
buffer_coord = self.addBuffer(**vertices)
map_center = self.getCenter(**buffer_coord)
return {**map_center, **buffer_coord, "country": self.country}
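# Hypothetical usage sketch (the country name is an illustrative assumption; the
# CSV is expected to provide "country", "longitude" and "latitude" columns as
# used above):
#   params = MapParameters("Mexico").getMapParameters()
#   # -> {"lon_center": ..., "lat_center": ..., "lon_max": ..., "lon_min": ...,
#   #     "lat_max": ..., "lat_min": ..., "country": "Mexico"}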
AcridotheresCristatellusExpansion/ACExpansion/utils/utils_get_map_parameters.py
import sys
import importlib
def _get_deps_info():
"""Overview of the installed version of main dependencies
Returns
-------
deps_info: dict
version information on relevant Python libraries
"""
deps = [
"pip",
"setuptools",
"imblearn",
"sklearn",
"numpy",
"scipy",
"Cython",
"pandas",
"keras",
"tensorflow",
"joblib",
]
def get_version(module):
return module.__version__
deps_info = {}
for modname in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = get_version(mod)
deps_info[modname] = ver
except ImportError:
deps_info[modname] = None
return deps_info
def show_versions(github=False):
"""Print debugging information.
.. versionadded:: 0.5
Parameters
----------
github : bool,
If true, wrap system info with GitHub markup.
"""
from sklearn.utils._show_versions import _get_sys_info
_sys_info = _get_sys_info()
_deps_info = _get_deps_info()
_github_markup = (
"<details>"
"<summary>System, Dependency Information</summary>\n\n"
"**System Information**\n\n"
"{0}\n"
"**Python Dependencies**\n\n"
"{1}\n"
"</details>"
)
if github:
_sys_markup = ""
_deps_markup = ""
for k, stat in _sys_info.items():
_sys_markup += f"* {k:<10}: `{stat}`\n"
for k, stat in _deps_info.items():
_deps_markup += f"* {k:<10}: `{stat}`\n"
print(_github_markup.format(_sys_markup, _deps_markup))
else:
print("\nSystem:")
for k, stat in _sys_info.items():
print(f"{k:>11}: {stat}")
print("\nPython dependencies:")
for k, stat in _deps_info.items():
print(f"{k:>11}: {stat}")
imblearn/utils/_show_versions.py
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import keras
from framesandoflow import files2frames, images_normalize
class FramesGenerator(keras.utils.Sequence):
"""
    Reads and yields video frames / optical flow for keras Model.fit_generator
"""
def __init__(self, sPath,
nBatchSize, nFrames, nHeight, nWidth, nChannels,
liClassesFull=None, bShuffle=True):
"""
Assume directory structure:
... / sPath / class / videoname / frames.jpg
"""
self.nBatchSize = nBatchSize
self.nFrames = nFrames
self.nHeight = nHeight
self.nWidth = nWidth
self.nChannels = nChannels
self.tuXshape = (nFrames, nHeight, nWidth, nChannels)
self.bShuffle = bShuffle
# retrieve all videos = frame directories
a = []
f = os.listdir(sPath)
for item in f:
inner = sPath + "/" + item
inner_path = os.listdir(inner)
for item2 in inner_path:
l = inner + "/" + item2
a.append(l)
self.dfVideos = pd.DataFrame(sorted(a), columns=["sFrameDir"])
self.nSamples = len(self.dfVideos)
        if self.nSamples == 0: raise ValueError("Found no frame directories in " + sPath)
print("Detected %d samples in %s ..." % (self.nSamples, sPath))
# extract class labels from path
seLabels = self.dfVideos.sFrameDir.apply(lambda s: s.split("/")[-2])
self.dfVideos.loc[:, "sLabel"] = seLabels
# extract list of unique classes from all detected labels
self.liClasses = sorted(list(self.dfVideos.sLabel.unique()))
self.nClasses = len(self.liClasses)
# encode labels
        trLabelEncoder = LabelEncoder()  # creates an instance of LabelEncoder
trLabelEncoder.fit(self.liClasses) # encodes label into class value from 0 to nclasses-1
self.dfVideos.loc[:, "nLabel"] = trLabelEncoder.transform(
self.dfVideos.sLabel) # convert textual labels into numerical encoded labels
# inverse transform does the opposite
self.on_epoch_end()
return
def __len__(self):
"""
Denotes the number of batches per epoch
"""
return int(np.ceil(self.nSamples / self.nBatchSize))
def on_epoch_end(self):
"""
Updates indexes after each epoch
"""
self.indexes = np.arange(self.nSamples)
if self.bShuffle == True:
np.random.shuffle(self.indexes)
def __getitem__(self, nStep):
"""
Generate one batch of data
"""
# Generate indexes of the batch
indexes = self.indexes[nStep * self.nBatchSize:(nStep + 1) * self.nBatchSize]
# get batch of videos
dfVideosBatch = self.dfVideos.loc[indexes, :]
nBatchSize = len(dfVideosBatch)
# initialize arrays
arX = np.empty((nBatchSize,) + self.tuXshape, dtype=float)
arY = np.empty((nBatchSize), dtype=int)
# Generate data
for i in range(nBatchSize):
# generate data for single video(frames)
arX[i,], arY[i] = self.__data_generation(dfVideosBatch.iloc[i, :])
# onehot the labels
return arX, keras.utils.to_categorical(arY, num_classes=self.nClasses)
def __data_generation(self, seVideo):
"""
Returns frames for 1 video, including normalizing & preprocessing
"""
# Get the frames from disc
ar_nFrames = files2frames(seVideo.sFrameDir)
# only use the first nChannels (typically 3, but maybe 2 for optical flow)
ar_nFrames = ar_nFrames[..., 0:self.nChannels]
ar_fFrames = images_normalize(ar_nFrames, self.nFrames, self.nHeight, self.nWidth, bRescale=True)
return ar_fFrames, seVideo.nLabel
def data_generation(self, seVideo):
return self.__data_generation(seVideo)
class VideoClasses():
"""
Loads the video classes (incl descriptions) from a csv file
"""
def __init__(self, sClassFile: str):
self.dfClass = pd.read_csv(sClassFile)
self.dfClass = self.dfClass.sort_values("sClass").reset_index(drop=True)
self.liClasses = list(self.dfClass.sClass)
self.nClasses = len(self.dfClass)
print("Loaded %d classes from %s" % (self.nClasses, sClassFile))
return
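# Minimal usage sketch (paths, shapes and the model are illustrative assumptions):
#   genTrain = FramesGenerator("data/train", nBatchSize=16, nFrames=40,
#                              nHeight=224, nWidth=224, nChannels=3)
#   oClasses = VideoClasses("data/classes.csv")
#   model.fit_generator(genTrain, epochs=10)  # each __getitem__ call yields one batch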
datagenerator.py
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'GeoLocation.lat'
db.alter_column(u'faver_geolocation', 'lat', self.gf('django.db.models.fields.CharField')(max_length=15))
# Changing field 'GeoLocation.long'
db.alter_column(u'faver_geolocation', 'long', self.gf('django.db.models.fields.CharField')(max_length=15))
def backwards(self, orm):
# Changing field 'GeoLocation.lat'
db.alter_column(u'faver_geolocation', 'lat', self.gf('django.db.models.fields.CharField')(max_length=10))
# Changing field 'GeoLocation.long'
db.alter_column(u'faver_geolocation', 'long', self.gf('django.db.models.fields.CharField')(max_length=10))
models = {
u'faver.geolocation': {
'Meta': {'object_name': 'GeoLocation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'long': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'faver.route': {
'Meta': {'object_name': 'Route'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nr': ('django.db.models.fields.IntegerField', [], {}),
'stops': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['faver.StationStop']", 'symmetrical': 'False'})
},
u'faver.station': {
'Meta': {'object_name': 'Station'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rtec_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'faver.stationstop': {
'Meta': {'object_name': 'StationStop'},
'day_type': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['faver.GeoLocation']", 'null': 'True', 'blank': 'True'}),
'order_nr': ('django.db.models.fields.IntegerField', [], {}),
'station': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['faver.Station']"})
},
u'faver.stoptime': {
'Meta': {'object_name': 'StopTime'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'station': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stopstime'", 'null': 'True', 'to': u"orm['faver.StationStop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['faver']
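# With South (Django < 1.7), this schema change would typically be applied with:
#   python manage.py migrate faver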
trolly/faver/migrations/0007_auto__chg_field_geolocation_lat__chg_field_geolocation_long.py
import base64
import json
from typing import Any, Dict, List
from google.protobuf.json_format import Parse, ParseDict
from cosmpy.common.rest_client import RestClient
from cosmpy.protos.cosmos.crypto.secp256k1.keys_pb2 import ( # noqa: F401 # pylint: disable=unused-import
PubKey as ProtoPubKey,
)
from cosmpy.protos.cosmos.tx.v1beta1.service_pb2 import (
BroadcastTxRequest,
BroadcastTxResponse,
GetTxRequest,
GetTxResponse,
GetTxsEventRequest,
GetTxsEventResponse,
SimulateRequest,
SimulateResponse,
)
from cosmpy.protos.cosmwasm.wasm.v1beta1.tx_pb2 import ( # noqa: F401 # pylint: disable=unused-import
MsgExecuteContract,
MsgInstantiateContract,
MsgStoreCode,
)
from cosmpy.tx.interface import TxInterface
# Unused imports are required to make sure that related types get generated - Parse and ParseDict fail without them
class TxRestClient(TxInterface):
"""Tx REST client."""
API_URL = "/cosmos/tx/v1beta1"
def __init__(self, rest_client: RestClient) -> None:
"""
Create a Tx rest client
:param rest_client: RestClient api
"""
self.rest_client = rest_client
def Simulate(self, request: SimulateRequest) -> SimulateResponse:
"""
Simulate executing a transaction to estimate gas usage.
:param request: SimulateRequest
:return: SimulateResponse
"""
response = self.rest_client.get(
f"{self.API_URL}/simulate",
)
return Parse(response, SimulateResponse())
def GetTx(self, request: GetTxRequest) -> GetTxResponse:
"""
GetTx fetches a tx by hash.
:param request: GetTxRequest
:return: GetTxResponse
"""
response = self.rest_client.get(f"{self.API_URL}/txs/{request.hash}")
        # Workaround: CosmWasm messages arrive as nested JSON; re-encode them to base64 before protobuf parsing
dict_response = json.loads(response)
self._fix_messages(dict_response["tx"]["body"]["messages"])
self._fix_messages(dict_response["tx_response"]["tx"]["body"]["messages"])
return ParseDict(dict_response, GetTxResponse())
def BroadcastTx(self, request: BroadcastTxRequest) -> BroadcastTxResponse:
"""
        BroadcastTx broadcasts a transaction.
:param request: BroadcastTxRequest
:return: BroadcastTxResponse
"""
response = self.rest_client.post(f"{self.API_URL}/txs", request)
return Parse(response, BroadcastTxResponse())
def GetTxsEvent(self, request: GetTxsEventRequest) -> GetTxsEventResponse:
"""
GetTxsEvent fetches txs by event.
:param request: GetTxsEventRequest
:return: GetTxsEventResponse
"""
response = self.rest_client.get(f"{self.API_URL}/txs", request)
        # Workaround: CosmWasm messages arrive as nested JSON; re-encode them to base64 before protobuf parsing
dict_response = json.loads(response)
for tx in dict_response["txs"]:
self._fix_messages(tx["body"]["messages"])
for tx_response in dict_response["tx_responses"]:
self._fix_messages(tx_response["tx"]["body"]["messages"])
return ParseDict(dict_response, GetTxsEventResponse())
@staticmethod
def _fix_messages(messages: List[Dict[str, Any]]):
"""
        Fix for REST API responses where CosmWasm messages contain a dict instead of a base64-encoded string
        :param messages: List of messages in the Tx response
"""
for message in messages:
if message["@type"] == "/cosmwasm.wasm.v1.MsgInstantiateContract":
message["msg"] = base64.b64encode(
json.dumps(message["msg"]).encode("UTF8")
).decode()
if message["@type"] == "/cosmwasm.wasm.v1.MsgExecuteContract":
message["msg"] = base64.b64encode(
json.dumps(message["msg"]).encode("UTF8")
).decode()
|
cosmpy/tx/rest_client.py
| 0.782579 | 0.077204 |
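A minimal usage sketch for the TxRestClient above; it assumes cosmpy's RestClient is constructed from a node's REST address, and the endpoint URL and transaction hash are placeholders.

from cosmpy.common.rest_client import RestClient
from cosmpy.protos.cosmos.tx.v1beta1.service_pb2 import GetTxRequest
from cosmpy.tx.rest_client import TxRestClient

# Placeholder REST endpoint and tx hash; substitute values for a real node.
rest_client = RestClient("http://localhost:1317")
tx_client = TxRestClient(rest_client)

# GetTx re-encodes any CosmWasm messages to base64 via _fix_messages()
# before parsing the JSON response into a GetTxResponse protobuf.
response = tx_client.GetTx(GetTxRequest(hash="ABC123..."))
print(response.tx_response.code)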
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_design_objects import SizingSystem
log = logging.getLogger(__name__)
class TestSizingSystem(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_sizingsystem(self):
pyidf.validation_level = ValidationLevel.error
obj = SizingSystem()
# object-list
var_airloop_name = "object-list|AirLoop Name"
obj.airloop_name = var_airloop_name
# alpha
var_type_of_load_to_size_on = "Sensible"
obj.type_of_load_to_size_on = var_type_of_load_to_size_on
# real
var_design_outdoor_air_flow_rate = 0.0
obj.design_outdoor_air_flow_rate = var_design_outdoor_air_flow_rate
# real
var_central_heating_maximum_system_air_flow_ratio = 0.5
obj.central_heating_maximum_system_air_flow_ratio = var_central_heating_maximum_system_air_flow_ratio
# real
var_preheat_design_temperature = 5.5
obj.preheat_design_temperature = var_preheat_design_temperature
# real
var_preheat_design_humidity_ratio = 6.6
obj.preheat_design_humidity_ratio = var_preheat_design_humidity_ratio
# real
var_precool_design_temperature = 7.7
obj.precool_design_temperature = var_precool_design_temperature
# real
var_precool_design_humidity_ratio = 8.8
obj.precool_design_humidity_ratio = var_precool_design_humidity_ratio
# real
var_central_cooling_design_supply_air_temperature = 9.9
obj.central_cooling_design_supply_air_temperature = var_central_cooling_design_supply_air_temperature
# real
var_central_heating_design_supply_air_temperature = 10.1
obj.central_heating_design_supply_air_temperature = var_central_heating_design_supply_air_temperature
# alpha
var_type_of_zone_sum_to_use = "Coincident"
obj.type_of_zone_sum_to_use = var_type_of_zone_sum_to_use
# alpha
var_a_100_outdoor_air_in_cooling = "Yes"
obj.a_100_outdoor_air_in_cooling = var_a_100_outdoor_air_in_cooling
# alpha
var_a_100_outdoor_air_in_heating = "Yes"
obj.a_100_outdoor_air_in_heating = var_a_100_outdoor_air_in_heating
# real
var_central_cooling_design_supply_air_humidity_ratio = 14.14
obj.central_cooling_design_supply_air_humidity_ratio = var_central_cooling_design_supply_air_humidity_ratio
# real
var_central_heating_design_supply_air_humidity_ratio = 15.15
obj.central_heating_design_supply_air_humidity_ratio = var_central_heating_design_supply_air_humidity_ratio
# alpha
var_cooling_supply_air_flow_rate_method = "Flow/System"
obj.cooling_supply_air_flow_rate_method = var_cooling_supply_air_flow_rate_method
# real
var_cooling_supply_air_flow_rate = 0.0
obj.cooling_supply_air_flow_rate = var_cooling_supply_air_flow_rate
# real
var_cooling_supply_air_flow_rate_per_floor_area = 0.0
obj.cooling_supply_air_flow_rate_per_floor_area = var_cooling_supply_air_flow_rate_per_floor_area
# real
var_cooling_fraction_of_autosized_cooling_supply_air_flow_rate = 0.0
obj.cooling_fraction_of_autosized_cooling_supply_air_flow_rate = var_cooling_fraction_of_autosized_cooling_supply_air_flow_rate
# real
var_cooling_supply_air_flow_rate_per_unit_cooling_capacity = 0.0
obj.cooling_supply_air_flow_rate_per_unit_cooling_capacity = var_cooling_supply_air_flow_rate_per_unit_cooling_capacity
# alpha
var_heating_supply_air_flow_rate_method = "Flow/System"
obj.heating_supply_air_flow_rate_method = var_heating_supply_air_flow_rate_method
# real
var_heating_supply_air_flow_rate = 0.0
obj.heating_supply_air_flow_rate = var_heating_supply_air_flow_rate
# real
var_heating_supply_air_flow_rate_per_floor_area = 0.0
obj.heating_supply_air_flow_rate_per_floor_area = var_heating_supply_air_flow_rate_per_floor_area
# real
var_heating_fraction_of_autosized_heating_supply_air_flow_rate = 0.0
obj.heating_fraction_of_autosized_heating_supply_air_flow_rate = var_heating_fraction_of_autosized_heating_supply_air_flow_rate
# real
var_heating_fraction_of_autosized_cooling_supply_air_flow_rate = 0.0
obj.heating_fraction_of_autosized_cooling_supply_air_flow_rate = var_heating_fraction_of_autosized_cooling_supply_air_flow_rate
# real
var_heating_supply_air_flow_rate_per_unit_heating_capacity = 0.0
obj.heating_supply_air_flow_rate_per_unit_heating_capacity = var_heating_supply_air_flow_rate_per_unit_heating_capacity
# alpha
var_system_outdoor_air_method = "ZoneSum"
obj.system_outdoor_air_method = var_system_outdoor_air_method
# real
var_zone_maximum_outdoor_air_fraction = 0.0001
obj.zone_maximum_outdoor_air_fraction = var_zone_maximum_outdoor_air_fraction
# alpha
var_cooling_design_capacity_method = "None"
obj.cooling_design_capacity_method = var_cooling_design_capacity_method
# real
var_cooling_design_capacity = 0.0
obj.cooling_design_capacity = var_cooling_design_capacity
# real
var_cooling_design_capacity_per_floor_area = 0.0
obj.cooling_design_capacity_per_floor_area = var_cooling_design_capacity_per_floor_area
# real
var_fraction_of_autosized_cooling_design_capacity = 0.0
obj.fraction_of_autosized_cooling_design_capacity = var_fraction_of_autosized_cooling_design_capacity
# alpha
var_heating_design_capacity_method = "None"
obj.heating_design_capacity_method = var_heating_design_capacity_method
# real
var_heating_design_capacity = 0.0
obj.heating_design_capacity = var_heating_design_capacity
# real
var_heating_design_capacity_per_floor_area = 0.0
obj.heating_design_capacity_per_floor_area = var_heating_design_capacity_per_floor_area
# real
var_fraction_of_autosized_heating_design_capacity = 0.0
obj.fraction_of_autosized_heating_design_capacity = var_fraction_of_autosized_heating_design_capacity
# alpha
var_central_cooling_capacity_control_method = "VAV"
obj.central_cooling_capacity_control_method = var_central_cooling_capacity_control_method
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.sizingsystems[0].airloop_name, var_airloop_name)
self.assertEqual(idf2.sizingsystems[0].type_of_load_to_size_on, var_type_of_load_to_size_on)
self.assertAlmostEqual(idf2.sizingsystems[0].design_outdoor_air_flow_rate, var_design_outdoor_air_flow_rate)
self.assertAlmostEqual(idf2.sizingsystems[0].central_heating_maximum_system_air_flow_ratio, var_central_heating_maximum_system_air_flow_ratio)
self.assertAlmostEqual(idf2.sizingsystems[0].preheat_design_temperature, var_preheat_design_temperature)
self.assertAlmostEqual(idf2.sizingsystems[0].preheat_design_humidity_ratio, var_preheat_design_humidity_ratio)
self.assertAlmostEqual(idf2.sizingsystems[0].precool_design_temperature, var_precool_design_temperature)
self.assertAlmostEqual(idf2.sizingsystems[0].precool_design_humidity_ratio, var_precool_design_humidity_ratio)
self.assertAlmostEqual(idf2.sizingsystems[0].central_cooling_design_supply_air_temperature, var_central_cooling_design_supply_air_temperature)
self.assertAlmostEqual(idf2.sizingsystems[0].central_heating_design_supply_air_temperature, var_central_heating_design_supply_air_temperature)
self.assertEqual(idf2.sizingsystems[0].type_of_zone_sum_to_use, var_type_of_zone_sum_to_use)
self.assertEqual(idf2.sizingsystems[0].a_100_outdoor_air_in_cooling, var_a_100_outdoor_air_in_cooling)
self.assertEqual(idf2.sizingsystems[0].a_100_outdoor_air_in_heating, var_a_100_outdoor_air_in_heating)
self.assertAlmostEqual(idf2.sizingsystems[0].central_cooling_design_supply_air_humidity_ratio, var_central_cooling_design_supply_air_humidity_ratio)
self.assertAlmostEqual(idf2.sizingsystems[0].central_heating_design_supply_air_humidity_ratio, var_central_heating_design_supply_air_humidity_ratio)
self.assertEqual(idf2.sizingsystems[0].cooling_supply_air_flow_rate_method, var_cooling_supply_air_flow_rate_method)
self.assertAlmostEqual(idf2.sizingsystems[0].cooling_supply_air_flow_rate, var_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.sizingsystems[0].cooling_supply_air_flow_rate_per_floor_area, var_cooling_supply_air_flow_rate_per_floor_area)
self.assertAlmostEqual(idf2.sizingsystems[0].cooling_fraction_of_autosized_cooling_supply_air_flow_rate, var_cooling_fraction_of_autosized_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.sizingsystems[0].cooling_supply_air_flow_rate_per_unit_cooling_capacity, var_cooling_supply_air_flow_rate_per_unit_cooling_capacity)
self.assertEqual(idf2.sizingsystems[0].heating_supply_air_flow_rate_method, var_heating_supply_air_flow_rate_method)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_supply_air_flow_rate, var_heating_supply_air_flow_rate)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_supply_air_flow_rate_per_floor_area, var_heating_supply_air_flow_rate_per_floor_area)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_fraction_of_autosized_heating_supply_air_flow_rate, var_heating_fraction_of_autosized_heating_supply_air_flow_rate)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_fraction_of_autosized_cooling_supply_air_flow_rate, var_heating_fraction_of_autosized_cooling_supply_air_flow_rate)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_supply_air_flow_rate_per_unit_heating_capacity, var_heating_supply_air_flow_rate_per_unit_heating_capacity)
self.assertEqual(idf2.sizingsystems[0].system_outdoor_air_method, var_system_outdoor_air_method)
self.assertAlmostEqual(idf2.sizingsystems[0].zone_maximum_outdoor_air_fraction, var_zone_maximum_outdoor_air_fraction)
self.assertEqual(idf2.sizingsystems[0].cooling_design_capacity_method, var_cooling_design_capacity_method)
self.assertAlmostEqual(idf2.sizingsystems[0].cooling_design_capacity, var_cooling_design_capacity)
self.assertAlmostEqual(idf2.sizingsystems[0].cooling_design_capacity_per_floor_area, var_cooling_design_capacity_per_floor_area)
self.assertAlmostEqual(idf2.sizingsystems[0].fraction_of_autosized_cooling_design_capacity, var_fraction_of_autosized_cooling_design_capacity)
self.assertEqual(idf2.sizingsystems[0].heating_design_capacity_method, var_heating_design_capacity_method)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_design_capacity, var_heating_design_capacity)
self.assertAlmostEqual(idf2.sizingsystems[0].heating_design_capacity_per_floor_area, var_heating_design_capacity_per_floor_area)
self.assertAlmostEqual(idf2.sizingsystems[0].fraction_of_autosized_heating_design_capacity, var_fraction_of_autosized_heating_design_capacity)
self.assertEqual(idf2.sizingsystems[0].central_cooling_capacity_control_method, var_central_cooling_capacity_control_method)
|
tests/test_sizingsystem.py
| 0.355663 | 0.141281 |
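A condensed sketch of the save/reload round trip that the test above exercises, keeping only two of the fields; the temporary-file handling mirrors its setUp/tearDown.

import os
import tempfile

import pyidf
from pyidf import ValidationLevel
from pyidf.idf import IDF
from pyidf.hvac_design_objects import SizingSystem

pyidf.validation_level = ValidationLevel.error

obj = SizingSystem()
obj.type_of_load_to_size_on = "Sensible"
obj.central_heating_maximum_system_air_flow_ratio = 0.5

fd, path = tempfile.mkstemp()
try:
    idf = IDF()
    idf.add(obj)
    idf.save(path, check=False)
    # Reload the file and confirm the fields survived the round trip.
    idf2 = IDF(path)
    assert idf2.sizingsystems[0].type_of_load_to_size_on == "Sensible"
finally:
    os.close(fd)
    os.remove(path)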
from typing import Optional
from torch import nn
from torch.nn import functional as F
from ..ff import FF
class SpeechLSTM(nn.Module):
"""A bidirectional LSTM encoder with subsampling for speech features.
The number of LSTM layers is defined by the `layers` argument, i.e.
    `1_1_2_2_1_1` denotes 6 LSTM layers where the middle two apply
a subsampling factor of 2 to their inputs. Subsampling in this context
means that every N'th state will be passed to the next layer as input.
Each LSTM layer is followed by a feed-forward projection layer whose
non-linearity is given by the `activ` argument.
Note:
The input tensor should contain samples of equal lengths i.e.
`bucket_by` in training configuration should be set to the acoustic
features modality.
Args:
input_size: Input feature dimensionality.
hidden_size: LSTM hidden state dimensionality.
proj_size: Projection layer size.
activ: Non-linearity to apply to intermediate projection
layers. (Default: 'tanh')
layers: A '_' separated list of integers that defines the subsampling
factor for each LSTM.
dropout: Use dropout (Default: 0.)
Input:
x: A `torch.Tensor` of shape `(n_timesteps, n_samples, input_size)`
Output:
        hs: A `torch.Tensor` of shape `(n_timesteps, n_samples, proj_size)`
            holding the projected encoder states for all timesteps (the final
            FF layer maps the `hidden_size * 2` LSTM context down to `proj_size`).
mask: `None` since this layer expects all equal frame inputs.
"""
def __init__(self, input_size: int, hidden_size: int, proj_size: int,
layers: str, activ: Optional[str] = 'tanh',
dropout: float = 0.0):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.proj_size = proj_size
self.activ = activ
self.layers = [int(i) for i in layers.split('_')]
self.dropout = dropout
self.n_layers = len(self.layers)
# Doubles its size because of concatenation of forw-backw encs
self.ctx_size = self.hidden_size * 2
# Fill 0-vector as <eos> to the end of the frames
self.pad_tuple = (0, 0, 0, 0, 0, 1)
# Projections and LSTMs
self.ffs = nn.ModuleList()
self.lstms = nn.ModuleList()
if self.dropout > 0:
self.do = nn.Dropout(self.dropout)
for i, ss_factor in enumerate(self.layers):
# Add LSTMs
self.lstms.append(nn.LSTM(
self.input_size if i == 0 else self.hidden_size,
self.hidden_size, bidirectional=True))
# Add non-linear bottlenecks
self.ffs.append(FF(
self.ctx_size, self.proj_size, activ=self.activ))
def forward(self, x, **kwargs):
# Generate a mask to detect padded sequences
mask = x.ne(0).float().sum(2).ne(0).float()
if mask.eq(0).nonzero().numel() > 0:
raise RuntimeError("Non-homogeneous batch detected in SpeechLSTM layer.")
# Pad with <eos> zero
hs = F.pad(x, self.pad_tuple)
for (ss_factor, f_lstm, f_ff) in zip(self.layers, self.lstms, self.ffs):
if ss_factor > 1:
# Skip states
hs = f_ff(f_lstm(hs[::ss_factor])[0])
else:
hs = f_ff(f_lstm(hs)[0])
if self.dropout > 0:
hs = self.do(hs)
# No mask is returned as batch should contain same-length sequences
return hs, None
|
pysimt/layers/encoders/speech_lstm.py
| 0.962205 | 0.671773 |
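A minimal sketch running the SpeechLSTM encoder above on a dummy batch; the module path follows the repository path shown, while the feature size, layer spec, and batch shape are arbitrary choices (with proj_size equal to hidden_size so each LSTM layer accepts the projected states of the previous one).

import torch
from pysimt.layers.encoders.speech_lstm import SpeechLSTM

# 43-dim speech features, 100 frames, batch of 8 equal-length samples.
enc = SpeechLSTM(input_size=43, hidden_size=256, proj_size=256,
                 layers='1_1_2_2_1_1', dropout=0.3)
x = torch.randn(100, 8, 43)   # (n_timesteps, n_samples, input_size)

hs, mask = enc(x)
# Two subsample-by-2 layers (plus the appended padding frame) shrink the
# time axis to roughly n_timesteps // 4; features come from the last FF layer.
print(hs.shape)
print(mask)                   # None: equal-length batches carry no mask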
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
import numpy as np
from scipy import interpolate, linalg, signal
class FirstOrder(signal.TransferFunction):
"""First order heat capacity differential equation model.
    The first order heat capacity differential equation is
C * dy/dt + K * y = u
with C the heat capacity, K the thermal conductivity, y the temperature and
u the heater power. This is a special case of a linear time invariant first
order system
a0 * dy/dt + a1 * y = b0 * du(t)/dt + b1 * u(t)
    The corresponding transfer function is
b0 * s^1 + b1 * s^0
G(s) = -------------------
a0 * s^1 + a1 * s^0
.. note::
        We normalize the transfer function to `a0 = 1.` on instantiation.
    :param b: Numerator polynomial.
    :param a: Denominator polynomial.
"""
def __init__(self, b, a):
# Normalize transfer function.
b = np.array(b) / a[0]
a = np.array(a) / a[0]
super(FirstOrder, self).__init__(b, a)
@classmethod
def from_ck(cls, heat_capacity, thermal_conductivity):
b = [0., 1. / heat_capacity]
a = [1., thermal_conductivity / heat_capacity]
return cls(b, a)
@property
def heat_capacity(self):
return 1. / self.num[-1]
@property
def thermal_conductivity(self):
return self.den[1] * self.heat_capacity
@classmethod
def fit(cls, t, y, u):
"""Fits a first order heat capacity model.
:param t: A sequence of timestamps.
:param y: A sequence of temperatures.
:param u: A sequence of heater power values.
"""
yspline = interpolate.UnivariateSpline(t, y, s=0)
uspline = interpolate.UnivariateSpline(t, u, s=0)
ti = np.linspace(np.min(t), np.max(t), len(t))
yi = yspline(ti)
dyi = yspline.derivative(n=1)(ti)
ui = uspline(ti)
        result = linalg.lstsq(np.hstack((yi[:, None], ui[:, None])), dyi)[0]
b = np.r_[result[1]]
a = np.r_[1., - result[0]]
return cls(b, a)
|
heatcapacity/fit.py
| 0.928975 | 0.52476 |
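A small sketch fitting the FirstOrder model above to a synthetic, noise-free step response; the true C and K values are arbitrary and the import path follows the repository path shown.

import numpy as np
from heatcapacity.fit import FirstOrder

# Step response of C*dy/dt + K*y = u with C = 2.0, K = 0.5 and y(0) = 0:
# y(t) = (u/K) * (1 - exp(-K*t/C)).
C_true, K_true = 2.0, 0.5
t = np.linspace(0.0, 60.0, 601)
u = np.ones_like(t)                     # 1 W heater step
y = (u / K_true) * (1.0 - np.exp(-K_true * t / C_true))

model = FirstOrder.fit(t, y, u)
print(model.heat_capacity)              # close to 2.0
print(model.thermal_conductivity)       # close to 0.5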
from sage.structure.sage_object import SageObject
from sage.rings.all import Integer, infinity, ZZ, QQ, CC
from sage.modules.free_module import span
from sage.modular.modform.constructor import Newform, CuspForms
from sage.modular.arithgroup.congroup_gamma0 import is_Gamma0
from sage.misc.misc_c import prod
class Lseries(SageObject):
"""
Base class for `L`-series attached to modular abelian varieties.
This is a common base class for complex and `p`-adic `L`-series
of modular abelian varieties.
"""
def __init__(self, abvar):
"""
Called when creating an L-series.
INPUT:
- ``abvar`` -- a modular abelian variety
EXAMPLES::
sage: J0(11).lseries()
Complex L-series attached to Abelian variety J0(11) of dimension 1
sage: J0(11).padic_lseries(7)
7-adic L-series attached to Abelian variety J0(11) of dimension 1
"""
self.__abvar = abvar
def abelian_variety(self):
"""
Return the abelian variety that this `L`-series is attached to.
OUTPUT:
a modular abelian variety
EXAMPLES::
sage: J0(11).padic_lseries(7).abelian_variety()
Abelian variety J0(11) of dimension 1
"""
return self.__abvar
class Lseries_complex(Lseries):
"""
A complex `L`-series attached to a modular abelian variety.
EXAMPLES::
sage: A = J0(37)
sage: A.lseries()
Complex L-series attached to Abelian variety J0(37) of dimension 2
"""
def __call__(self, s, prec=53):
"""
Evaluate this complex `L`-series at `s`.
INPUT:
- ``s`` -- complex number
- ``prec`` -- integer (default: 53) the number of bits of precision
used in computing the lseries of the newforms.
OUTPUT:
a complex number L(A, s).
EXAMPLES::
sage: L = J0(23).lseries()
sage: L(1)
0.248431866590600
sage: L(1, prec=100)
0.24843186659059968120725033931
sage: L = J0(389)[0].lseries()
sage: L(1) # long time (2s) abstol 1e-10
-1.33139759782370e-19
sage: L(1, prec=100) # long time (2s) abstol 1e-20
6.0129758648142797032650287762e-39
sage: L.rational_part()
0
sage: L = J1(23)[0].lseries()
sage: L(1)
0.248431866590600
sage: J = J0(11) * J1(11)
sage: J.lseries()(1)
0.0644356903227915
sage: L = JH(17,[2]).lseries()
sage: L(1)
0.386769938387780
"""
abelian_variety = self.abelian_variety()
# Check for easy dimension zero case
if abelian_variety.dimension() == 0:
return CC(1)
try:
factors = self.__factors[prec]
return prod(L(s) for L in factors)
except AttributeError:
self.__factors = {}
except KeyError:
pass
abelian_variety = self.abelian_variety()
newforms = abelian_variety.newform_decomposition('a')
factors = [newform.lseries(embedding=i, prec=prec)
for newform in newforms
for i in range(newform.base_ring().degree())]
self.__factors[prec] = factors
return prod(L(s) for L in factors)
def __eq__(self, other):
"""
Compare this complex `L`-series to another one.
INPUT:
- ``other`` -- object
OUTPUT:
boolean
EXAMPLES::
sage: L = J0(37)[0].lseries()
sage: M = J0(37)[1].lseries()
sage: L == M
False
sage: L == L
True
"""
if not isinstance(other, Lseries_complex):
return False
return self.abelian_variety() == other.abelian_variety()
def __ne__(self, other):
"""
Check whether ``self`` is not equal to ``other``.
INPUT:
- ``other`` -- object
OUTPUT:
boolean
EXAMPLES::
sage: L = J0(37)[0].lseries()
sage: M = J0(37)[1].lseries()
sage: L != M
True
sage: L != L
False
"""
return not (self == other)
def _repr_(self):
"""
String representation of `L`-series.
OUTPUT:
a string
EXAMPLES::
sage: L = J0(37).lseries()
sage: L._repr_()
'Complex L-series attached to Abelian variety J0(37) of dimension 2'
"""
return "Complex L-series attached to %s" % self.abelian_variety()
def vanishes_at_1(self):
"""
Return True if `L(1)=0` and return False otherwise.
OUTPUT:
a boolean
EXAMPLES:
Numerically, the `L`-series for `J_0(389)` appears to vanish
at 1. This is confirmed by this algebraic computation::
sage: L = J0(389)[0].lseries(); L
Complex L-series attached to Simple abelian subvariety 389a(1,389) of dimension 1 of J0(389)
sage: L(1) # long time (2s) abstol 1e-10
-1.33139759782370e-19
sage: L.vanishes_at_1()
True
Numerically, one might guess that the `L`-series for `J_1(23)`
and `J_1(31)` vanish at 1. This algebraic computation shows
otherwise::
sage: L = J1(23).lseries(); L
Complex L-series attached to Abelian variety J1(23) of dimension 12
sage: L(1) # long time (about 3 s)
0.000129519861426989 + 1.14001148377577e-19*I
sage: L.vanishes_at_1()
False
sage: L(1, prec=100) # long time (about 3 s)
0.00012951986142702571478817757149 - 2.9734441752025676942763838067e-33*I
sage: L = J1(31).lseries(); L
Complex L-series attached to Abelian variety J1(31) of dimension 26
sage: abs(L(1) - 3.45014267547611e-7) < 1e-15 # long time (about 8 s)
True
sage: L.vanishes_at_1() # long time (about 6 s)
False
"""
abelian_variety = self.abelian_variety()
# Check for easy dimension zero case
if abelian_variety.dimension() == 0:
return False
if not abelian_variety.is_simple():
from .constructor import AbelianVariety
decomp = (AbelianVariety(f) for f in
abelian_variety.newform_decomposition('a'))
return any(S.lseries().vanishes_at_1() for S in decomp)
modular_symbols = abelian_variety.modular_symbols()
Phi = modular_symbols.rational_period_mapping()
ambient_module = modular_symbols.ambient_module()
e = ambient_module([0, infinity])
return Phi(e).is_zero()
def rational_part(self):
"""
Return the rational part of this `L`-function at the central critical
value 1.
OUTPUT:
a rational number
EXAMPLES::
sage: A, B = J0(43).decomposition()
sage: A.lseries().rational_part()
0
sage: B.lseries().rational_part()
2/7
"""
abelian_variety = self.abelian_variety()
modular_symbols = abelian_variety.modular_symbols()
Phi = modular_symbols.rational_period_mapping()
ambient_module = modular_symbols.ambient_module()
if self.vanishes_at_1():
return QQ(0)
else:
s = ambient_module.sturm_bound()
I = ambient_module.hecke_images(0, range(1, s+1))
PhiTe = span([Phi(ambient_module(I[n]))
for n in range(I.nrows())], ZZ)
ambient_plus = ambient_module.sign_submodule(1)
ambient_plus_cusp = ambient_plus.cuspidal_submodule()
PhiH1plus = span([Phi(x) for
x in ambient_plus_cusp.integral_basis()], ZZ)
return PhiTe.index_in(PhiH1plus)
lratio = rational_part
class Lseries_padic(Lseries):
"""
A `p`-adic `L`-series attached to a modular abelian variety.
"""
def __init__(self, abvar, p):
"""
Create a `p`-adic `L`-series.
EXAMPLES::
sage: J0(37)[0].padic_lseries(389)
389-adic L-series attached to Simple abelian subvariety 37a(1,37) of dimension 1 of J0(37)
"""
Lseries.__init__(self, abvar)
p = Integer(p)
if not p.is_prime():
raise ValueError("p (=%s) must be prime"%p)
self.__p = p
def __eq__(self, other):
"""
Compare this `p`-adic `L`-series to another one.
First the abelian varieties are compared; if they are the same,
then the primes are compared.
INPUT:
other -- object
OUTPUT:
boolean
EXAMPLES::
sage: L = J0(37)[0].padic_lseries(5)
sage: M = J0(37)[1].padic_lseries(5)
sage: K = J0(37)[0].padic_lseries(3)
sage: L == K
False
sage: L == M
False
sage: L == L
True
"""
if not isinstance(other, Lseries_padic):
return False
return (self.abelian_variety() == other.abelian_variety() and
self.__p == other.__p)
def __ne__(self, other):
"""
Check whether ``self`` is not equal to ``other``.
INPUT:
other -- object
OUTPUT:
boolean
EXAMPLES::
sage: L = J0(37)[0].padic_lseries(5)
sage: M = J0(37)[1].padic_lseries(5)
sage: K = J0(37)[0].padic_lseries(3)
sage: L != K
True
sage: L != M
True
sage: L != L
False
"""
return not (self == other)
def prime(self):
"""
Return the prime `p` of this `p`-adic `L`-series.
EXAMPLES::
sage: J0(11).padic_lseries(7).prime()
7
"""
return self.__p
def power_series(self, n=2, prec=5):
"""
Return the `n`-th approximation to this `p`-adic `L`-series as
a power series in `T`.
Each coefficient is a `p`-adic number
whose precision is provably correct.
NOTE: This is not yet implemented.
EXAMPLES::
sage: L = J0(37)[0].padic_lseries(5)
sage: L.power_series()
Traceback (most recent call last):
...
NotImplementedError
sage: L.power_series(3,7)
Traceback (most recent call last):
...
NotImplementedError
"""
raise NotImplementedError
def _repr_(self):
"""
String representation of this `p`-adic `L`-series.
EXAMPLES::
sage: L = J0(37)[0].padic_lseries(5)
sage: L._repr_()
'5-adic L-series attached to Simple abelian subvariety 37a(1,37) of dimension 1 of J0(37)'
"""
return "%s-adic L-series attached to %s" % (self.__p,
self.abelian_variety())
|
src/sage/modular/abvar/lseries.py
| 0.875946 | 0.510558 |
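A short Sage session sketch exercising the complex L-series API above; it reuses only calls that already appear in the docstrings (J0, lseries, vanishes_at_1), and the printed values are the documented approximations.

# Run inside a Sage session; J0 and the abelian variety machinery are not
# importable from plain Python.
L = J0(23).lseries()
print(L)                  # Complex L-series attached to Abelian variety J0(23) ...
print(L(1))               # approx. 0.248431866590600 (see the docstring example)
print(L(1, prec=100))     # same value at 100 bits of precision
print(L.vanishes_at_1())  # algebraic check of whether L(1) = 0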
import json
import time, datetime
import os
import shutil
class V2XDataCollector:
def __init__(self, environment:str):
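        # environment must be "vehicle" or "roadside"; it selects the CSV
        # schemas written below and whether a host BSM logfile is created.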
self.environment = environment
self.loggingDirectory = None
self.initializationTimestamp = None
self.hostBsmLogfile = None
self.remoteBsmLogfile = None
self.spatLogfile = None
self.srmLogfile = None
self.ssmLogfile = None
self.msgCountsLogfile = None
configFile = open("/nojournal/bin/mmitss-phase3-master-config.json", 'r')
config = (json.load(configFile))
configFile.close()
self.hostBsmDecoderPort = config["PortNumber"]["HostBsmDecoder"]
if self.environment == "vehicle":
self.baseName = "vehicle"
else:
self.baseName = config["IntersectionName"]
self.path = "/nojournal/bin/v2x-data"
if not os.path.exists(self.path + "/archive"):
os.makedirs(self.path + "/archive")
self.archive_leftover_directories()
self.initialize_logfiles()
def initialize_logfiles(self):
self.initializationTimestamp = ('{:%m%d%Y_%H%M%S}'.format(datetime.datetime.now()))
self.loggingDirectory = self.path + "/" + self.baseName + "_" + self.initializationTimestamp + "/"
os.makedirs(self.loggingDirectory)
self.initialize_msgCountsLogfile()
self.initialize_bsmLogfile("remote")
self.initialize_spatLogfile()
self.initialize_srmLogfile()
self.initialize_ssmLogfile()
if self.environment == "vehicle":
self.initialize_bsmLogfile("host")
def initialize_msgCountsLogfile(self):
msgCountsLogfileName = self.loggingDirectory + self.baseName + "_" + "msgCountsLog_" + self.initializationTimestamp + ".csv"
self.msgCountsLogfile = open(msgCountsLogfileName, 'w', buffering=1)
if self.environment == "roadside":
self.msgCountsLogfile.write("log_timestamp_verbose,log_timestamp_posix,timestamp_verbose,timestamp_posix,interval_sec,msg_source,msg_type,msg_count,msg_served,msg_rejected\n")
elif self.environment == "vehicle":
self.msgCountsLogfile.write("log_timestamp_verbose,log_timestamp_posix,timestamp_verbose,timestamp_posix,interval_sec,msg_source,msg_type,msg_count\n")
def initialize_bsmLogfile(self, origin):
bsmLogfileName = self.loggingDirectory + self.baseName + "_" + origin + "BsmLog_" + self.initializationTimestamp + ".csv"
if self.environment == "vehicle":
csvHeader = ("log_timestamp_verbose,log_timestamp_posix,timestamp_verbose,timestamp_posix,"
+ "temporaryId,secMark,latitude,longitude,elevation,speed,heading,type,length,width\n")
elif self.environment == "roadside":
csvHeader = ("log_timestamp_verbose,log_timestamp_posix,timestamp_verbose,timestamp_posix,"
+ "temporaryId,secMark,latitude,longitude,elevation,speed,heading,type,length,width,onmap_status,position_on_map,current_approach,current_lane,current_signal_group,dist_to_stopbar\n")
if origin == "host":
self.hostBsmLogfile = open(bsmLogfileName, 'w')
self.hostBsmLogfile.write(csvHeader)
elif origin == "remote":
self.remoteBsmLogfile = open(bsmLogfileName, 'w')
self.remoteBsmLogfile.write(csvHeader)
def initialize_spatLogfile(self):
spatLogfileName = self.loggingDirectory + self.baseName + "_" + "spatLog_" + self.initializationTimestamp + ".csv"
self.spatLogfile = open(spatLogfileName, 'w')
csvHeader = ("log_timestamp_verbose,log_timestamp_posix,timestamp_verbose,timestamp_posix,regionalId,intersectionId,msgCount,moy,msom," +
"v1_currState,v1_minEndTime,v1_maxEndTime,v1_elapsedTime," +
"v2_currState,v2_minEndTime,v2_maxEndTime,v2_elapsedTime," +
"v3_currState,v3_minEndTime,v3_maxEndTime,v3_elapsedTime," +
"v4_currState,v4_minEndTime,v4_maxEndTime,v4_elapsedTime," +
"v5_currState,v5_minEndTime,v5_maxEndTime,v5_elapsedTime," +
"v6_currState,v6_minEndTime,v6_maxEndTime,v6_elapsedTime," +
"v7_currState,v7_minEndTime,v7_maxEndTime,v7_elapsedTime," +
"v8_currState,v8_minEndTime,v8_maxEndTime,v8_elapsedTime," +
"p1_currState,p1_minEndTime,p1_maxEndTime,p1_elapsedTime," +
"p2_currState,p2_minEndTime,p2_maxEndTime,p2_elapsedTime," +
"p3_currState,p3_minEndTime,p3_maxEndTime,p3_elapsedTime," +
"p4_currState,p4_minEndTime,p4_maxEndTime,p4_elapsedTime," +
"p5_currState,p5_minEndTime,p5_maxEndTime,p5_elapsedTime," +
"p6_currState,p6_minEndTime,p6_maxEndTime,p6_elapsedTime," +
"p7_currState,p7_minEndTime,p7_maxEndTime,p7_elapsedTime," +
"p8_currState,p8_minEndTime,p8_maxEndTime,p8_elapsedTime" + "\n")
self.spatLogfile.write(csvHeader)
def initialize_srmLogfile(self):
srmLogfileName = self.loggingDirectory + self.baseName + "_" + "srmLog_" + self.initializationTimestamp + ".csv"
self.srmLogfile = open(srmLogfileName, 'w', buffering=1)
csvHeader = ("log_timestamp_verbose,log_timestamp_posix,timestamp_verbose" + ","
+ "timestamp_posix" + ","
+ "minuteOfYear" + ","
+ "msOfMinute" + ","
+ "msgCount" + ","
+ "regionalID" + ","
+ "intersectionID" + ","
+ "priorityRequestType" + ","
+ "basicVehicleRole" + ","
+ "laneID" + ","
+ "eTA_Minute" + ","
+ "eTA_Second" + ","
+ "eTA_Duration" + ","
+ "vehicleID" + ","
+ "latitude" + ","
+ "longitude" + ","
+ "elevation" + ","
+ "heading" + ","
+ "speed" + ","
+ "vehicleType"
+ "\n")
self.srmLogfile.write(csvHeader)
def initialize_ssmLogfile(self):
ssmLogfileName = self.loggingDirectory + self.baseName + "_" + "ssmLog_" + self.initializationTimestamp + ".csv"
self.ssmLogfile = open(ssmLogfileName, 'w', buffering=1)
csvHeader = ("log_timestamp_verbose" + ","
+ "log_timestamp_posix" + ","
+ "timestamp_verbose" + ","
+ "timestamp_posix" + ","
+ "minuteOfYear" + ","
+ "msOfMinute" + ","
+ "sequenceNumber" + ","
+ "updateCount" + ","
+ "regionalID" + ","
+ "noOfRequest" + ","
+ "intersectionID" +
"," + "r1_vehicleID,r1_msgCount,r1_basicVehicleRole,r1_inBoundLaneID,r1_ETA_Minute,r1_ETA_Second,r1_ETA_Duration,r1_priorityRequestStatus" +
"," + "r2_vehicleID,r2_msgCount,r2_basicVehicleRole,r2_inBoundLaneID,r2_ETA_Minute,r2_ETA_Second,r2_ETA_Duration,r2_priorityRequestStatus" +
"," + "r3_vehicleID,r3_msgCount,r3_basicVehicleRole,r3_inBoundLaneID,r3_ETA_Minute,r3_ETA_Second,r3_ETA_Duration,r3_priorityRequestStatus" +
"," + "r4_vehicleID,r4_msgCount,r4_basicVehicleRole,r4_inBoundLaneID,r4_ETA_Minute,r4_ETA_Second,r4_ETA_Duration,r4_priorityRequestStatus" +
"," + "r5_vehicleID,r5_msgCount,r5_basicVehicleRole,r5_inBoundLaneID,r5_ETA_Minute,r5_ETA_Second,r5_ETA_Duration,r5_priorityRequestStatus\n")
self.ssmLogfile.write(csvHeader)
def write_msgCount(self, msgCounts:json):
csvRow = self.msgCounts_json_to_csv(msgCounts)
self.msgCountsLogfile.write(csvRow)
def write_bsm(self, bsmJson:json, senderPort:int):
csvRow = self.bsm_json_to_csv(bsmJson)
if ((self.environment == "vehicle") and (senderPort == self.hostBsmDecoderPort)):
self.hostBsmLogfile.write(csvRow)
else:
self.remoteBsmLogfile.write(csvRow)
def write_spat(self, spatJson:json):
csvRow = self.spat_json_to_csv(spatJson)
self.spatLogfile.write(csvRow)
def write_srm(self, srmJson:json):
csvRow = self.srm_json_to_csv(srmJson)
self.srmLogfile.write(csvRow)
def write_ssm(self, ssmJson:json):
csvRow = self.ssm_json_to_csv(ssmJson)
self.ssmLogfile.write(csvRow)
def msgCounts_json_to_csv(self, jsonData:json):
log_timestamp_posix = str(time.time())
log_timestamp_verbose = str(datetime.datetime.now())
timestamp_posix = str(jsonData["MsgInformation"]["Timestamp_posix"])
timestamp_verbose = str(jsonData["MsgInformation"]["Timestamp_verbose"])
timeInterval = str(jsonData["MsgInformation"]["TimeInterval"])
msgSource = str(jsonData["MsgInformation"]["MsgSource"])
msgType = str(jsonData["MsgInformation"]["MsgCountType"])
msgCount = str(jsonData["MsgInformation"]["MsgCount"])
csv = (log_timestamp_verbose + "," +
log_timestamp_posix + "," +
timestamp_verbose + "," +
timestamp_posix + "," +
timeInterval + "," +
msgSource + "," +
msgType + "," +
msgCount)
if self.environment == "roadside":
msgServed = str(jsonData["MsgInformation"]["MsgServed"])
msgRejected = str(jsonData["MsgInformation"]["MsgRejected"])
csv = csv + "," + msgServed + "," + msgRejected
csv = csv + "\n"
return csv
def bsm_json_to_csv(self, jsonData:json):
log_timestamp_verbose = str(datetime.datetime.now())
log_timestamp_posix = str(time.time())
timestamp_verbose = str(jsonData["Timestamp_verbose"])
timestamp_posix = str(jsonData["Timestamp_posix"])
temporaryId = str(jsonData["BasicVehicle"]["temporaryID"])
secMark = str(jsonData["BasicVehicle"]["secMark_Second"])
latitude = str(jsonData["BasicVehicle"]["position"]["latitude_DecimalDegree"])
longitude = str(jsonData["BasicVehicle"]["position"]["longitude_DecimalDegree"])
elevation = str(jsonData["BasicVehicle"]["position"]["elevation_Meter"])
speed = str(jsonData["BasicVehicle"]["speed_MeterPerSecond"])
heading = str(jsonData["BasicVehicle"]["heading_Degree"])
vehType = str(jsonData["BasicVehicle"]["type"])
length = str(jsonData["BasicVehicle"]["size"]["length_cm"])
width = str(jsonData["BasicVehicle"]["size"]["width_cm"])
if self.environment == "roadside":
onMap = str(jsonData["OnmapVehicle"]["onMap"])
approachId = str(jsonData["OnmapVehicle"]["approachId"])
laneId = str(jsonData["OnmapVehicle"]["laneId"])
signalGroup = str(jsonData["OnmapVehicle"]["signalGroup"])
distanceToStopbar = str(jsonData["OnmapVehicle"]["distanceToStopbar"])
locationOnMap = str(jsonData["OnmapVehicle"]["locationOnMap"])
csv = (log_timestamp_verbose + ","
+ log_timestamp_posix + ","
+ timestamp_verbose + ","
+ timestamp_posix + ","
+ temporaryId + ","
+ secMark + ","
+ latitude + ","
+ longitude + ","
+ elevation + ","
+ speed + ","
+ heading + ","
+ vehType + ","
+ length + ","
+ width + ","
+ onMap + ","
+ locationOnMap + ","
+ approachId + ","
+ laneId + ","
+ signalGroup + ","
+ distanceToStopbar + "\n")
elif self.environment == "vehicle":
csv = (log_timestamp_verbose + ","
+ log_timestamp_posix + ","
+ timestamp_verbose + ","
+ timestamp_posix + ","
+ temporaryId + ","
+ secMark + ","
+ latitude + ","
+ longitude + ","
+ elevation + ","
+ speed + ","
+ heading + ","
+ vehType + ","
+ length + ","
+ width + "\n")
return csv
def spat_json_to_csv(self, jsonData:json):
log_timestamp_verbose = str(datetime.datetime.now())
log_timestamp_posix = str(time.time())
timestamp_verbose = str(jsonData["Timestamp_verbose"])
timestamp_posix = str(jsonData["Timestamp_posix"])
regionalId = str(jsonData["Spat"]["IntersectionState"]["regionalID"])
intersectionId = str(jsonData["Spat"]["IntersectionState"]["intersectionID"])
msgCnt = str(jsonData["Spat"]["msgCnt"])
moy = str(jsonData["Spat"]["minuteOfYear"])
msom = str(jsonData["Spat"]["msOfMinute"])
v1_currState = str(jsonData["Spat"]["phaseState"][0]["currState"])
v1_minEndTime = str(jsonData["Spat"]["phaseState"][0]["minEndTime"])
v1_maxEndTime = str(jsonData["Spat"]["phaseState"][0]["maxEndTime"])
v1_elapsedTime = str(jsonData["Spat"]["phaseState"][0]["elapsedTime"])
v2_currState = str(jsonData["Spat"]["phaseState"][1]["currState"])
v2_minEndTime = str(jsonData["Spat"]["phaseState"][1]["minEndTime"])
v2_maxEndTime = str(jsonData["Spat"]["phaseState"][1]["maxEndTime"])
v2_elapsedTime = str(jsonData["Spat"]["phaseState"][1]["elapsedTime"])
v3_currState = str(jsonData["Spat"]["phaseState"][2]["currState"])
v3_minEndTime = str(jsonData["Spat"]["phaseState"][2]["minEndTime"])
v3_maxEndTime = str(jsonData["Spat"]["phaseState"][2]["maxEndTime"])
v3_elapsedTime = str(jsonData["Spat"]["phaseState"][2]["elapsedTime"])
v4_currState = str(jsonData["Spat"]["phaseState"][3]["currState"])
v4_minEndTime = str(jsonData["Spat"]["phaseState"][3]["minEndTime"])
v4_maxEndTime = str(jsonData["Spat"]["phaseState"][3]["maxEndTime"])
v4_elapsedTime = str(jsonData["Spat"]["phaseState"][3]["elapsedTime"])
v5_currState = str(jsonData["Spat"]["phaseState"][4]["currState"])
v5_minEndTime = str(jsonData["Spat"]["phaseState"][4]["minEndTime"])
v5_maxEndTime = str(jsonData["Spat"]["phaseState"][4]["maxEndTime"])
v5_elapsedTime = str(jsonData["Spat"]["phaseState"][4]["elapsedTime"])
v6_currState = str(jsonData["Spat"]["phaseState"][5]["currState"])
v6_minEndTime = str(jsonData["Spat"]["phaseState"][5]["minEndTime"])
v6_maxEndTime = str(jsonData["Spat"]["phaseState"][5]["maxEndTime"])
v6_elapsedTime = str(jsonData["Spat"]["phaseState"][5]["elapsedTime"])
v7_currState = str(jsonData["Spat"]["phaseState"][6]["currState"])
v7_minEndTime = str(jsonData["Spat"]["phaseState"][6]["minEndTime"])
v7_maxEndTime = str(jsonData["Spat"]["phaseState"][6]["maxEndTime"])
v7_elapsedTime = str(jsonData["Spat"]["phaseState"][6]["elapsedTime"])
v8_currState = str(jsonData["Spat"]["phaseState"][7]["currState"])
v8_minEndTime = str(jsonData["Spat"]["phaseState"][7]["minEndTime"])
v8_maxEndTime = str(jsonData["Spat"]["phaseState"][7]["maxEndTime"])
v8_elapsedTime = str(jsonData["Spat"]["phaseState"][7]["elapsedTime"])
p1_currState = str(jsonData["Spat"]["pedPhaseState"][0]["currState"])
p1_minEndTime = str(jsonData["Spat"]["pedPhaseState"][0]["minEndTime"])
p1_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][0]["maxEndTime"])
p1_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][0]["elapsedTime"])
p2_currState = str(jsonData["Spat"]["pedPhaseState"][1]["currState"])
p2_minEndTime = str(jsonData["Spat"]["pedPhaseState"][1]["minEndTime"])
p2_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][1]["maxEndTime"])
p2_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][1]["elapsedTime"])
p3_currState = str(jsonData["Spat"]["pedPhaseState"][2]["currState"])
p3_minEndTime = str(jsonData["Spat"]["pedPhaseState"][2]["minEndTime"])
p3_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][2]["maxEndTime"])
p3_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][2]["elapsedTime"])
p4_currState = str(jsonData["Spat"]["pedPhaseState"][3]["currState"])
p4_minEndTime = str(jsonData["Spat"]["pedPhaseState"][3]["minEndTime"])
p4_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][3]["maxEndTime"])
p4_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][3]["elapsedTime"])
p5_currState = str(jsonData["Spat"]["pedPhaseState"][4]["currState"])
p5_minEndTime = str(jsonData["Spat"]["pedPhaseState"][4]["minEndTime"])
p5_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][4]["maxEndTime"])
p5_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][4]["elapsedTime"])
p6_currState = str(jsonData["Spat"]["pedPhaseState"][5]["currState"])
p6_minEndTime = str(jsonData["Spat"]["pedPhaseState"][5]["minEndTime"])
p6_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][5]["maxEndTime"])
p6_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][5]["elapsedTime"])
p7_currState = str(jsonData["Spat"]["pedPhaseState"][6]["currState"])
p7_minEndTime = str(jsonData["Spat"]["pedPhaseState"][6]["minEndTime"])
p7_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][6]["maxEndTime"])
p7_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][6]["elapsedTime"])
p8_currState = str(jsonData["Spat"]["pedPhaseState"][7]["currState"])
p8_minEndTime = str(jsonData["Spat"]["pedPhaseState"][7]["minEndTime"])
p8_maxEndTime = str(jsonData["Spat"]["pedPhaseState"][7]["maxEndTime"])
p8_elapsedTime = str(jsonData["Spat"]["pedPhaseState"][7]["elapsedTime"])
csv = (log_timestamp_verbose + "," + log_timestamp_posix + "," + timestamp_verbose + "," + timestamp_posix + "," +
regionalId + "," + intersectionId + "," + msgCnt + "," + moy + "," + msom + "," +
v1_currState + "," + v1_minEndTime + "," + v1_maxEndTime + "," + v1_elapsedTime + "," +
v2_currState + "," + v2_minEndTime + "," + v2_maxEndTime + "," + v2_elapsedTime + "," +
v3_currState + "," + v3_minEndTime + "," + v3_maxEndTime + "," + v3_elapsedTime + "," +
v4_currState + "," + v4_minEndTime + "," + v4_maxEndTime + "," + v4_elapsedTime + "," +
v5_currState + "," + v5_minEndTime + "," + v5_maxEndTime + "," + v5_elapsedTime + "," +
v6_currState + "," + v6_minEndTime + "," + v6_maxEndTime + "," + v6_elapsedTime + "," +
v7_currState + "," + v7_minEndTime + "," + v7_maxEndTime + "," + v7_elapsedTime + "," +
v8_currState + "," + v8_minEndTime + "," + v8_maxEndTime + "," + v8_elapsedTime + "," +
p1_currState + "," + p1_minEndTime + "," + p1_maxEndTime + "," + p1_elapsedTime + "," +
p2_currState + "," + p2_minEndTime + "," + p2_maxEndTime + "," + p2_elapsedTime + "," +
p3_currState + "," + p3_minEndTime + "," + p3_maxEndTime + "," + p3_elapsedTime + "," +
p4_currState + "," + p4_minEndTime + "," + p4_maxEndTime + "," + p4_elapsedTime + "," +
p5_currState + "," + p5_minEndTime + "," + p5_maxEndTime + "," + p5_elapsedTime + "," +
p6_currState + "," + p6_minEndTime + "," + p6_maxEndTime + "," + p6_elapsedTime + "," +
p7_currState + "," + p7_minEndTime + "," + p7_maxEndTime + "," + p7_elapsedTime + "," +
p8_currState + "," + p8_minEndTime + "," + p8_maxEndTime + "," + p8_elapsedTime + "\n")
return csv
def srm_json_to_csv(self, jsonData:json):
log_timestamp_verbose = str(datetime.datetime.now())
log_timestamp_posix = str(time.time())
timestamp_verbose = str(jsonData["Timestamp_verbose"])
timestamp_posix = str(jsonData["Timestamp_posix"])
minuteOfYear = str(jsonData["SignalRequest"]["minuteOfYear"])
msOfMinute = str(jsonData["SignalRequest"]["msOfMinute"])
msgCount = str(jsonData["SignalRequest"]["msgCount"])
regionalID = str(jsonData["SignalRequest"]["regionalID"])
intersectionID = str(jsonData["SignalRequest"]["intersectionID"])
priorityRequestType = str(jsonData["SignalRequest"]["priorityRequestType"])
basicVehicleRole = str(jsonData["SignalRequest"]["basicVehicleRole"])
laneID = str(jsonData["SignalRequest"]["inBoundLane"]["LaneID"])
eTA_Minute = str(jsonData["SignalRequest"]["expectedTimeOfArrival"]["ETA_Minute"])
eTA_Second = str(jsonData["SignalRequest"]["expectedTimeOfArrival"]["ETA_Second"])
eTA_Duration = str(jsonData["SignalRequest"]["expectedTimeOfArrival"]["ETA_Duration"])
vehicleID = str(jsonData["SignalRequest"]["vehicleID"])
latitude = str(jsonData["SignalRequest"]["position"]["latitude_DecimalDegree"])
longitude = str(jsonData["SignalRequest"]["position"]["longitude_DecimalDegree"])
elevation = str(jsonData["SignalRequest"]["position"]["elevation_Meter"])
heading = str(jsonData["SignalRequest"]["heading_Degree"])
speed = str(jsonData["SignalRequest"]["speed_MeterPerSecond"])
vehicleType = str(jsonData["SignalRequest"]["vehicleType"])
csv = (log_timestamp_verbose + ","
+ log_timestamp_posix + ","
+ timestamp_verbose + ","
+ timestamp_posix + ","
+ minuteOfYear + ","
+ msOfMinute + ","
+ msgCount + ","
+ regionalID + ","
+ intersectionID + ","
+ priorityRequestType + ","
+ basicVehicleRole + ","
+ laneID + ","
+ eTA_Minute + ","
+ eTA_Second + ","
+ eTA_Duration + ","
+ vehicleID + ","
+ latitude + ","
+ longitude + ","
+ elevation + ","
+ heading + ","
+ speed + ","
+ vehicleType
+ "\n")
return csv
def ssm_json_to_csv(self, jsonData:json):
log_timestamp_verbose = str(datetime.datetime.now())
log_timestamp_posix = str(time.time())
timestamp_verbose = str(jsonData["Timestamp_verbose"])
timestamp_posix = str(jsonData["Timestamp_posix"])
noOfRequest = (jsonData["noOfRequest"])
minuteOfYear = str(jsonData["SignalStatus"]["minuteOfYear"])
msOfMinute = str(jsonData["SignalStatus"]["msOfMinute"])
sequenceNumber = str(jsonData["SignalStatus"]["sequenceNumber"])
updateCount = str(jsonData["SignalStatus"]["updateCount"])
regionalID = str(jsonData["SignalStatus"]["regionalID"])
intersectionID = str(jsonData["SignalStatus"]["intersectionID"])
static_csv = (log_timestamp_verbose + ","
+ log_timestamp_posix + ","
+ timestamp_verbose + ","
+ timestamp_posix + ","
+ minuteOfYear + ","
+ msOfMinute + ","
+ sequenceNumber + ","
+ updateCount + ","
+ regionalID + ","
+ str(noOfRequest) + ","
+ intersectionID)
dynamic_csv = ""
for request in range(0,noOfRequest):
vehicleID = str(jsonData["SignalStatus"]["requestorInfo"][request]["vehicleID"])
msgCount = str(jsonData["SignalStatus"]["requestorInfo"][request]["msgCount"])
basicVehicleRole = str(jsonData["SignalStatus"]["requestorInfo"][request]["basicVehicleRole"])
inBoundLaneID = str(jsonData["SignalStatus"]["requestorInfo"][request]["inBoundLaneID"])
ETA_Minute = str(jsonData["SignalStatus"]["requestorInfo"][request]["ETA_Minute"])
ETA_Second = str(jsonData["SignalStatus"]["requestorInfo"][request]["ETA_Second"])
ETA_Duration = str(jsonData["SignalStatus"]["requestorInfo"][request]["ETA_Duration"])
priorityRequestStatus = str(jsonData["SignalStatus"]["requestorInfo"][request]["priorityRequestStatus"])
request_csv = ("," + vehicleID +
"," + msgCount +
"," + basicVehicleRole +
"," + inBoundLaneID +
"," + ETA_Minute +
"," + ETA_Second +
"," + ETA_Duration +
"," + priorityRequestStatus )
dynamic_csv = dynamic_csv + request_csv
csv = static_csv + dynamic_csv + "\n"
return csv
def decode_and_store_data(self, data:bytes, senderPort:int):
try:
receivedMsg = json.loads(data.decode())
if receivedMsg["MsgType"] == "BSM":
self.write_bsm(receivedMsg,senderPort)
elif receivedMsg["MsgType"] == "SPaT":
self.write_spat(receivedMsg)
elif receivedMsg["MsgType"] == "SRM":
self.write_srm(receivedMsg)
elif receivedMsg["MsgType"] == "SSM":
self.write_ssm(receivedMsg)
elif receivedMsg["MsgType"] == "MsgCount":
self.write_msgCount(receivedMsg)
        except Exception as e:
            print("Failed decoding of received message at: " + str(time.time()) + " (" + str(e) + ")")
def close_logfiles(self):
if not self.msgCountsLogfile.closed:
self.msgCountsLogfile.close()
if not self.remoteBsmLogfile.closed:
self.remoteBsmLogfile.close()
if not self.spatLogfile.closed:
self.spatLogfile.close()
if not self.srmLogfile.closed:
self.srmLogfile.close()
if not self.ssmLogfile.closed:
self.ssmLogfile.close()
if self.environment == "vehicle":
if not self.hostBsmLogfile.closed:
self.hostBsmLogfile.close()
self.archive_current_directory()
def archive_current_directory(self):
shutil.move(self.loggingDirectory, (self.path + "/archive/"))
    def archive_leftover_directories(self):
        # Move any logging directories left over from previous runs into the archive.
        directories = list(os.walk(self.path))[0][1]
        directories.remove("archive")
        if len(directories) > 0:
            for directory in directories:
                shutil.move((self.path + "/" + directory), (self.path + "/archive/"))
if __name__ == "__main__":
pass
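# Minimal usage sketch: how decode_and_store_data() is meant to be fed raw JSON
# bytes from a UDP socket together with the sender's port, with close_logfiles()
# archiving the run on shutdown. The helper name, listen port, and buffer size
# below are illustrative assumptions, not part of the module above.
def _example_receive_loop(environment="roadside", listenPort=10001, bufferSize=8192):
    import socket
    collector = V2XDataCollector(environment)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", listenPort))  # hypothetical listen port
    try:
        while True:
            data, (senderIp, senderPort) = sock.recvfrom(bufferSize)
            collector.decode_and_store_data(data, senderPort)
    except KeyboardInterrupt:
        collector.close_logfiles()
        sock.close()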
|
src/common/v2x-data-collector/V2XDataCollector.py
|
| 0.116337 | 0.107204 |
SC ={ 'DRV_ERROR_CODES': 20001,
'DRV_SUCCESS': 20002,
'DRV_VXDNOTINSTALLED' : 20003,
'DRV_ERROR_SCAN' : 20004,
'DRV_ERROR_CHECK_SUM' : 20005,
'DRV_ERROR_FILELOAD' : 20006,
'DRV_UNKNOWN_FUNCTION' : 20007,
'DRV_ERROR_VXD_INIT' : 20008,
'DRV_ERROR_ADDRESS' : 20009,
'DRV_ERROR_PAGELOCK' : 20010,
'DRV_ERROR_PAGEUNLOCK' : 20011,
'DRV_ERROR_BOARDTEST' : 20012,
'DRV_ERROR_ACK' : 20013,
'DRV_ERROR_UP_FIFO' : 20014,
'DRV_ERROR_PATTERN' : 20015,
'DRV_ACQUISITION_ERRORS' : 20017,
'DRV_ACQ_BUFFER' : 20018,
'DRV_ACQ_DOWNFIFO_FULL' : 20019,
'DRV_PROC_UNKONWN_INSTRUCTION' : 20020,
'DRV_ILLEGAL_OP_CODE' : 20021,
'DRV_KINETIC_TIME_NOT_MET' : 20022,
'DRV_ACCUM_TIME_NOT_MET' : 20023,
'DRV_NO_NEW_DATA' : 20024,
'DRV_PCI_DMA_FAIL' : 20025,
'DRV_SPOOLERROR' : 20026,
'DRV_SPOOLSETUPERROR' : 20027,
'DRV_FILESIZELIMITERROR' : 20028,
'DRV_ERROR_FILESAVE' : 20029,
'DRV_TEMPERATURE_CODES' : 20033,
'DRV_TEMPERATURE_OFF' : 20034,
'DRV_TEMPERATURE_NOT_STABILIZED' : 20035,
'DRV_TEMPERATURE_STABILIZED' : 20036,
'DRV_TEMPERATURE_NOT_REACHED' : 20037,
'DRV_TEMPERATURE_OUT_RANGE' : 20038,
'DRV_TEMPERATURE_NOT_SUPPORTED' : 20039,
'DRV_TEMPERATURE_DRIFT' : 20040,
'DRV_TEMP_CODES' : 20033,
'DRV_TEMP_OFF' : 20034,
'DRV_TEMP_NOT_STABILIZED' : 20035,
'DRV_TEMP_STABILIZED' : 20036,
'DRV_TEMP_NOT_REACHED' : 20037,
'DRV_TEMP_OUT_RANGE' : 20038,
'DRV_TEMP_NOT_SUPPORTED' : 20039,
'DRV_TEMP_DRIFT' : 20040,
'DRV_GENERAL_ERRORS' : 20049,
'DRV_INVALID_AUX' : 20050,
'DRV_COF_NOTLOADED' : 20051,
'DRV_FPGAPROG' : 20052,
'DRV_FLEXERROR' : 20053,
'DRV_GPIBERROR' : 20054,
'DRV_EEPROMVERSIONERROR' : 20055,
'DRV_DATATYPE' : 20064,
'DRV_DRIVER_ERRORS' : 20065,
'DRV_P1INVALID' : 20066,
'DRV_P2INVALID' : 20067,
'DRV_P3INVALID' : 20068,
'DRV_P4INVALID' : 20069,
'DRV_INIERROR' : 20070,
'DRV_COFERROR' : 20071,
'DRV_ACQUIRING' : 20072,
'DRV_IDLE' : 20073,
'DRV_TEMPCYCLE' : 20074,
'DRV_NOT_INITIALIZED' : 20075,
'DRV_P5INVALID' : 20076,
'DRV_P6INVALID' : 20077,
'DRV_INVALID_MODE' : 20078,
'DRV_INVALID_FILTER' : 20079,
'DRV_I2CERRORS' : 20080,
'DRV_I2CDEVNOTFOUND' : 20081,
'DRV_I2CTIMEOUT' : 20082,
'DRV_P7INVALID' : 20083,
'DRV_P8INVALID' : 20084,
'DRV_P9INVALID' : 20085,
'DRV_P10INVALID' : 20086,
'DRV_P11INVALID' : 20087,
'DRV_USBERROR' : 20089,
'DRV_IOCERROR' : 20090,
'DRV_VRMVERSIONERROR' : 20091,
'DRV_GATESTEPERROR' : 20092,
'DRV_USB_INTERRUPT_ENDPOINT_ERROR' : 20093,
'DRV_RANDOM_TRACK_ERROR' : 20094,
'DRV_INVALID_TRIGGER_MODE' : 20095,
'DRV_LOAD_FIRMWARE_ERROR' : 20096,
'DRV_DIVIDE_BY_ZERO_ERROR' : 20097,
'DRV_INVALID_RINGEXPOSURES' : 20098,
'DRV_BINNING_ERROR' : 20099,
'DRV_INVALID_AMPLIFIER' : 20100,
'DRV_INVALID_COUNTCONVERT_MODE' : 20101,
'DRV_ERROR_NOCAMERA' : 20990,
'DRV_NOT_SUPPORTED' : 20991,
'DRV_NOT_AVAILABLE' : 20992,
'DRV_ERROR_MAP' : 20115,
'DRV_ERROR_UNMAP' : 20116,
'DRV_ERROR_MDL' : 20117,
'DRV_ERROR_UNMDL' : 20118,
'DRV_ERROR_BUFFSIZE' : 20119,
'DRV_ERROR_NOHANDLE' : 20121,
'DRV_GATING_NOT_AVAILABLE' : 20130,
'DRV_FPGA_VOLTAGE_ERROR' : 20131,
'DRV_OW_CMD_FAIL' : 20150,
'DRV_OWMEMORY_BAD_ADDR' : 20151,
'DRV_OWCMD_NOT_AVAILABLE' : 20152,
'DRV_OW_NO_SLAVES' : 20153,
'DRV_OW_NOT_INITIALIZED' : 20154,
'DRV_OW_ERROR_SLAVE_NUM' : 20155,
'DRV_MSTIMINGS_ERROR' : 20156,
'DRV_OA_NULL_ERROR' : 20173,
'DRV_OA_PARSE_DTD_ERROR' : 20174,
'DRV_OA_DTD_VALIDATE_ERROR' : 20175,
'DRV_OA_FILE_ACCESS_ERROR' : 20176,
'DRV_OA_FILE_DOES_NOT_EXIST' : 20177,
'DRV_OA_XML_INVALID_OR_NOT_FOUND_ERROR' : 20178,
'DRV_OA_PRESET_FILE_NOT_LOADED' : 20179,
'DRV_OA_USER_FILE_NOT_LOADED' : 20180,
'DRV_OA_PRESET_AND_USER_FILE_NOT_LOADED' : 20181,
'DRV_OA_INVALID_FILE' : 20182,
'DRV_OA_FILE_HAS_BEEN_MODIFIED' : 20183,
'DRV_OA_BUFFER_FULL' : 20184,
'DRV_OA_INVALID_STRING_LENGTH' : 20185,
'DRV_OA_INVALID_CHARS_IN_NAME' : 20186,
'DRV_OA_INVALID_NAMING' : 20187,
'DRV_OA_GET_CAMERA_ERROR' : 20188,
'DRV_OA_MODE_ALREADY_EXISTS' : 20189,
'DRV_OA_STRINGS_NOT_EQUAL' : 20190,
'DRV_OA_NO_USER_DATA' : 20191,
'DRV_OA_VALUE_NOT_SUPPORTED' : 20192,
'DRV_OA_MODE_DOES_NOT_EXIST' : 20193,
'DRV_OA_CAMERA_NOT_SUPPORTED' : 20194,
'DRV_OA_FAILED_TO_GET_MODE' : 20195,
'DRV_PROCESSING_FAILED' : 20211}
# Reverse lookup: map numeric status codes back to their symbolic names
_SC = dict((val, key) for key, val in SC.items())
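# Minimal usage sketch: the reversed map _SC turns numeric SDK return values back
# into readable names. check_status() is a hypothetical helper, not part of the
# Andor SDK wrapper itself.
def check_status(code):
    name = _SC.get(code, 'UNKNOWN_STATUS_CODE_%d' % code)
    if code != SC['DRV_SUCCESS']:
        raise RuntimeError('Andor SDK call returned %s (%d)' % (name, code))
    return name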
|
labscript_devices/AndorSolis/andor_sdk/status_codes.py
|
| 0.159021 | 0.058319 |
import serial
import struct
import time
import sys
from numpy import *
class sutterMP285 :
'Class which allows interaction with the Sutter Manipulator 285'
def __init__(self):
self.verbose = 1. # level of messages
self.timeOut = 30 # timeout in sec
# initialize serial connection to controller
try:
self.ser = serial.Serial(port='COM1',baudrate=9600,bytesize=serial.EIGHTBITS,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,timeout=self.timeOut)
self.connected = 1
if self.verbose:
print self.ser
except serial.SerialException:
print 'No connection to Sutter MP-285 could be established!'
sys.exit(1)
# set move velocity to 200
self.setVelocity(200,10)
self.updatePanel() # update controller panel
(stepM,currentV,vScaleF)= self.getStatus()
if currentV == 200:
print 'sutterMP285 ready'
else:
print 'sutterMP285: WARNING Sutter did not respond at startup.'
# destructor
def __del__(self):
self.ser.close()
if self.verbose :
print 'Connection to Sutter MP-285 closed'
def getPosition(self):
        # send command to get the current position
self.ser.write('c\r')
# read position from controller
xyzb = self.ser.read(13)
# convert bytes into 'signed long' numbers
xyz_um = array(struct.unpack('lll', xyzb[:12]))/self.stepMult
if self.verbose:
print 'sutterMP285 : Stage position '
print 'X: %g um \n Y: %g um\n Z: %g um' % (xyz_um[0],xyz_um[1],xyz_um[2])
return xyz_um
# Moves the three axes to specified location.
def gotoPosition(self,pos):
if len(pos) != 3:
print 'Length of position argument has to be three'
sys.exit(1)
xyzb = struct.pack('lll',int(pos[0]*self.stepMult),int(pos[1]*self.stepMult),int(pos[2]*self.stepMult)) # convert integer values into bytes
startt = time.time() # start timer
self.ser.write('m'+xyzb+'\r') # send position to controller; add the "m" and the CR to create the move command
cr = []
cr = self.ser.read(1) # read carriage return and ignore
endt = time.time() # stop timer
if len(cr)== 0:
print 'Sutter did not finish moving before timeout (%d sec).' % self.timeOut
else:
print 'sutterMP285: Sutter move completed in (%.2f sec)' % (endt-startt)
# this function changes the velocity of the sutter motions
def setVelocity(self,Vel,vScalF=10):
# Change velocity command 'V'xxCR where xx= unsigned short (16bit) int velocity
# set by bits 14 to 0, and bit 15 indicates ustep resolution 0=10, 1=50 uSteps/step
# V is ascii 86
        # convert velocity into an unsigned short (2-byte) integer
velb = struct.pack('H',int(Vel))
# change last bit of 2nd byte to 1 for ustep resolution = 50
if vScalF == 50:
velb2 = double(struct.unpack('B',velb[1])) + 128
velb = velb[0] + struct.pack('B',velb2)
self.ser.write('V'+velb+'\r')
self.ser.read(1)
# Update Panel
# causes the Sutter to display the XYZ info on the front panel
def updatePanel(self):
self.ser.write('n\r') #Sutter replies with a CR
self.ser.read(1) # read and ignore the carriage return
## Set Origin
# sets the origin of the coordinate system to the current position
def setOrigin(self):
self.ser.write('o\r') # Sutter replies with a CR
        self.ser.read(1) # read and ignore the carriage return
# Reset controller
def sendReset(self):
self.ser.write('r\r') # Sutter does not reply
# Queries the status of the controller.
def getStatus(self):
if self.verbose :
print 'sutterMP285: get status info'
self.ser.write('s\r') # send status command
rrr = self.ser.read(32) # read return of 32 bytes without carriage return
self.ser.read(1) # read and ignore the carriage return
        # rrr now holds the 32 raw status bytes
statusbytes = struct.unpack(32*'B',rrr)
print statusbytes
# the value of STEP_MUL ("Multiplier yields msteps/nm") is at bytes 25 & 26
self.stepMult = double(statusbytes[25])*256 + double(statusbytes[24])
# the value of "XSPEED" and scale factor is at bytes 29 & 30
if statusbytes[29] > 127:
self.vScaleFactor = 50
else:
self.vScaleFactor = 10
#print double(127 & statusbytes[29])*256
#print double(statusbytes[28]), statusbytes[28]
#print double(statusbytes[29]), statusbytes[29]
self.currentVelocity = double(127 & statusbytes[29])*256+double(statusbytes[28])
#vScaleFactor = struct.unpack('lll', rrr[30:31])
if self.verbose:
print 'step_mul (usteps/um): %g' % self.stepMult
            print '"xspeed" [velocity] (usteps/sec): %g' % self.currentVelocity
print 'velocity scale factor (usteps/step): %g' % self.vScaleFactor
#
return (self.stepMult,self.currentVelocity,self.vScaleFactor)
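# Minimal usage sketch (assumes a controller connected on COM1, as the class
# expects): read the current stage position, nudge the X axis by a small
# relative offset, then close. Positions are numpy arrays in micrometres; the
# helper name and the 10 um step are illustrative assumptions only.
def _example_relative_move(dx_um=10.0):
    sutter = sutterMP285()            # opens COM1 and queries the controller
    pos = sutter.getPosition()        # current (x, y, z) in um
    target = pos.copy()
    target[0] = target[0] + dx_um     # shift the X axis
    sutter.gotoPosition(target)
    del sutter                        # destructor closes the serial port
    return target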
|
sutterMP285.py
|
| 0.182244 | 0.151749 |
import sys
import copy
import tempfile
import pytest
import torch
import numpy as np
import nibabel as nib
import torchio as tio
from ..utils import TorchioTestCase
class TestImage(TorchioTestCase):
"""Tests for `Image`."""
def test_image_not_found(self):
with self.assertRaises(FileNotFoundError):
tio.ScalarImage('nopath')
@pytest.mark.skipif(sys.platform == 'win32', reason='Path not valid')
def test_wrong_path_value(self):
with self.assertRaises(RuntimeError):
tio.ScalarImage('~&./@#"!?X7=+')
def test_wrong_path_type(self):
with self.assertRaises(TypeError):
tio.ScalarImage(5)
def test_wrong_affine(self):
with self.assertRaises(TypeError):
tio.ScalarImage(5, affine=1)
def test_tensor_flip(self):
sample_input = torch.ones((4, 30, 30, 30))
tio.RandomFlip()(sample_input)
def test_tensor_affine(self):
sample_input = torch.ones((4, 10, 10, 10))
tio.RandomAffine()(sample_input)
def test_wrong_scalar_image_type(self):
data = torch.ones((1, 10, 10, 10))
with self.assertRaises(ValueError):
tio.ScalarImage(tensor=data, type=tio.LABEL)
def test_wrong_label_map_type(self):
data = torch.ones((1, 10, 10, 10))
with self.assertRaises(ValueError):
tio.LabelMap(tensor=data, type=tio.INTENSITY)
def test_no_input(self):
with self.assertRaises(ValueError):
tio.ScalarImage()
def test_bad_key(self):
with self.assertRaises(ValueError):
tio.ScalarImage(path='', data=5)
def test_repr(self):
subject = tio.Subject(
t1=tio.ScalarImage(self.get_image_path('repr_test')),
)
assert 'memory' not in repr(subject['t1'])
subject.load()
assert 'memory' in repr(subject['t1'])
def test_data_tensor(self):
subject = copy.deepcopy(self.sample_subject)
subject.load()
self.assertIs(subject.t1.data, subject.t1.tensor)
def test_bad_affine(self):
with self.assertRaises(ValueError):
tio.ScalarImage(tensor=torch.rand(1, 2, 3, 4), affine=np.eye(3))
def test_nans_tensor(self):
tensor = np.random.rand(1, 2, 3, 4)
tensor[0, 0, 0, 0] = np.nan
with self.assertWarns(RuntimeWarning):
image = tio.ScalarImage(tensor=tensor, check_nans=True)
image.set_check_nans(False)
def test_get_center(self):
tensor = torch.rand(1, 3, 3, 3)
image = tio.ScalarImage(tensor=tensor)
ras = image.get_center()
lps = image.get_center(lps=True)
self.assertEqual(ras, (1, 1, 1))
self.assertEqual(lps, (-1, -1, 1))
def test_with_list_of_missing_files(self):
with self.assertRaises(FileNotFoundError):
tio.ScalarImage(path=['nopath', 'error'])
def test_with_a_list_of_paths(self):
shape = (5, 5, 5)
path1 = self.get_image_path('path1', shape=shape)
path2 = self.get_image_path('path2', shape=shape)
image = tio.ScalarImage(path=[path1, path2])
self.assertEqual(image.shape, (2, 5, 5, 5))
self.assertEqual(image[tio.STEM], ['path1', 'path2'])
def test_with_a_list_of_images_with_different_shapes(self):
path1 = self.get_image_path('path1', shape=(5, 5, 5))
path2 = self.get_image_path('path2', shape=(7, 5, 5))
image = tio.ScalarImage(path=[path1, path2])
with self.assertRaises(RuntimeError):
image.load()
def test_with_a_list_of_images_with_different_affines(self):
path1 = self.get_image_path('path1', spacing=(1, 1, 1))
path2 = self.get_image_path('path2', spacing=(1, 2, 1))
image = tio.ScalarImage(path=[path1, path2])
with self.assertWarns(RuntimeWarning):
image.load()
def test_with_a_list_of_2d_paths(self):
shape = (5, 6)
path1 = self.get_image_path('path1', shape=shape, suffix='.nii')
path2 = self.get_image_path('path2', shape=shape, suffix='.img')
path3 = self.get_image_path('path3', shape=shape, suffix='.hdr')
image = tio.ScalarImage(path=[path1, path2, path3])
self.assertEqual(image.shape, (3, 5, 6, 1))
self.assertEqual(image[tio.STEM], ['path1', 'path2', 'path3'])
def test_axis_name_2d(self):
path = self.get_image_path('im2d', shape=(5, 6))
image = tio.ScalarImage(path)
height_idx = image.axis_name_to_index('t')
width_idx = image.axis_name_to_index('l')
self.assertEqual(image.height, image.shape[height_idx])
self.assertEqual(image.width, image.shape[width_idx])
def test_plot(self):
image = self.sample_subject.t1
image.plot(show=False, output_path=self.dir / 'image.png')
def test_data_type_uint16_array(self):
tensor = np.random.rand(1, 3, 3, 3).astype(np.uint16)
image = tio.ScalarImage(tensor=tensor)
self.assertEqual(image.data.dtype, torch.int32)
def test_data_type_uint32_array(self):
tensor = np.random.rand(1, 3, 3, 3).astype(np.uint32)
image = tio.ScalarImage(tensor=tensor)
self.assertEqual(image.data.dtype, torch.int64)
def test_save_image_with_data_type_boolean(self):
tensor = np.random.rand(1, 3, 3, 3).astype(bool)
image = tio.ScalarImage(tensor=tensor)
image.save(self.dir / 'image.nii')
def test_load_uint(self):
affine = np.eye(4)
for dtype in np.uint16, np.uint32:
data = np.ones((3, 3, 3), dtype=dtype)
img = nib.Nifti1Image(data, affine)
with tempfile.NamedTemporaryFile(suffix='.nii', delete=False) as f:
nib.save(img, f.name)
tio.ScalarImage(f.name).load()
def test_pil_3d(self):
with self.assertRaises(RuntimeError):
tio.ScalarImage(tensor=torch.rand(1, 2, 3, 4)).as_pil()
def test_pil_1(self):
tio.ScalarImage(tensor=torch.rand(1, 2, 3, 1)).as_pil()
def test_pil_2(self):
with self.assertRaises(RuntimeError):
tio.ScalarImage(tensor=torch.rand(2, 2, 3, 1)).as_pil()
def test_pil_3(self):
tio.ScalarImage(tensor=torch.rand(3, 2, 3, 1)).as_pil()
def test_set_data(self):
with self.assertWarns(DeprecationWarning):
im = self.sample_subject.t1
im.data = im.data
def test_no_type(self):
with self.assertWarns(UserWarning):
tio.Image(tensor=torch.rand(1, 2, 3, 4))
def test_custom_reader(self):
path = self.dir / 'im.npy'
def numpy_reader(path):
return np.load(path), np.eye(4)
def assert_shape(shape_in, shape_out):
np.save(path, np.random.rand(*shape_in))
image = tio.ScalarImage(path, reader=numpy_reader)
assert image.shape == shape_out
assert_shape((5, 5), (1, 5, 5, 1))
assert_shape((5, 5, 3), (3, 5, 5, 1))
assert_shape((3, 5, 5), (3, 5, 5, 1))
assert_shape((5, 5, 5), (1, 5, 5, 5))
assert_shape((1, 5, 5, 5), (1, 5, 5, 5))
assert_shape((4, 5, 5, 5), (4, 5, 5, 5))
def test_fast_gif(self):
with self.assertWarns(UserWarning):
with tempfile.NamedTemporaryFile(suffix='.gif', delete=False) as f:
self.sample_subject.t1.to_gif(0, 0.0001, f.name)
def test_gif_rgb(self):
with tempfile.NamedTemporaryFile(suffix='.gif', delete=False) as f:
tio.ScalarImage(tensor=torch.rand(3, 4, 5, 6)).to_gif(0, 1, f.name)
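# --- Illustrative sketch (not part of the upstream test suite) ---------------
# A minimal example of the ScalarImage API these tests exercise: build an
# image from a 4D (channels, x, y, z) tensor plus an affine, check its shape,
# and round-trip it through a NIfTI file. The shape and file suffix are
# arbitrary assumptions chosen for the demonstration.
def example_scalar_image_round_trip():
    tensor = torch.rand(1, 16, 16, 16)                    # one channel, 16^3 voxels
    image = tio.ScalarImage(tensor=tensor, affine=np.eye(4))
    assert image.shape == (1, 16, 16, 16)
    with tempfile.NamedTemporaryFile(suffix='.nii', delete=False) as f:
        image.save(f.name)                                # write to disk
    return tio.ScalarImage(f.name).shape                  # reload and inspect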
|
tests/data/test_image.py
|
| 0.615203 | 0.583619 |
from __future__ import absolute_import, division, print_function
import logging
from collections import defaultdict, namedtuple
import pytest
import torch
import pyro
import pyro.distributions as dist
import pyro.optim as optim
from pyro.contrib.gp.kernels import Cosine, Matern32, RBF, WhiteNoise
from pyro.contrib.gp.likelihoods import Gaussian
from pyro.contrib.gp.models import (GPLVM, GPRegression, SparseGPRegression,
VariationalGP, VariationalSparseGP)
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.mcmc.hmc import HMC
from pyro.infer.mcmc.mcmc import MCMC
from pyro.params import param_with_module_name
from tests.common import assert_equal
logging.basicConfig(format='%(levelname)s %(message)s')
logger = logging.getLogger('pyro')
logger.setLevel(logging.INFO)
T = namedtuple("TestGPModel", ["model_class", "X", "y", "kernel", "likelihood"])
X = torch.tensor([[1., 5., 3.], [4., 3., 7.]])
y1D = torch.tensor([2., 1.])
y2D = torch.tensor([[1., 2.], [3., 3.], [1., 4.], [-1., 1.]])
kernel = RBF(input_dim=3, variance=torch.tensor(3.), lengthscale=torch.tensor(2.))
noise = torch.tensor(1e-6)
likelihood = Gaussian(noise)
TEST_CASES = [
T(
GPRegression,
X, y1D, kernel, noise
),
T(
GPRegression,
X, y2D, kernel, noise
),
T(
SparseGPRegression,
X, y1D, kernel, noise
),
T(
SparseGPRegression,
X, y2D, kernel, noise
),
T(
VariationalGP,
X, y1D, kernel, likelihood
),
T(
VariationalGP,
X, y2D, kernel, likelihood
),
T(
VariationalSparseGP,
X, y1D, kernel, likelihood
),
T(
VariationalSparseGP,
X, y2D, kernel, likelihood
),
]
TEST_IDS = [t[0].__name__ + "_y{}D".format(str(t[2].dim()))
for t in TEST_CASES]
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_model(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, None, kernel, X, likelihood)
else:
gp = model_class(X, None, kernel, likelihood)
loc, var = gp.model()
if model_class is VariationalGP or model_class is VariationalSparseGP:
assert_equal(loc.norm().item(), 0)
assert_equal(var, torch.ones(var.shape[-1]).expand(var.shape))
else:
assert_equal(loc.norm().item(), 0)
assert_equal(var, kernel(X).diag())
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_forward(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, y, kernel, X, likelihood)
else:
gp = model_class(X, y, kernel, likelihood)
# test shape
Xnew = torch.tensor([[2.0, 3.0, 1.0]])
loc0, cov0 = gp(Xnew, full_cov=True)
loc1, var1 = gp(Xnew, full_cov=False)
assert loc0.dim() == y.dim()
assert loc0.shape[-1] == Xnew.shape[0]
# test latent shape
assert loc0.shape[:-1] == y.shape[:-1]
assert cov0.shape[:-2] == y.shape[:-1]
assert cov0.shape[-1] == cov0.shape[-2]
assert cov0.shape[-1] == Xnew.shape[0]
assert_equal(loc0, loc1)
n = Xnew.shape[0]
cov0_diag = torch.stack([mat.diag() for mat in cov0.view(-1, n, n)]).reshape(var1.shape)
assert_equal(cov0_diag, var1)
# test trivial forward: Xnew = X
loc, cov = gp(X, full_cov=True)
if model_class is VariationalGP or model_class is VariationalSparseGP:
assert_equal(loc.norm().item(), 0)
assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape))
else:
assert_equal(loc, y)
assert_equal(cov.norm().item(), 0)
# test same input forward: Xnew[0,:] = Xnew[1,:] = ...
Xnew = torch.tensor([[2.0, 3.0, 1.0]]).expand(10, 3)
loc, cov = gp(Xnew, full_cov=True)
loc_diff = loc - loc[..., :1].expand(y.shape[:-1] + (10,))
assert_equal(loc_diff.norm().item(), 0)
cov_diff = cov - cov[..., :1, :1].expand(y.shape[:-1] + (10, 10))
assert_equal(cov_diff.norm().item(), 0)
# test noise kernel forward: kernel = WhiteNoise
gp.kernel = WhiteNoise(input_dim=3, variance=torch.tensor(10.))
loc, cov = gp(X, full_cov=True)
assert_equal(loc.norm().item(), 0)
assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape) * 10)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_forward_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
# regression models don't use latent_shape, no need for test
if model_class is GPRegression or model_class is SparseGPRegression:
return
elif model_class is VariationalGP:
gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
else: # model_class is VariationalSparseGP
gp = model_class(X, y, kernel, X, likelihood, latent_shape=torch.Size([]))
# test shape
Xnew = torch.tensor([[2.0, 3.0, 1.0]])
loc0, cov0 = gp(Xnew, full_cov=True)
loc1, var1 = gp(Xnew, full_cov=False)
assert loc0.shape[-1] == Xnew.shape[0]
assert cov0.shape[-1] == cov0.shape[-2]
assert cov0.shape[-1] == Xnew.shape[0]
# test latent shape
assert loc0.shape[:-1] == torch.Size([])
assert cov0.shape[:-2] == torch.Size([])
assert_equal(loc0, loc1)
assert_equal(cov0.diag(), var1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
@pytest.mark.init(rng_seed=0)
def test_inference(model_class, X, y, kernel, likelihood):
# skip variational GP models because variance/lengthscale highly
# depend on variational parameters
if model_class is VariationalGP or model_class is VariationalSparseGP:
return
elif model_class is GPRegression:
gp = model_class(X, y, RBF(input_dim=3), likelihood)
else: # model_class is SparseGPRegression
gp = model_class(X, y, RBF(input_dim=3), X, likelihood)
# fix the inducing points because variance/lengthscale depend strongly on them
gp.fix_param("Xu")
generator = dist.MultivariateNormal(torch.zeros(X.shape[0]), kernel(X))
target_y = generator(sample_shape=torch.Size([1000])).detach()
gp.set_data(X, target_y)
gp.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)
y_cov = gp.kernel(X)
target_y_cov = kernel(X)
assert_equal(y_cov, target_y_cov, prec=0.1)
@pytest.mark.init(rng_seed=0)
def test_inference_sgpr():
N = 1000
X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
kernel = RBF(input_dim=1)
Xu = torch.arange(0, 5.5, 0.5)
sgpr = SparseGPRegression(X, y, kernel, Xu)
sgpr.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)
Xnew = torch.arange(0, 5.05, 0.05)
loc, var = sgpr(Xnew, full_cov=False)
target = 0.5 * torch.sin(3*Xnew)
assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
@pytest.mark.init(rng_seed=0)
def test_inference_vsgp():
N = 1000
X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
kernel = RBF(input_dim=1)
Xu = torch.arange(0, 5.5, 0.5)
vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian())
vsgp.optimize(optim.Adam({"lr": 0.03}), num_steps=1000)
Xnew = torch.arange(0, 5.05, 0.05)
loc, var = vsgp(Xnew, full_cov=False)
target = 0.5 * torch.sin(3*Xnew)
assert_equal((loc - target).abs().mean().item(), 0, prec=0.06)
@pytest.mark.init(rng_seed=0)
def test_inference_whiten_vsgp():
N = 1000
X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
kernel = RBF(input_dim=1)
Xu = torch.arange(0, 5.5, 0.5)
vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian(), whiten=True)
vsgp.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)
Xnew = torch.arange(0, 5.05, 0.05)
loc, var = vsgp(Xnew, full_cov=False)
target = 0.5 * torch.sin(3*Xnew)
assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_inference_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
# regression models don't use latent_shape (default=torch.Size([]))
if model_class is GPRegression or model_class is SparseGPRegression:
return
elif model_class is VariationalGP:
gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
else: # model_class is VariationalSparseGP
gp = model_class(X, y, kernel, X, likelihood, latent_shape=torch.Size([]))
gp.optimize(num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_inference_with_whiten(model_class, X, y, kernel, likelihood):
# regression models don't use whiten
if model_class is GPRegression or model_class is SparseGPRegression:
return
elif model_class is VariationalGP:
gp = model_class(X, y, kernel, likelihood, whiten=True)
else: # model_class is VariationalSparseGP
gp = model_class(X, y, kernel, X, likelihood, whiten=True)
gp.optimize(num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_hmc(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, y, kernel, X, likelihood)
else:
gp = model_class(X, y, kernel, likelihood)
kernel.set_prior("variance", dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
kernel.set_prior("lengthscale", dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))
hmc_kernel = HMC(gp.model, step_size=1)
mcmc_run = MCMC(hmc_kernel, num_samples=10)
post_trace = defaultdict(list)
for trace, _ in mcmc_run._traces():
variance_name = param_with_module_name(kernel.name, "variance")
post_trace["variance"].append(trace.nodes[variance_name]["value"])
lengthscale_name = param_with_module_name(kernel.name, "lengthscale")
post_trace["lengthscale"].append(trace.nodes[lengthscale_name]["value"])
if model_class is VariationalGP:
f_name = param_with_module_name(gp.name, "f")
post_trace["f"].append(trace.nodes[f_name]["value"])
if model_class is VariationalSparseGP:
u_name = param_with_module_name(gp.name, "u")
post_trace["u"].append(trace.nodes[u_name]["value"])
for param in post_trace:
param_mean = torch.mean(torch.stack(post_trace[param]), 0)
logger.info("Posterior mean - {}".format(param))
logger.info(param_mean)
def test_inference_deepGP():
gp1 = GPRegression(X, None, kernel, name="GPR1")
Z, _ = gp1.model()
gp2 = VariationalSparseGP(Z, y2D, Matern32(input_dim=3), Z.clone(),
likelihood, name="GPR2")
def model():
Z, _ = gp1.model()
gp2.set_data(Z, y2D)
gp2.model()
def guide():
gp1.guide()
gp2.guide()
svi = SVI(model, guide, optim.Adam({}), Trace_ELBO())
svi.step()
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_gplvm(model_class, X, y, kernel, likelihood):
if model_class is SparseGPRegression or model_class is VariationalSparseGP:
gp = model_class(X, y, kernel, X, likelihood)
else:
gp = model_class(X, y, kernel, likelihood)
gplvm = GPLVM(gp)
# test inference
gplvm.optimize(num_steps=1)
# test forward
gplvm(Xnew=X)
def _pre_test_mean_function():
def f(x):
return 2 * x + 3 + 5 * torch.sin(7 * x)
X = torch.arange(100)
y = f(X)
Xnew = torch.arange(100, 150)
ynew = f(Xnew)
kernel = Cosine(input_dim=1)
def trend(x):
a = pyro.param("a", torch.tensor(0.))
b = pyro.param("b", torch.tensor(1.))
return a * x + b
return X, y, Xnew, ynew, kernel, trend
def _mape(y_true, y_pred):
return ((y_pred - y_true) / y_true).abs().mean()
def _post_test_mean_function(model, Xnew, y_true):
assert_equal(pyro.param("a").item(), 2, prec=0.02)
assert_equal(pyro.param("b").item(), 3, prec=0.02)
y_pred, _ = model(Xnew)
assert_equal(_mape(y_true, y_pred).item(), 0, prec=0.02)
def test_mean_function_GPR():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
model = GPRegression(X, y, kernel, mean_function=mean_fn)
model.optimize(optim.Adam({"lr": 0.01}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_SGPR():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
model = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn)
model.optimize(optim.Adam({"lr": 0.01}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_SGPR_DTC():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
model = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn, approx="DTC")
model.optimize(optim.Adam({"lr": 0.01}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_SGPR_FITC():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
model = SparseGPRegression(X, y, kernel, Xu, mean_function=mean_fn, approx="FITC")
model.optimize(optim.Adam({"lr": 0.01}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_VGP():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
likelihood = Gaussian()
model = VariationalGP(X, y, kernel, likelihood, mean_function=mean_fn)
model.optimize(optim.Adam({"lr": 0.01}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_VGP_whiten():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
likelihood = Gaussian()
model = VariationalGP(X, y, kernel, likelihood, mean_function=mean_fn,
whiten=True)
model.optimize(optim.Adam({"lr": 0.1}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_VSGP():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
likelihood = Gaussian()
model = VariationalSparseGP(X, y, kernel, Xu, likelihood, mean_function=mean_fn)
model.optimize(optim.Adam({"lr": 0.02}))
_post_test_mean_function(model, Xnew, ynew)
def test_mean_function_VSGP_whiten():
X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
Xu = X[::20].clone()
likelihood = Gaussian()
model = VariationalSparseGP(X, y, kernel, Xu, likelihood, mean_function=mean_fn,
whiten=True)
model.optimize(optim.Adam({"lr": 0.1}))
_post_test_mean_function(model, Xnew, ynew)
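# --- Illustrative sketch (not part of the upstream test suite) ---------------
# A minimal end-to-end use of the GPRegression API exercised above, on toy 1D
# data. The data size, noise level, learning rate and number of steps are
# arbitrary assumptions chosen only for the demonstration.
def example_gpr_fit():
    X = torch.linspace(0, 5, 20)
    y = 0.5 * torch.sin(3 * X)
    gpr = GPRegression(X, y, RBF(input_dim=1), torch.tensor(0.01))   # noise passed positionally, as in TEST_CASES
    gpr.optimize(optim.Adam({"lr": 0.01}), num_steps=100)            # same optimize API as the tests
    Xnew = torch.linspace(0, 5, 50)
    loc, var = gpr(Xnew, full_cov=False)                             # posterior mean and variance
    return loc, var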
|
tests/contrib/gp/test_models.py
|
| 0.836921 | 0.504516 |
from numpy import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
def adjust_spines(ax, spines):
"""
Remove the spines from a matplotlib graphic.
Taken from the matplotlib gallery (anonymous author).
Parameters
----------
ax: a matplotlib axes object
handle of the object to work with
spines: list of str
locations of the spines to keep
"""
for loc, spine in ax.spines.items():
if loc in spines:
pass
# print 'skipped'
# spine.set_position(('outward',10)) # outward by 10 points
# spine.set_smart_bounds(true)
else:
spine.set_color('none')
# don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
rmax = 0.2
colored = False
if colored:
fig = plt.figure(figsize=(15,5))
ax = plt.axes(xlim=(0-rmax,1+rmax), ylim=(0-rmax,1+rmax))
adjust_spines(ax, ['']) #Remove the axis
else:
fig = plt.figure(figsize=(5, 5))
ax = plt.axes(xlim=(0-rmax,1+rmax), ylim=(0-rmax,3+rmax))
adjust_spines(ax, ['']) #Remove the axis
tmax = 300  # If colored is True, tmax must be divisible by 3
ndrops = 10
drop_life = 50
rmax = 0.4
# Define the position of every drop,
# plus its opacity and radius at each time step
def init():
# Generate the random position of the drops
fall = np.zeros((ndrops, 4, tmax))
for i in range(ndrops):
fall_t = np.random.randint(tmax)
coor = np.random.rand(2)
k = 0
radius = np.linspace(0.05, rmax, drop_life)
opacity = np.linspace(1, 0, drop_life)
for j in range(fall_t, min(fall_t + drop_life, tmax)):
fall[i,0,j] = opacity[k]
fall[i,1,j] = radius[k]
fall[i,2,j] = coor[0]
fall[i,3,j] = coor[1]
k +=1
return fall
fall = init()
def ink_rain(t):
"""Generate a drawing of diluted inkdrops"""
plt.cla()
adjust_spines(ax, ['']) #Remove the axis
ax.set_xlim(0-rmax, 1+rmax)
ax.set_ylim(0-rmax, 1+rmax)
for i in range(ndrops):
if fall[i,0,t] > 0.1:
circle = plt.Circle((fall[i, 2, t], fall[i, 3, t]),
radius=fall[i, 1,t],
fc="black",
alpha=fall[i, 0,t])
ax.add_patch(circle)
return ax
if __name__ == "__main__":
anim = FuncAnimation(fig, ink_rain, init_func=init,
frames=tmax, interval=20)
anim.save('Rainy_sunday.avi')
#anim.save('spreading_ink.avi')
plt.show()
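# --- Illustrative note (not part of the original script) ---------------------
# Saving to .avi/.mp4 needs an external encoder such as ffmpeg; if it is not
# installed, anim.save() above will fail. A hedged alternative, assuming
# ffmpeg is available on the PATH (file name and frame rate are arbitrary):
#
# from matplotlib import animation as manimation
# if manimation.writers.is_available('ffmpeg'):
#     anim.save('Rainy_sunday.mp4', writer='ffmpeg', fps=30)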
|
inkdrops.py
|
| 0.388618 | 0.59408 |
from vmaf.config import VmafConfig
dataset_name = 'test_image'
yuv_fmt = 'yuv444p'
ref_videos = [
{'content_id': 0,
'content_name': '100007',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '100007.yuv'),
'width': 481},
{'content_id': 1,
'content_name': '100039',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '100039.yuv'),
'width': 481},
{'content_id': 2,
'content_name': '100075',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '100075.yuv'),
'width': 481},
{'content_id': 4,
'content_name': '100098',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '100098.yuv'),
'width': 481},
{'content_id': 5,
'content_name': '100099',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '100099.yuv'),
'width': 481},
{'content_id': 6,
'content_name': '10081',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '10081.yuv'),
'width': 481},
{'content_id': 7,
'content_name': '101027',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '101027.yuv'),
'width': 481},
{'content_id': 12,
'content_name': '102062',
'height': 321,
'path': VmafConfig.test_resource_path('test_image_yuv', '102062.yuv'),
'width': 481},
{'content_id': 13,
'content_name': '1012576_0m14s_15',
'height': 486,
'path': VmafConfig.test_resource_path('test_image_yuv', '1012576_0m14s_15.yuv'),
'width': 720},
]
dis_videos = [
{'asset_id': 0,
'content_id': 0,
'path': VmafConfig.test_resource_path('test_image_yuv', '100007.yuv'), 'groundtruth': 0},
{'asset_id': 1,
'content_id': 1,
'path': VmafConfig.test_resource_path('test_image_yuv', '100039.yuv'), 'groundtruth': 0},
{'asset_id': 2,
'content_id': 2,
'path': VmafConfig.test_resource_path('test_image_yuv', '100075.yuv'), 'groundtruth': 0},
{'asset_id': 4,
'content_id': 4,
'path': VmafConfig.test_resource_path('test_image_yuv', '100098.yuv'), 'groundtruth': 0},
{'asset_id': 5,
'content_id': 5,
'path': VmafConfig.test_resource_path('test_image_yuv', '100099.yuv'), 'groundtruth': 0},
{'asset_id': 6,
'content_id': 6,
'path': VmafConfig.test_resource_path('test_image_yuv', '10081.yuv'), 'groundtruth': 0},
{'asset_id': 7,
'content_id': 7,
'path': VmafConfig.test_resource_path('test_image_yuv', '101027.yuv'), 'groundtruth': 0},
{'asset_id': 12,
'content_id': 12,
'path': VmafConfig.test_resource_path('test_image_yuv', '102062.yuv'), 'groundtruth': 0},
{'asset_id': 13,
'content_id': 13,
'path': VmafConfig.test_resource_path('test_image_yuv', '1012576_0m14s_15.yuv'), 'groundtruth': 0},
{'asset_id': 50,
'content_id': 0,
'path': VmafConfig.test_resource_path('test_image_yuv', '100007_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 51,
'content_id': 1,
'path': VmafConfig.test_resource_path('test_image_yuv', '100039_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 52,
'content_id': 2,
'path': VmafConfig.test_resource_path('test_image_yuv', '100075_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 54,
'content_id': 4,
'path': VmafConfig.test_resource_path('test_image_yuv', '100098_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 55,
'content_id': 5,
'path': VmafConfig.test_resource_path('test_image_yuv', '100099_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 56,
'content_id': 6,
'path': VmafConfig.test_resource_path('test_image_yuv', '10081_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 57,
'content_id': 7,
'path': VmafConfig.test_resource_path('test_image_yuv', '101027_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 62,
'content_id': 12,
'path': VmafConfig.test_resource_path('test_image_yuv', '102062_noisy.yuv'), 'groundtruth': 1},
{'asset_id': 63,
'content_id': 13,
'path': VmafConfig.test_resource_path('test_image_yuv', '1012576_0m14s_15_noisy.yuv'), 'groundtruth': 1}
]
|
python/test/resource/test_image_dataset_noisy.py
|
| 0.433742 | 0.159479 |
"""MySQL prerequisite."""
import logging
import os
from spet.lib.utilities import download
from spet.lib.utilities import execute
from spet.lib.utilities import extract
from spet.lib.utilities import prettify
class MySQL:
"""MySQL prerequisite.
Args:
mysql_ver (str): Version number for MySQL.
glibc_ver (str): Version number for MySQL glibc.
root_dir (str): The main directory for SPET.
Attributes:
mysql_ver (str): Version number for MySQL.
glibc_ver (str): Version number for MySQL glibc.
src_dir (str): The source directory for installing packages.
mysql_dir (str): The source directory for MySQL.
"""
def __init__(self, mysql_ver, glibc_ver, root_dir):
self.mysql_ver = mysql_ver
self.glibc_ver = glibc_ver
self.src_dir = root_dir + "/src"
self.mysql_dir = self.src_dir + "/mysql"
def download(self):
"""Download MySQL.
Returns:
Boolean: True if download was successful otherwise False.
"""
minor_ver = ".".join(self.mysql_ver.split(".")[:2])
archive = "mysql-{}-linux-glibc{}-x86_64.tar.gz".format(
self.mysql_ver, self.glibc_ver)
url = "https://dev.mysql.com/get/Downloads/MySQL-{}/{}".format(
minor_ver, archive)
archive_path = "{}/{}".format(self.src_dir, archive)
if os.path.isfile(archive_path):
return True
logging.info("Downloading MySQL.")
download.file(url, archive_path)
if os.path.isfile(archive_path):
return True
return False
def extract(self):
"""Extract MySQL.
Returns:
Boolean: True if extraction was successful otherwise False.
"""
dir_path = "{}/mysql-{}-linux-glibc{}-x86_64".format(
self.src_dir, self.mysql_ver, self.glibc_ver)
file_path = dir_path + ".tar.gz"
if os.path.isdir(self.mysql_dir):
return True
if not os.path.isfile(file_path):
prettify.error_message(
'Cannot extract MySQL because "{}" could not be found.'.format(
file_path))
return False
logging.info("Extracting MySQL.")
extract.tar(file_path, self.src_dir)
os.rename(dir_path, self.mysql_dir)
if os.path.isdir(self.mysql_dir):
return True
return False
def setup(self):
"""Extract MySQL.
Returns:
Boolean: True if extraction was successful otherwise False.
"""
files_dir = self.mysql_dir + "/mysql-files"
if not os.path.isdir(self.mysql_dir):
prettify.error_message(
'Cannot setup MySQL because "{}" could not be found.'.format(
self.mysql_dir))
return False
if os.path.isdir(files_dir) and os.listdir(files_dir):
return True
os.makedirs(files_dir, exist_ok=True)
os.chmod(files_dir, 0o750)
execute.output(
"./bin/mysqld --initialize-insecure --user=root --basedir={} "
"--datadir={}".format(self.mysql_dir, files_dir),
working_dir=self.mysql_dir,
)
execute.output(
"./bin/mysql_ssl_rsa_setup --user=root --basedir={} --datadir={}".
format(self.mysql_dir, files_dir),
working_dir=self.mysql_dir,
)
return True
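# --- Illustrative sketch (not part of the original module) -------------------
# Typical use of this prerequisite class; the version numbers and root
# directory below are arbitrary assumptions chosen for the demonstration.
def example_install_mysql():
    mysql = MySQL(mysql_ver="8.0.16", glibc_ver="2.12", root_dir="/opt/spet")
    if not mysql.download():        # fetch the tarball into <root_dir>/src
        return False
    if not mysql.extract():         # unpack and rename to <root_dir>/src/mysql
        return False
    return mysql.setup()            # initialize the insecure data directory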
|
spet/lib/prerequisites/mysql.py
|
"""MySQL prerequisite."""
import logging
import os
from spet.lib.utilities import download
from spet.lib.utilities import execute
from spet.lib.utilities import extract
from spet.lib.utilities import prettify
class MySQL:
"""MySQL prerequisite.
Args:
mysql_ver (str): Version number for MySQL
glibc_ver (str): Version number for MySQL glibc.
root_dir (str): The main directory for SPET.
Attributes:
mysql_ver (str): Version number for MySQL.
mysql_ver (str): Version number for MySQL glibc.
src_dir (str): The source directory for installing packages.
mysql_dir (str): The source directory for MySQL.
"""
def __init__(self, mysql_ver, glibc_ver, root_dir):
self.mysql_ver = mysql_ver
self.glibc_ver = glibc_ver
self.src_dir = root_dir + "/src"
self.mysql_dir = self.src_dir + "/mysql"
def download(self):
"""Download MySQL.
Returns:
Boolean: True if download was successful otherwise False.
"""
minor_ver = ".".join(self.mysql_ver.split(".")[:2])
archive = "mysql-{}-linux-glibc{}-x86_64.tar.gz".format(
self.mysql_ver, self.glibc_ver)
url = "https://dev.mysql.com/get/Downloads/MySQL-{}/{}".format(
minor_ver, archive)
archive_path = "{}/{}".format(self.src_dir, archive)
if os.path.isfile(archive_path):
return True
logging.info("Downloading MySQL.")
download.file(url, archive_path)
if os.path.isfile(archive_path):
return True
return False
def extract(self):
"""Extract MySQL.
Returns:
Boolean: True if extraction was successful otherwise False.
"""
dir_path = "{}/mysql-{}-linux-glibc{}-x86_64".format(
self.src_dir, self.mysql_ver, self.glibc_ver)
file_path = dir_path + ".tar.gz"
if os.path.isdir(self.mysql_dir):
return True
if not os.path.isfile(file_path):
prettify.error_message(
'Cannot extract MySQL because "{}" could not be found.'.format(
file_path))
return False
logging.info("Extracting MySQL.")
extract.tar(file_path, self.src_dir)
os.rename(dir_path, self.mysql_dir)
if os.path.isdir(self.mysql_dir):
return True
return False
def setup(self):
"""Extract MySQL.
Returns:
Boolean: True if extraction was successful otherwise False.
"""
files_dir = self.mysql_dir + "/mysql-files"
if not os.path.isdir(self.mysql_dir):
prettify.error_message(
'Cannot setup MySQL because "{}" could not be found.'.format(
self.mysql_dir))
return False
if os.path.isdir(files_dir) and os.listdir(files_dir):
return True
os.makedirs(files_dir, exist_ok=True)
os.chmod(files_dir, 0o750)
execute.output(
"./bin/mysqld --initialize-insecure --user=root --basedir={} "
"--datadir={}".format(self.mysql_dir, files_dir),
working_dir=self.mysql_dir,
)
execute.output(
"./bin/mysql_ssl_rsa_setup --user=root --basedir={} --datadir={}".
format(self.mysql_dir, files_dir),
working_dir=self.mysql_dir,
)
return True
| 0.753829 | 0.098209 |
from django.db import migrations, models
import django.db.models.deletion
import martor.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('tag', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('published_at', models.DateTimeField(auto_now_add=True)),
('comment', models.TextField(max_length=1000)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100, unique=True)),
('description', models.CharField(max_length=300)),
('post', martor.models.MartorField()),
('published_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('published', models.PositiveSmallIntegerField(choices=[(1, 'Published'), (0, 'Draft')])),
],
),
migrations.CreateModel(
name='PostSeries',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveSmallIntegerField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.post')),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='Series',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100, unique=True)),
('posts', models.ManyToManyField(related_name='series_posts', through='blog.PostSeries', to='blog.Post')),
('tags', models.ManyToManyField(related_name='series_tags', to='tag.Tag')),
],
),
migrations.AddField(
model_name='postseries',
name='series',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.series'),
),
]
|
django/codentino/apps/blog/migrations/0001_initial.py
|
from django.db import migrations, models
import django.db.models.deletion
import martor.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('tag', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('published_at', models.DateTimeField(auto_now_add=True)),
('comment', models.TextField(max_length=1000)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100, unique=True)),
('description', models.CharField(max_length=300)),
('post', martor.models.MartorField()),
('published_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('published', models.PositiveSmallIntegerField(choices=[(1, 'Published'), (0, 'Draft')])),
],
),
migrations.CreateModel(
name='PostSeries',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveSmallIntegerField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.post')),
],
options={
'ordering': ('order',),
},
),
migrations.CreateModel(
name='Series',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100, unique=True)),
('posts', models.ManyToManyField(related_name='series_posts', through='blog.PostSeries', to='blog.Post')),
('tags', models.ManyToManyField(related_name='series_tags', to='tag.Tag')),
],
),
migrations.AddField(
model_name='postseries',
name='series',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.series'),
),
]
| 0.60743 | 0.191365 |
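For orientation, a short sketch of how the Series/PostSeries relation created by this migration is typically used: posts are attached to a series through the explicit order column and read back in that order. The model names mirror the migration, but the import path and helper functions are illustrative assumptions, not part of the project.

from blog.models import Post, Series, PostSeries

def add_post_to_series(series: Series, post: Post, position: int) -> PostSeries:
    # The through model carries the explicit position of each post in the series.
    return PostSeries.objects.create(series=series, post=post, order=position)

def posts_in_reading_order(series: Series):
    # PostSeries declares ordering = ('order',), so iterating the reverse relation
    # yields the posts in their stored positions.
    return [link.post for link in series.postseries_set.select_related("post")]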
import numpy
from io_funcs.binary_io import BinaryIOCollection
import logging
class MinMaxNormalisation(object):
def __init__(self, feature_dimension, min_value = 0.01, max_value = 0.99, min_vector = 0.0, max_vector = 0.0, exclude_columns=[]):
# this is the wrong name for this logger because we can also normalise labels here too
logger = logging.getLogger("acoustic_norm")
self.target_min_value = min_value
self.target_max_value = max_value
self.feature_dimension = feature_dimension
self.min_vector = min_vector
self.max_vector = max_vector
self.exclude_columns = exclude_columns
if type(min_vector) != float:
try:
assert( len(self.min_vector) == self.feature_dimension)
except AssertionError:
logger.critical('inconsistent feature_dimension (%d) and length of min_vector (%d)' % (self.feature_dimension,len(self.min_vector)))
raise
if type(max_vector) != float:
try:
assert( len(self.max_vector) == self.feature_dimension)
except AssertionError:
logger.critical('inconsistent feature_dimension (%d) and length of max_vector (%d)' % (self.feature_dimension,len(self.max_vector)))
raise
logger.debug('MinMaxNormalisation created for feature dimension of %d' % self.feature_dimension)
def load_min_max_values(self, label_norm_file):
logger = logging.getLogger("acoustic_norm")
io_funcs = BinaryIOCollection()
min_max_vector, frame_number = io_funcs.load_binary_file_frame(label_norm_file, 1)
min_max_vector = numpy.reshape(min_max_vector, (-1, ))
self.min_vector = min_max_vector[0:frame_number//2]
self.max_vector = min_max_vector[frame_number//2:]
logger.info('Loaded min max values from the trained data for feature dimension of %d' % self.feature_dimension)
def find_min_max_values(self, in_file_list):
logger = logging.getLogger("acoustic_norm")
file_number = len(in_file_list)
min_value_matrix = numpy.zeros((file_number, self.feature_dimension))
max_value_matrix = numpy.zeros((file_number, self.feature_dimension))
io_funcs = BinaryIOCollection()
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
temp_min = numpy.amin(features, axis = 0)
temp_max = numpy.amax(features, axis = 0)
            min_value_matrix[i, :] = temp_min
            max_value_matrix[i, :] = temp_max
self.min_vector = numpy.amin(min_value_matrix, axis = 0)
self.max_vector = numpy.amax(max_value_matrix, axis = 0)
self.min_vector = numpy.reshape(self.min_vector, (1, self.feature_dimension))
self.max_vector = numpy.reshape(self.max_vector, (1, self.feature_dimension))
# po=numpy.get_printoptions()
# numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
logger.info('across %d files found min/max values of length %d:' % (file_number,self.feature_dimension) )
logger.info(' min: %s' % self.min_vector)
logger.info(' max: %s' % self.max_vector)
# restore the print options
# numpy.set_printoptions(po)
def normalise_data(self, in_file_list, out_file_list):
file_number = len(in_file_list)
fea_max_min_diff = self.max_vector - self.min_vector
diff_value = self.target_max_value - self.target_min_value
fea_max_min_diff = numpy.reshape(fea_max_min_diff, (1, self.feature_dimension))
target_max_min_diff = numpy.zeros((1, self.feature_dimension))
target_max_min_diff.fill(diff_value)
target_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
fea_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
io_funcs = BinaryIOCollection()
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
frame_number = features.size // self.feature_dimension
fea_min_matrix = numpy.tile(self.min_vector, (frame_number, 1))
target_min_matrix = numpy.tile(self.target_min_value, (frame_number, self.feature_dimension))
fea_diff_matrix = numpy.tile(fea_max_min_diff, (frame_number, 1))
diff_norm_matrix = numpy.tile(target_max_min_diff, (frame_number, 1)) / fea_diff_matrix
norm_features = diff_norm_matrix * (features - fea_min_matrix) + target_min_matrix
## If we are to keep some columns unnormalised, use advanced indexing to
## reinstate original values:
m,n = numpy.shape(features)
for col in self.exclude_columns:
norm_features[list(range(m)),[col]*m] = features[list(range(m)),[col]*m]
io_funcs.array_to_binary_file(norm_features, out_file_list[i])
# norm_features = numpy.array(norm_features, 'float32')
# fid = open(out_file_list[i], 'wb')
# norm_features.tofile(fid)
# fid.close()
def denormalise_data(self, in_file_list, out_file_list):
logger = logging.getLogger("acoustic_norm")
file_number = len(in_file_list)
logger.info('MinMaxNormalisation.denormalise_data for %d files' % file_number)
# print self.max_vector, self.min_vector
fea_max_min_diff = self.max_vector - self.min_vector
diff_value = self.target_max_value - self.target_min_value
# logger.debug('reshaping fea_max_min_diff from shape %s to (1,%d)' % (fea_max_min_diff.shape, self.feature_dimension) )
fea_max_min_diff = numpy.reshape(fea_max_min_diff, (1, self.feature_dimension))
target_max_min_diff = numpy.zeros((1, self.feature_dimension))
target_max_min_diff.fill(diff_value)
target_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
fea_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
io_funcs = BinaryIOCollection()
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
frame_number = features.size // self.feature_dimension
fea_min_matrix = numpy.tile(self.min_vector, (frame_number, 1))
target_min_matrix = numpy.tile(self.target_min_value, (frame_number, self.feature_dimension))
fea_diff_matrix = numpy.tile(fea_max_min_diff, (frame_number, 1))
diff_norm_matrix = fea_diff_matrix / numpy.tile(target_max_min_diff, (frame_number, 1))
norm_features = diff_norm_matrix * (features - target_min_matrix) + fea_min_matrix
io_funcs.array_to_binary_file(norm_features, out_file_list[i])
def normal_standardization(self, in_file_list, out_file_list):
mean_vector = self.compute_mean(in_file_list)
std_vector = self.compute_std(in_file_list, mean_vector)
io_funcs = BinaryIOCollection()
file_number = len(in_file_list)
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
current_frame_number = features.size // self.feature_dimension
mean_matrix = numpy.tile(mean_vector, (current_frame_number, 1))
std_matrix = numpy.tile(std_vector, (current_frame_number, 1))
norm_features = (features - mean_matrix) / std_matrix
io_funcs.array_to_binary_file(norm_features, out_file_list[i])
def compute_mean(self, file_list):
logger = logging.getLogger("acoustic_norm")
mean_vector = numpy.zeros((1, self.feature_dimension))
all_frame_number = 0
io_funcs = BinaryIOCollection()
for file_name in file_list:
features = io_funcs.load_binary_file(file_name, self.feature_dimension)
current_frame_number = features.size // self.feature_dimension
mean_vector += numpy.reshape(numpy.sum(features, axis=0), (1, self.feature_dimension))
all_frame_number += current_frame_number
mean_vector /= float(all_frame_number)
# po=numpy.get_printoptions()
# numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
logger.info('computed mean vector of length %d :' % mean_vector.shape[1] )
logger.info(' mean: %s' % mean_vector)
# restore the print options
# numpy.set_printoptions(po)
return mean_vector
def compute_std(self, file_list, mean_vector):
logger = logging.getLogger("acoustic_norm")
std_vector = numpy.zeros((1, self.feature_dimension))
all_frame_number = 0
io_funcs = BinaryIOCollection()
for file_name in file_list:
features = io_funcs.load_binary_file(file_name, self.feature_dimension)
current_frame_number = features.size // self.feature_dimension
mean_matrix = numpy.tile(mean_vector, (current_frame_number, 1))
std_vector += numpy.reshape(numpy.sum((features - mean_matrix) ** 2, axis=0), (1, self.feature_dimension))
all_frame_number += current_frame_number
std_vector /= float(all_frame_number)
std_vector = std_vector ** 0.5
# po=numpy.get_printoptions()
# numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
logger.info('computed std vector of length %d' % std_vector.shape[1] )
logger.info(' std: %s' % std_vector)
# restore the print options
# numpy.set_printoptions(po)
return std_vector
if __name__ == '__main__':
in_file_list = ['/group/project/dnn_tts/data/nick/sp/nick/herald_001.sp']
out_file_list = ['/group/project/dnn_tts/herald_001.sp']
out_file_list1 = ['/group/project/dnn_tts/herald_001.test.sp']
feature_dimension = 1025
normaliser = MinMaxNormalisation(feature_dimension, min_value = 0.01, max_value = 0.99)
normaliser.find_min_max_values(in_file_list)
tmp_min_vector = normaliser.min_vector
tmp_max_vector = normaliser.max_vector
normaliser.normalise_data(in_file_list, out_file_list)
denormaliser = MinMaxNormalisation(feature_dimension, min_value = 0.01, max_value = 0.99, min_vector = tmp_min_vector, max_vector = tmp_max_vector)
denormaliser.denormalise_data(out_file_list, out_file_list1)
|
src/frontend/min_max_norm.py
|
import numpy
from io_funcs.binary_io import BinaryIOCollection
import logging
class MinMaxNormalisation(object):
def __init__(self, feature_dimension, min_value = 0.01, max_value = 0.99, min_vector = 0.0, max_vector = 0.0, exclude_columns=[]):
# this is the wrong name for this logger because we can also normalise labels here too
logger = logging.getLogger("acoustic_norm")
self.target_min_value = min_value
self.target_max_value = max_value
self.feature_dimension = feature_dimension
self.min_vector = min_vector
self.max_vector = max_vector
self.exclude_columns = exclude_columns
if type(min_vector) != float:
try:
assert( len(self.min_vector) == self.feature_dimension)
except AssertionError:
logger.critical('inconsistent feature_dimension (%d) and length of min_vector (%d)' % (self.feature_dimension,len(self.min_vector)))
raise
if type(max_vector) != float:
try:
assert( len(self.max_vector) == self.feature_dimension)
except AssertionError:
logger.critical('inconsistent feature_dimension (%d) and length of max_vector (%d)' % (self.feature_dimension,len(self.max_vector)))
raise
logger.debug('MinMaxNormalisation created for feature dimension of %d' % self.feature_dimension)
def load_min_max_values(self, label_norm_file):
logger = logging.getLogger("acoustic_norm")
io_funcs = BinaryIOCollection()
min_max_vector, frame_number = io_funcs.load_binary_file_frame(label_norm_file, 1)
min_max_vector = numpy.reshape(min_max_vector, (-1, ))
self.min_vector = min_max_vector[0:frame_number//2]
self.max_vector = min_max_vector[frame_number//2:]
logger.info('Loaded min max values from the trained data for feature dimension of %d' % self.feature_dimension)
def find_min_max_values(self, in_file_list):
logger = logging.getLogger("acoustic_norm")
file_number = len(in_file_list)
min_value_matrix = numpy.zeros((file_number, self.feature_dimension))
max_value_matrix = numpy.zeros((file_number, self.feature_dimension))
io_funcs = BinaryIOCollection()
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
temp_min = numpy.amin(features, axis = 0)
temp_max = numpy.amax(features, axis = 0)
            min_value_matrix[i, :] = temp_min
            max_value_matrix[i, :] = temp_max
self.min_vector = numpy.amin(min_value_matrix, axis = 0)
self.max_vector = numpy.amax(max_value_matrix, axis = 0)
self.min_vector = numpy.reshape(self.min_vector, (1, self.feature_dimension))
self.max_vector = numpy.reshape(self.max_vector, (1, self.feature_dimension))
# po=numpy.get_printoptions()
# numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
logger.info('across %d files found min/max values of length %d:' % (file_number,self.feature_dimension) )
logger.info(' min: %s' % self.min_vector)
logger.info(' max: %s' % self.max_vector)
# restore the print options
# numpy.set_printoptions(po)
def normalise_data(self, in_file_list, out_file_list):
file_number = len(in_file_list)
fea_max_min_diff = self.max_vector - self.min_vector
diff_value = self.target_max_value - self.target_min_value
fea_max_min_diff = numpy.reshape(fea_max_min_diff, (1, self.feature_dimension))
target_max_min_diff = numpy.zeros((1, self.feature_dimension))
target_max_min_diff.fill(diff_value)
target_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
fea_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
io_funcs = BinaryIOCollection()
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
frame_number = features.size // self.feature_dimension
fea_min_matrix = numpy.tile(self.min_vector, (frame_number, 1))
target_min_matrix = numpy.tile(self.target_min_value, (frame_number, self.feature_dimension))
fea_diff_matrix = numpy.tile(fea_max_min_diff, (frame_number, 1))
diff_norm_matrix = numpy.tile(target_max_min_diff, (frame_number, 1)) / fea_diff_matrix
norm_features = diff_norm_matrix * (features - fea_min_matrix) + target_min_matrix
## If we are to keep some columns unnormalised, use advanced indexing to
## reinstate original values:
m,n = numpy.shape(features)
for col in self.exclude_columns:
norm_features[list(range(m)),[col]*m] = features[list(range(m)),[col]*m]
io_funcs.array_to_binary_file(norm_features, out_file_list[i])
# norm_features = numpy.array(norm_features, 'float32')
# fid = open(out_file_list[i], 'wb')
# norm_features.tofile(fid)
# fid.close()
def denormalise_data(self, in_file_list, out_file_list):
logger = logging.getLogger("acoustic_norm")
file_number = len(in_file_list)
logger.info('MinMaxNormalisation.denormalise_data for %d files' % file_number)
# print self.max_vector, self.min_vector
fea_max_min_diff = self.max_vector - self.min_vector
diff_value = self.target_max_value - self.target_min_value
# logger.debug('reshaping fea_max_min_diff from shape %s to (1,%d)' % (fea_max_min_diff.shape, self.feature_dimension) )
fea_max_min_diff = numpy.reshape(fea_max_min_diff, (1, self.feature_dimension))
target_max_min_diff = numpy.zeros((1, self.feature_dimension))
target_max_min_diff.fill(diff_value)
target_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
fea_max_min_diff[fea_max_min_diff <= 0.0] = 1.0
io_funcs = BinaryIOCollection()
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
frame_number = features.size // self.feature_dimension
fea_min_matrix = numpy.tile(self.min_vector, (frame_number, 1))
target_min_matrix = numpy.tile(self.target_min_value, (frame_number, self.feature_dimension))
fea_diff_matrix = numpy.tile(fea_max_min_diff, (frame_number, 1))
diff_norm_matrix = fea_diff_matrix / numpy.tile(target_max_min_diff, (frame_number, 1))
norm_features = diff_norm_matrix * (features - target_min_matrix) + fea_min_matrix
io_funcs.array_to_binary_file(norm_features, out_file_list[i])
def normal_standardization(self, in_file_list, out_file_list):
mean_vector = self.compute_mean(in_file_list)
std_vector = self.compute_std(in_file_list, mean_vector)
io_funcs = BinaryIOCollection()
file_number = len(in_file_list)
for i in range(file_number):
features = io_funcs.load_binary_file(in_file_list[i], self.feature_dimension)
current_frame_number = features.size // self.feature_dimension
mean_matrix = numpy.tile(mean_vector, (current_frame_number, 1))
std_matrix = numpy.tile(std_vector, (current_frame_number, 1))
norm_features = (features - mean_matrix) / std_matrix
io_funcs.array_to_binary_file(norm_features, out_file_list[i])
def compute_mean(self, file_list):
logger = logging.getLogger("acoustic_norm")
mean_vector = numpy.zeros((1, self.feature_dimension))
all_frame_number = 0
io_funcs = BinaryIOCollection()
for file_name in file_list:
features = io_funcs.load_binary_file(file_name, self.feature_dimension)
current_frame_number = features.size // self.feature_dimension
mean_vector += numpy.reshape(numpy.sum(features, axis=0), (1, self.feature_dimension))
all_frame_number += current_frame_number
mean_vector /= float(all_frame_number)
# po=numpy.get_printoptions()
# numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
logger.info('computed mean vector of length %d :' % mean_vector.shape[1] )
logger.info(' mean: %s' % mean_vector)
# restore the print options
# numpy.set_printoptions(po)
return mean_vector
def compute_std(self, file_list, mean_vector):
logger = logging.getLogger("acoustic_norm")
std_vector = numpy.zeros((1, self.feature_dimension))
all_frame_number = 0
io_funcs = BinaryIOCollection()
for file_name in file_list:
features = io_funcs.load_binary_file(file_name, self.feature_dimension)
current_frame_number = features.size // self.feature_dimension
mean_matrix = numpy.tile(mean_vector, (current_frame_number, 1))
std_vector += numpy.reshape(numpy.sum((features - mean_matrix) ** 2, axis=0), (1, self.feature_dimension))
all_frame_number += current_frame_number
std_vector /= float(all_frame_number)
std_vector = std_vector ** 0.5
# po=numpy.get_printoptions()
# numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
logger.info('computed std vector of length %d' % std_vector.shape[1] )
logger.info(' std: %s' % std_vector)
# restore the print options
# numpy.set_printoptions(po)
return std_vector
if __name__ == '__main__':
in_file_list = ['/group/project/dnn_tts/data/nick/sp/nick/herald_001.sp']
out_file_list = ['/group/project/dnn_tts/herald_001.sp']
out_file_list1 = ['/group/project/dnn_tts/herald_001.test.sp']
feature_dimension = 1025
normaliser = MinMaxNormalisation(feature_dimension, min_value = 0.01, max_value = 0.99)
normaliser.find_min_max_values(in_file_list)
tmp_min_vector = normaliser.min_vector
tmp_max_vector = normaliser.max_vector
normaliser.normalise_data(in_file_list, out_file_list)
denormaliser = MinMaxNormalisation(feature_dimension, min_value = 0.01, max_value = 0.99, min_vector = tmp_min_vector, max_vector = tmp_max_vector)
denormaliser.denormalise_data(out_file_list, out_file_list1)
| 0.454956 | 0.386127 |
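The core arithmetic in normalise_data and denormalise_data is an affine map between each feature's [min, max] range and the target [0.01, 0.99] range. Below is a self-contained numpy sketch of that round trip with toy values and no file I/O; it mirrors the guard for constant columns but is only an illustration of the formula, not a drop-in replacement for the class.

import numpy as np

def min_max_normalise(features, target_min=0.01, target_max=0.99):
    fea_min = features.min(axis=0)
    fea_diff = features.max(axis=0) - fea_min
    fea_diff[fea_diff <= 0.0] = 1.0  # guard constant columns, as the class does
    scale = (target_max - target_min) / fea_diff
    return (features - fea_min) * scale + target_min, fea_min, fea_diff

def min_max_denormalise(norm, fea_min, fea_diff, target_min=0.01, target_max=0.99):
    scale = fea_diff / (target_max - target_min)
    return (norm - target_min) * scale + fea_min

x = np.array([[1.0, 10.0], [2.0, 10.0], [4.0, 30.0]])
norm, lo, diff = min_max_normalise(x)
assert np.allclose(min_max_denormalise(norm, lo, diff), x)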
from functools import partial
import glob
import os
import shutil
import threading
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.profiler
from jax.config import config
import jax.test_util as jtu
try:
import portpicker
except ImportError:
portpicker = None
try:
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as tf_profiler
except ImportError:
profiler_client = None
tf_profiler = None
config.parse_flags_with_absl()
class ProfilerTest(unittest.TestCase):
# These tests simply test that the profiler API does not crash; they do not
# check functional correctness.
def setUp(self):
super().setUp()
self.worker_start = threading.Event()
self.profile_done = False
@unittest.skipIf(not portpicker, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
jax.profiler.start_server(port=port)
del port
def testTraceContext(self):
x = 3
with jax.profiler.TraceContext("mycontext"):
x = x + 2
def testTraceFunction(self):
@jax.profiler.trace_function
def f(x):
return x + 2
self.assertEqual(f(7), 9)
@partial(jax.profiler.trace_function, name="aname")
def g(x):
return x + 2
self.assertEqual(g(7), 9)
@partial(jax.profiler.trace_function, name="aname", akwarg="hello")
def h(x):
return x + 2
self.assertEqual(h(7), 9)
def testDeviceMemoryProfile(self):
x = jnp.ones((20,)) + 7.
self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)
del x
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
@unittest.skipIf(not (portpicker and profiler_client and tf_profiler),
"Test requires tensorflow.profiler and portpicker")
def testSingleWorkerSamplingMode(self, delay_ms=None):
def on_worker(port, worker_start):
# Must keep return value `server` around.
server = jax.profiler.start_server(port) # noqa: F841
worker_start.set()
x = jnp.ones((1000, 1000))
while True:
with jax.profiler.TraceContext("atracecontext"):
jnp.dot(x, x.T).block_until_ready()
if self.profile_done:
break
def on_profile(port, logdir, worker_start):
worker_start.wait()
options = tf_profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=2,
device_tracer_level=1,
delay_ms=delay_ms,
)
      # Request 1000 ms of profiling data.
duration_ms = 1000
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 1000, options)
self.profile_done = True
logdir = absltest.get_default_test_tmpdir()
# Remove any existing log files.
shutil.rmtree(logdir, ignore_errors=True)
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(
target=on_profile, args=(port, logdir, self.worker_start))
thread_worker = threading.Thread(
target=on_worker, args=(port, self.worker_start))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
tests/profiler_test.py
|
from functools import partial
import glob
import os
import shutil
import threading
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.profiler
from jax.config import config
import jax.test_util as jtu
try:
import portpicker
except ImportError:
portpicker = None
try:
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as tf_profiler
except ImportError:
profiler_client = None
tf_profiler = None
config.parse_flags_with_absl()
class ProfilerTest(unittest.TestCase):
# These tests simply test that the profiler API does not crash; they do not
# check functional correctness.
def setUp(self):
super().setUp()
self.worker_start = threading.Event()
self.profile_done = False
@unittest.skipIf(not portpicker, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
jax.profiler.start_server(port=port)
del port
def testTraceContext(self):
x = 3
with jax.profiler.TraceContext("mycontext"):
x = x + 2
def testTraceFunction(self):
@jax.profiler.trace_function
def f(x):
return x + 2
self.assertEqual(f(7), 9)
@partial(jax.profiler.trace_function, name="aname")
def g(x):
return x + 2
self.assertEqual(g(7), 9)
@partial(jax.profiler.trace_function, name="aname", akwarg="hello")
def h(x):
return x + 2
self.assertEqual(h(7), 9)
def testDeviceMemoryProfile(self):
x = jnp.ones((20,)) + 7.
self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)
del x
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
@unittest.skipIf(not (portpicker and profiler_client and tf_profiler),
"Test requires tensorflow.profiler and portpicker")
def testSingleWorkerSamplingMode(self, delay_ms=None):
def on_worker(port, worker_start):
# Must keep return value `server` around.
server = jax.profiler.start_server(port) # noqa: F841
worker_start.set()
x = jnp.ones((1000, 1000))
while True:
with jax.profiler.TraceContext("atracecontext"):
jnp.dot(x, x.T).block_until_ready()
if self.profile_done:
break
def on_profile(port, logdir, worker_start):
worker_start.wait()
options = tf_profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=2,
device_tracer_level=1,
delay_ms=delay_ms,
)
      # Request 1000 ms of profiling data.
duration_ms = 1000
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 1000, options)
self.profile_done = True
logdir = absltest.get_default_test_tmpdir()
# Remove any existing log files.
shutil.rmtree(logdir, ignore_errors=True)
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(
target=on_profile, args=(port, logdir, self.worker_start))
thread_worker = threading.Thread(
target=on_worker, args=(port, self.worker_start))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| 0.498535 | 0.193776 |
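Outside the test harness, the same profiler hooks can be driven directly. The sketch below sticks to the APIs exercised above (start_server, TraceContext, device_memory_profile); the port number and the toy workload are placeholders, and newer JAX releases may expose these entry points under different names.

import jax.numpy as jnp
import jax.profiler

# Keep a reference to the returned server object, as the test notes, or it may be collected.
server = jax.profiler.start_server(port=9999)  # placeholder port

x = jnp.ones((512, 512))
with jax.profiler.TraceContext("matmul_demo"):
    jnp.dot(x, x.T).block_until_ready()

# Device memory snapshot in pprof format, as asserted by testDeviceMemoryProfile above.
snapshot = jax.profiler.device_memory_profile()
print(type(snapshot), len(snapshot))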
from __future__ import absolute_import, print_function, unicode_literals
import re
import debug # pyflakes:ignore
from ietf.submit.parsers.base import FileParser
class PlainParser(FileParser):
ext = 'txt'
mimetypes = ['text/plain', ]
def __init__(self, fd):
super(PlainParser, self).__init__(fd)
# If some error is found after this method invocation
# no other file parsing is recommended
def critical_parse(self):
super(PlainParser, self).critical_parse()
self.check_file_charset()
self.parse_name()
return self.parsed_info
def check_file_charset(self):
charset = self.parsed_info.charset
if not charset in ['us-ascii', 'utf-8',]:
self.parsed_info.add_error('A plain text ASCII document is required. '
'Found an unexpected encoding: "%s". '
'You probably have one or more non-ascii characters in your file.' % charset
)
if self.fd.charset and charset != self.fd.charset:
self.parsed_info.add_error("Unexpected charset mismatch: upload: %s, libmagic: %s" % (self.fd.charset, charset))
def parse_name(self):
self.fd.file.seek(0)
draftre = re.compile(r'(draft-\S+)')
revisionre = re.compile(r'.*-(\d+)$')
limit = 80
if self.parsed_info.charset in ['us-ascii', 'utf-8']:
while limit:
limit -= 1
line = self.fd.readline().decode(self.parsed_info.charset)
match = draftre.search(line)
if not match:
continue
name = match.group(1)
name = re.sub(r'^[^\w]+', '', name)
name = re.sub(r'[^\w]+$', '', name)
name = re.sub(r'\.txt$', '', name)
extra_chars = re.sub(r'[0-9a-z\-]', '', name)
if extra_chars:
if len(extra_chars) == 1:
                        self.parsed_info.add_error(('The document name on the first page, "%s", contains a disallowed character with byte code: %s ' % (name, ord(extra_chars[0]))) +
'(see https://www.ietf.org/id-info/guidelines.html#naming for details).')
else:
                        self.parsed_info.add_error(('The document name on the first page, "%s", contains disallowed characters with byte codes: %s ' % (name, (', '.join([ str(ord(c)) for c in extra_chars] )))) +
'(see https://www.ietf.org/id-info/guidelines.html#naming for details).')
match_revision = revisionre.match(name)
if match_revision:
self.parsed_info.metadata.rev = match_revision.group(1)
else:
                    self.parsed_info.add_error('The name found on the first page of the document does not contain a revision: "%s"' % (name,))
name = re.sub(r'-\d+$', '', name)
self.parsed_info.metadata.name = name
return
self.parsed_info.add_error('The first page of the document does not contain a legitimate name that starts with draft-*')
|
ietf/submit/parsers/plain_parser.py
|
from __future__ import absolute_import, print_function, unicode_literals
import re
import debug # pyflakes:ignore
from ietf.submit.parsers.base import FileParser
class PlainParser(FileParser):
ext = 'txt'
mimetypes = ['text/plain', ]
def __init__(self, fd):
super(PlainParser, self).__init__(fd)
# If some error is found after this method invocation
# no other file parsing is recommended
def critical_parse(self):
super(PlainParser, self).critical_parse()
self.check_file_charset()
self.parse_name()
return self.parsed_info
def check_file_charset(self):
charset = self.parsed_info.charset
if not charset in ['us-ascii', 'utf-8',]:
self.parsed_info.add_error('A plain text ASCII document is required. '
'Found an unexpected encoding: "%s". '
'You probably have one or more non-ascii characters in your file.' % charset
)
if self.fd.charset and charset != self.fd.charset:
self.parsed_info.add_error("Unexpected charset mismatch: upload: %s, libmagic: %s" % (self.fd.charset, charset))
def parse_name(self):
self.fd.file.seek(0)
draftre = re.compile(r'(draft-\S+)')
revisionre = re.compile(r'.*-(\d+)$')
limit = 80
if self.parsed_info.charset in ['us-ascii', 'utf-8']:
while limit:
limit -= 1
line = self.fd.readline().decode(self.parsed_info.charset)
match = draftre.search(line)
if not match:
continue
name = match.group(1)
name = re.sub(r'^[^\w]+', '', name)
name = re.sub(r'[^\w]+$', '', name)
name = re.sub(r'\.txt$', '', name)
extra_chars = re.sub(r'[0-9a-z\-]', '', name)
if extra_chars:
if len(extra_chars) == 1:
                        self.parsed_info.add_error(('The document name on the first page, "%s", contains a disallowed character with byte code: %s ' % (name, ord(extra_chars[0]))) +
'(see https://www.ietf.org/id-info/guidelines.html#naming for details).')
else:
                        self.parsed_info.add_error(('The document name on the first page, "%s", contains disallowed characters with byte codes: %s ' % (name, (', '.join([ str(ord(c)) for c in extra_chars] )))) +
'(see https://www.ietf.org/id-info/guidelines.html#naming for details).')
match_revision = revisionre.match(name)
if match_revision:
self.parsed_info.metadata.rev = match_revision.group(1)
else:
                    self.parsed_info.add_error('The name found on the first page of the document does not contain a revision: "%s"' % (name,))
name = re.sub(r'-\d+$', '', name)
self.parsed_info.metadata.name = name
return
self.parsed_info.add_error('The first page of the document does not contain a legitimate name that starts with draft-*')
| 0.4917 | 0.100481 |
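The name and revision extraction above reduces to two regular expressions plus a few cleanup substitutions. A self-contained sketch applying the same patterns to an invented first-page line (the draft name is made up for illustration):

import re

draftre = re.compile(r'(draft-\S+)')
revisionre = re.compile(r'.*-(\d+)$')

line = "Internet-Draft              draft-example-demo-03.txt          March 2021"
name = draftre.search(line).group(1)
name = re.sub(r'^[^\w]+', '', name)
name = re.sub(r'[^\w]+$', '', name)
name = re.sub(r'\.txt$', '', name)      # draft-example-demo-03
rev = revisionre.match(name).group(1)   # '03'
base = re.sub(r'-\d+$', '', name)       # draft-example-demo
print(base, rev)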
import six as _six
from flytekit.common import sdk_bases as _sdk_bases
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.models.core import identifier as _core_identifier
class Identifier(_core_identifier.Identifier, metaclass=_sdk_bases.ExtendedSdkType):
_STRING_TO_TYPE_MAP = {
"lp": _core_identifier.ResourceType.LAUNCH_PLAN,
"wf": _core_identifier.ResourceType.WORKFLOW,
"tsk": _core_identifier.ResourceType.TASK,
}
_TYPE_TO_STRING_MAP = {v: k for k, v in _six.iteritems(_STRING_TO_TYPE_MAP)}
@classmethod
def promote_from_model(cls, base_model):
"""
:param flytekit.models.core.identifier.Identifier base_model:
:rtype: Identifier
"""
return cls(
base_model.resource_type,
base_model.project,
base_model.domain,
base_model.name,
base_model.version,
)
@classmethod
def from_python_std(cls, string):
"""
Parses a string in the correct format into an identifier
:param Text string:
:rtype: Identifier
"""
segments = string.split(":")
if len(segments) != 5:
raise _user_exceptions.FlyteValueException(
"The provided string was not in a parseable format. The string for an identifier must be in the format"
" entity_type:project:domain:name:version. Received: {}".format(string)
)
resource_type, project, domain, name, version = segments
if resource_type not in cls._STRING_TO_TYPE_MAP:
raise _user_exceptions.FlyteValueException(
"The provided string could not be parsed. The first element of an identifier must be one of: {}. "
"Received: {}".format(list(cls._STRING_TO_TYPE_MAP.keys()), resource_type)
)
resource_type = cls._STRING_TO_TYPE_MAP[resource_type]
return cls(resource_type, project, domain, name, version)
def __str__(self):
return "{}:{}:{}:{}:{}".format(
type(self)._TYPE_TO_STRING_MAP.get(self.resource_type, "<unknown>"),
self.project,
self.domain,
self.name,
self.version,
)
class WorkflowExecutionIdentifier(_core_identifier.WorkflowExecutionIdentifier, metaclass=_sdk_bases.ExtendedSdkType):
@classmethod
def promote_from_model(cls, base_model):
"""
:param flytekit.models.core.identifier.WorkflowExecutionIdentifier base_model:
:rtype: WorkflowExecutionIdentifier
"""
return cls(
base_model.project,
base_model.domain,
base_model.name,
)
@classmethod
def from_python_std(cls, string):
"""
Parses a string in the correct format into an identifier
:param Text string:
:rtype: WorkflowExecutionIdentifier
"""
segments = string.split(":")
if len(segments) != 4:
raise _user_exceptions.FlyteValueException(
string,
"The provided string was not in a parseable format. The string for an identifier must be in the format"
" ex:project:domain:name.",
)
resource_type, project, domain, name = segments
if resource_type != "ex":
raise _user_exceptions.FlyteValueException(
resource_type,
"The provided string could not be parsed. The first element of an execution identifier must be 'ex'.",
)
return cls(
project,
domain,
name,
)
def __str__(self):
return "ex:{}:{}:{}".format(self.project, self.domain, self.name)
class TaskExecutionIdentifier(_core_identifier.TaskExecutionIdentifier, metaclass=_sdk_bases.ExtendedSdkType):
@classmethod
def promote_from_model(cls, base_model):
"""
:param flytekit.models.core.identifier.TaskExecutionIdentifier base_model:
:rtype: TaskExecutionIdentifier
"""
return cls(
task_id=base_model.task_id,
node_execution_id=base_model.node_execution_id,
retry_attempt=base_model.retry_attempt,
)
@classmethod
def from_python_std(cls, string):
"""
Parses a string in the correct format into an identifier
:param Text string:
:rtype: TaskExecutionIdentifier
"""
segments = string.split(":")
if len(segments) != 10:
raise _user_exceptions.FlyteValueException(
string,
"The provided string was not in a parseable format. The string for an identifier must be in the format"
" te:exec_project:exec_domain:exec_name:node_id:task_project:task_domain:task_name:task_version:retry.",
)
resource_type, ep, ed, en, node_id, tp, td, tn, tv, retry = segments
if resource_type != "te":
raise _user_exceptions.FlyteValueException(
resource_type,
"The provided string could not be parsed. The first element of an execution identifier must be 'ex'.",
)
return cls(
task_id=Identifier(_core_identifier.ResourceType.TASK, tp, td, tn, tv),
node_execution_id=_core_identifier.NodeExecutionIdentifier(
node_id=node_id,
execution_id=_core_identifier.WorkflowExecutionIdentifier(ep, ed, en),
),
retry_attempt=int(retry),
)
def __str__(self):
return "te:{ep}:{ed}:{en}:{node_id}:{tp}:{td}:{tn}:{tv}:{retry}".format(
ep=self.node_execution_id.execution_id.project,
ed=self.node_execution_id.execution_id.domain,
en=self.node_execution_id.execution_id.name,
node_id=self.node_execution_id.node_id,
tp=self.task_id.project,
td=self.task_id.domain,
tn=self.task_id.name,
tv=self.task_id.version,
retry=self.retry_attempt,
)
|
flytekit/common/core/identifier.py
|
import six as _six
from flytekit.common import sdk_bases as _sdk_bases
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.models.core import identifier as _core_identifier
class Identifier(_core_identifier.Identifier, metaclass=_sdk_bases.ExtendedSdkType):
_STRING_TO_TYPE_MAP = {
"lp": _core_identifier.ResourceType.LAUNCH_PLAN,
"wf": _core_identifier.ResourceType.WORKFLOW,
"tsk": _core_identifier.ResourceType.TASK,
}
_TYPE_TO_STRING_MAP = {v: k for k, v in _six.iteritems(_STRING_TO_TYPE_MAP)}
@classmethod
def promote_from_model(cls, base_model):
"""
:param flytekit.models.core.identifier.Identifier base_model:
:rtype: Identifier
"""
return cls(
base_model.resource_type,
base_model.project,
base_model.domain,
base_model.name,
base_model.version,
)
@classmethod
def from_python_std(cls, string):
"""
Parses a string in the correct format into an identifier
:param Text string:
:rtype: Identifier
"""
segments = string.split(":")
if len(segments) != 5:
raise _user_exceptions.FlyteValueException(
"The provided string was not in a parseable format. The string for an identifier must be in the format"
" entity_type:project:domain:name:version. Received: {}".format(string)
)
resource_type, project, domain, name, version = segments
if resource_type not in cls._STRING_TO_TYPE_MAP:
raise _user_exceptions.FlyteValueException(
"The provided string could not be parsed. The first element of an identifier must be one of: {}. "
"Received: {}".format(list(cls._STRING_TO_TYPE_MAP.keys()), resource_type)
)
resource_type = cls._STRING_TO_TYPE_MAP[resource_type]
return cls(resource_type, project, domain, name, version)
def __str__(self):
return "{}:{}:{}:{}:{}".format(
type(self)._TYPE_TO_STRING_MAP.get(self.resource_type, "<unknown>"),
self.project,
self.domain,
self.name,
self.version,
)
class WorkflowExecutionIdentifier(_core_identifier.WorkflowExecutionIdentifier, metaclass=_sdk_bases.ExtendedSdkType):
@classmethod
def promote_from_model(cls, base_model):
"""
:param flytekit.models.core.identifier.WorkflowExecutionIdentifier base_model:
:rtype: WorkflowExecutionIdentifier
"""
return cls(
base_model.project,
base_model.domain,
base_model.name,
)
@classmethod
def from_python_std(cls, string):
"""
Parses a string in the correct format into an identifier
:param Text string:
:rtype: WorkflowExecutionIdentifier
"""
segments = string.split(":")
if len(segments) != 4:
raise _user_exceptions.FlyteValueException(
string,
"The provided string was not in a parseable format. The string for an identifier must be in the format"
" ex:project:domain:name.",
)
resource_type, project, domain, name = segments
if resource_type != "ex":
raise _user_exceptions.FlyteValueException(
resource_type,
"The provided string could not be parsed. The first element of an execution identifier must be 'ex'.",
)
return cls(
project,
domain,
name,
)
def __str__(self):
return "ex:{}:{}:{}".format(self.project, self.domain, self.name)
class TaskExecutionIdentifier(_core_identifier.TaskExecutionIdentifier, metaclass=_sdk_bases.ExtendedSdkType):
@classmethod
def promote_from_model(cls, base_model):
"""
:param flytekit.models.core.identifier.TaskExecutionIdentifier base_model:
:rtype: TaskExecutionIdentifier
"""
return cls(
task_id=base_model.task_id,
node_execution_id=base_model.node_execution_id,
retry_attempt=base_model.retry_attempt,
)
@classmethod
def from_python_std(cls, string):
"""
Parses a string in the correct format into an identifier
:param Text string:
:rtype: TaskExecutionIdentifier
"""
segments = string.split(":")
if len(segments) != 10:
raise _user_exceptions.FlyteValueException(
string,
"The provided string was not in a parseable format. The string for an identifier must be in the format"
" te:exec_project:exec_domain:exec_name:node_id:task_project:task_domain:task_name:task_version:retry.",
)
resource_type, ep, ed, en, node_id, tp, td, tn, tv, retry = segments
if resource_type != "te":
raise _user_exceptions.FlyteValueException(
resource_type,
"The provided string could not be parsed. The first element of an execution identifier must be 'ex'.",
)
return cls(
task_id=Identifier(_core_identifier.ResourceType.TASK, tp, td, tn, tv),
node_execution_id=_core_identifier.NodeExecutionIdentifier(
node_id=node_id,
execution_id=_core_identifier.WorkflowExecutionIdentifier(ep, ed, en),
),
retry_attempt=int(retry),
)
def __str__(self):
return "te:{ep}:{ed}:{en}:{node_id}:{tp}:{td}:{tn}:{tv}:{retry}".format(
ep=self.node_execution_id.execution_id.project,
ed=self.node_execution_id.execution_id.domain,
en=self.node_execution_id.execution_id.name,
node_id=self.node_execution_id.node_id,
tp=self.task_id.project,
td=self.task_id.domain,
tn=self.task_id.name,
tv=self.task_id.version,
retry=self.retry_attempt,
)
| 0.677794 | 0.108378 |
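A small usage sketch of the string round trip implemented above, assuming a flytekit installation in which these wrapper classes are importable from flytekit.common.core.identifier; the project, domain, name and version values are placeholders.

from flytekit.common.core.identifier import Identifier, WorkflowExecutionIdentifier

# entity_type:project:domain:name:version, with entity_type one of lp / wf / tsk
wf_id = Identifier.from_python_std("wf:flytesnacks:development:my.workflows.demo:v1")
print(str(wf_id))    # wf:flytesnacks:development:my.workflows.demo:v1

# ex:project:domain:name for workflow executions
exec_id = WorkflowExecutionIdentifier.from_python_std("ex:flytesnacks:development:abc123")
print(str(exec_id))  # ex:flytesnacks:development:abc123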
MV_OK = 0x00000000 # < 成功,无错误 | en:Succeeded, no error
MV_E_HANDLE = 0x80000000 # < 错误或无效的句柄 | en:Error or invalid handle
MV_E_SUPPORT = 0x80000001 # < 不支持的功能 | en:Not supported function
MV_E_BUFOVER = 0x80000002 # < 缓存已满 | en:Cache is full
MV_E_CALLORDER = 0x80000003 # < 函数调用顺序错误 | en:Function calling order error
MV_E_PARAMETER = 0x80000004 # < 错误的参数 | en:Incorrect parameter
MV_E_RESOURCE = 0x80000006 # < 资源申请失败 | en:Applying resource failed
MV_E_NODATA = 0x80000007 # < 无数据 | en:No data
MV_E_PRECONDITION = 0x80000008 # < 前置条件有误,或运行环境已发生变化 | en:Precondition error, or running environment changed
MV_E_VERSION = 0x80000009 # < 版本不匹配 | en:Version mismatches
MV_E_NOENOUGH_BUF = 0x8000000A # < 传入的内存空间不足 | en:Insufficient memory
MV_E_ABNORMAL_IMAGE = 0x8000000B # < 异常图像,可能是丢包导致图像不完整 | en:Abnormal image, maybe incomplete image because of lost packet
MV_E_LOAD_LIBRARY = 0x8000000C # < 动态导入DLL失败 | en:Load library failed
MV_E_UNKNOW = 0x800000FF # < 未知的错误 | en:Unknown error
MV_E_GC_GENERIC = 0x80000100 # < 通用错误 | en:General error
MV_E_GC_ARGUMENT = 0x80000101 # < 参数非法 | en:Illegal parameters
MV_E_GC_RANGE = 0x80000102 # < 值超出范围 | en:The value is out of range
MV_E_GC_PROPERTY = 0x80000103 # < 属性 | en:Property
MV_E_GC_RUNTIME = 0x80000104 # < 运行环境有问题 | en:Running environment error
MV_E_GC_LOGICAL = 0x80000105 # < 逻辑错误 | en:Logical error
MV_E_GC_ACCESS = 0x80000106 # < 节点访问条件有误 | en:Node accessing condition error
MV_E_GC_TIMEOUT = 0x80000107 # < 超时 | en:Timeout
MV_E_GC_DYNAMICCAST = 0x80000108 # < 转换异常 | en:Transformation exception
MV_E_GC_UNKNOW = 0x800001FF # < GenICam未知错误 | en:GenICam unknown error
MV_E_NOT_IMPLEMENTED = 0x80000200 # < 命令不被设备支持 | en:The command is not supported by device
MV_E_INVALID_ADDRESS = 0x80000201 # < 访问的目标地址不存在 | en:The target address being accessed does not exist
MV_E_WRITE_PROTECT = 0x80000202 # < 目标地址不可写 | en:The target address is not writable
MV_E_ACCESS_DENIED = 0x80000203 # < 设备无访问权限 | en:No permission
MV_E_BUSY = 0x80000204 # < 设备忙,或网络断开 | en:Device is busy, or network disconnected
MV_E_PACKET = 0x80000205 # < 网络包数据错误 | en:Network data packet error
MV_E_NETER = 0x80000206 # < 网络相关错误 | en:Network error
MV_E_IP_CONFLICT = 0x80000221 # < 设备IP冲突 | en:Device IP conflict
MV_E_USB_READ = 0x80000300 # < 读usb出错 | en:Reading USB error
MV_E_USB_WRITE = 0x80000301 # < 写usb出错 | en:Writing USB error
MV_E_USB_DEVICE = 0x80000302 # < 设备异常 | en:Device exception
MV_E_USB_GENICAM = 0x80000303 # < GenICam相关错误 | en:GenICam error
MV_E_USB_BANDWIDTH = 0x80000304 # < 带宽不足 该错误码新增 | en:Insufficient bandwidth, this error code is newly added
MV_E_USB_DRIVER = 0x80000305 # < 驱动不匹配或者未装驱动 | en:Driver mismatch or unmounted drive
MV_E_USB_UNKNOW = 0x800003FF # < USB未知的错误 | en:USB unknown error
MV_E_UPG_FILE_MISMATCH = 0x80000400 # < 升级固件不匹配 | en:Firmware mismatches
MV_E_UPG_LANGUSGE_MISMATCH = 0x80000401 # < 升级固件语言不匹配 | en:Firmware language mismatches
MV_E_UPG_CONFLICT = 0x80000402 # < 升级冲突(设备已经在升级了再次请求升级即返回此错误) | en:Upgrading conflicted (repeated upgrading requests during device upgrade)
MV_E_UPG_INNER_ERR = 0x80000403 # < 升级时相机内部出现错误 | en:Camera internal error during upgrade
MV_E_UPG_UNKNOW = 0x800004FF # < 升级时未知错误 | en:Unknown error during upgrade
|
hik-driver/MVS/Samples/Python/MvImport/MvErrorDefine_const.py
|
MV_OK = 0x00000000 # < 成功,无错误 | en:Succeeded, no error
MV_E_HANDLE = 0x80000000 # < 错误或无效的句柄 | en:Error or invalid handle
MV_E_SUPPORT = 0x80000001 # < 不支持的功能 | en:Not supported function
MV_E_BUFOVER = 0x80000002 # < 缓存已满 | en:Cache is full
MV_E_CALLORDER = 0x80000003 # < 函数调用顺序错误 | en:Function calling order error
MV_E_PARAMETER = 0x80000004 # < 错误的参数 | en:Incorrect parameter
MV_E_RESOURCE = 0x80000006 # < 资源申请失败 | en:Applying resource failed
MV_E_NODATA = 0x80000007 # < 无数据 | en:No data
MV_E_PRECONDITION = 0x80000008 # < 前置条件有误,或运行环境已发生变化 | en:Precondition error, or running environment changed
MV_E_VERSION = 0x80000009 # < 版本不匹配 | en:Version mismatches
MV_E_NOENOUGH_BUF = 0x8000000A # < 传入的内存空间不足 | en:Insufficient memory
MV_E_ABNORMAL_IMAGE = 0x8000000B # < 异常图像,可能是丢包导致图像不完整 | en:Abnormal image, maybe incomplete image because of lost packet
MV_E_LOAD_LIBRARY = 0x8000000C # < 动态导入DLL失败 | en:Load library failed
MV_E_UNKNOW = 0x800000FF # < 未知的错误 | en:Unknown error
MV_E_GC_GENERIC = 0x80000100 # < 通用错误 | en:General error
MV_E_GC_ARGUMENT = 0x80000101 # < 参数非法 | en:Illegal parameters
MV_E_GC_RANGE = 0x80000102 # < 值超出范围 | en:The value is out of range
MV_E_GC_PROPERTY = 0x80000103 # < 属性 | en:Property
MV_E_GC_RUNTIME = 0x80000104 # < 运行环境有问题 | en:Running environment error
MV_E_GC_LOGICAL = 0x80000105 # < 逻辑错误 | en:Logical error
MV_E_GC_ACCESS = 0x80000106 # < 节点访问条件有误 | en:Node accessing condition error
MV_E_GC_TIMEOUT = 0x80000107 # < 超时 | en:Timeout
MV_E_GC_DYNAMICCAST = 0x80000108 # < 转换异常 | en:Transformation exception
MV_E_GC_UNKNOW = 0x800001FF # < GenICam未知错误 | en:GenICam unknown error
MV_E_NOT_IMPLEMENTED = 0x80000200 # < 命令不被设备支持 | en:The command is not supported by device
MV_E_INVALID_ADDRESS = 0x80000201 # < 访问的目标地址不存在 | en:The target address being accessed does not exist
MV_E_WRITE_PROTECT = 0x80000202 # < 目标地址不可写 | en:The target address is not writable
MV_E_ACCESS_DENIED = 0x80000203 # < 设备无访问权限 | en:No permission
MV_E_BUSY = 0x80000204 # < 设备忙,或网络断开 | en:Device is busy, or network disconnected
MV_E_PACKET = 0x80000205 # < 网络包数据错误 | en:Network data packet error
MV_E_NETER = 0x80000206 # < 网络相关错误 | en:Network error
MV_E_IP_CONFLICT = 0x80000221 # < 设备IP冲突 | en:Device IP conflict
MV_E_USB_READ = 0x80000300 # < 读usb出错 | en:Reading USB error
MV_E_USB_WRITE = 0x80000301 # < 写usb出错 | en:Writing USB error
MV_E_USB_DEVICE = 0x80000302 # < 设备异常 | en:Device exception
MV_E_USB_GENICAM = 0x80000303 # < GenICam相关错误 | en:GenICam error
MV_E_USB_BANDWIDTH = 0x80000304 # < 带宽不足 该错误码新增 | en:Insufficient bandwidth, this error code is newly added
MV_E_USB_DRIVER = 0x80000305 # < 驱动不匹配或者未装驱动 | en:Driver mismatch or unmounted drive
MV_E_USB_UNKNOW = 0x800003FF # < USB未知的错误 | en:USB unknown error
MV_E_UPG_FILE_MISMATCH = 0x80000400 # < 升级固件不匹配 | en:Firmware mismatches
MV_E_UPG_LANGUSGE_MISMATCH = 0x80000401 # < 升级固件语言不匹配 | en:Firmware language mismatches
MV_E_UPG_CONFLICT = 0x80000402 # < 升级冲突(设备已经在升级了再次请求升级即返回此错误) | en:Upgrading conflicted (repeated upgrading requests during device upgrade)
MV_E_UPG_INNER_ERR = 0x80000403 # < 升级时相机内部出现错误 | en:Camera internal error during upgrade
MV_E_UPG_UNKNOW = 0x800004FF # < 升级时未知错误 | en:Unknown error during upgrade
| 0.300746 | 0.07843 |
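Because these constants are plain module-level integers, a common convenience is a reverse lookup from return code to symbolic name for logging. A short sketch, assuming the file is importable as MvErrorDefine_const (the module name used in the path above):

import MvErrorDefine_const as err

# Build {code: name} for every MV_* constant defined in the module.
CODE_TO_NAME = {
    value: name
    for name, value in vars(err).items()
    if name.startswith("MV_") and isinstance(value, int)
}

def describe(ret_code):
    return CODE_TO_NAME.get(ret_code, "unknown return code 0x{:08X}".format(ret_code))

print(describe(err.MV_E_PARAMETER))  # MV_E_PARAMETER
print(describe(0x12345678))          # unknown return code 0x12345678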
import os
import re
from time import time
from pygaggle.rerank.base import Query, Text
from pygaggle.rerank.transformer import MonoT5
def main():
DATA_DIR = "/home/l224016/projects/ia376_projeto_final/data/robust04/"
RUNS_DIR = "/home/l224016/projects/ia376_projeto_final/data/robust04/runs"
SEGMENTS_DIR = "/home/l224016/projects/ia376_projeto_final/data/robust04/segment_texts/"
QUERY_DOC_DIR = "/home/l224016/projects/ia376_projeto_final/data/robust04/query_doc_ids/"
OUTPUT_DIR = "/home/l224016/projects/ia376_projeto_final/data/robust04/monot5/"
list_of_segment_texts = [
'segment_texts_1000hits.txt', 'segment_texts_2000hits.txt',
'segment_texts_3000hits.txt', 'segment_texts_4000hits.txt',
'segment_texts_5000hits.txt', 'segment_texts_6000hits.txt',
'segment_texts_7000hits.txt', 'segment_texts_8000hits.txt',
'segment_texts_9000hits.txt', 'segment_texts_10000hits.txt'
]
list_of_query_doc_ids = [
'segment_query_doc_ids_1000hits.tsv', 'segment_query_doc_ids_2000hits.tsv',
'segment_query_doc_ids_3000hits.tsv', 'segment_query_doc_ids_4000hits.tsv',
'segment_query_doc_ids_5000hits.tsv', 'segment_query_doc_ids_6000hits.tsv',
'segment_query_doc_ids_7000hits.tsv', 'segment_query_doc_ids_8000hits.tsv',
'segment_query_doc_ids_9000hits.tsv', 'segment_query_doc_ids_10000hits.tsv'
]
# Model
reranker = MonoT5()
segment_texts = os.path.join(SEGMENTS_DIR, 'segment_texts_5000hits_001.txt')
query_doc_ids = os.path.join(QUERY_DOC_DIR, 'segment_query_doc_ids_5000hits_001.tsv')
monot5_results = os.path.join(OUTPUT_DIR, 'monot5_results_5000hits_001.txt')
with open(segment_texts) as seg_file, open(query_doc_ids) as qdoc_file:
passages = []
qdoc_first_line = qdoc_file.readline()
qdoc_first_line = qdoc_first_line.replace("\n", "").split('\t')
query_id_old = qdoc_first_line[0]
doc_id = qdoc_first_line[1]
seg_first_line = seg_file.readline()
result = re.search(r'Query\:(.*?)Document\:', seg_first_line)
query_text = result.group(1).strip()
result = re.search(r'Document\:(.*?)Relevant\:', seg_first_line)
segment_text = result.group(1).strip()
passages.append([doc_id, segment_text])
for seg_line, qdoc_line in zip(seg_file, qdoc_file):
qdoc_line = qdoc_line.replace("\n", "").split('\t')
query_id = qdoc_line[0]
doc_id = qdoc_line[1]
if query_id == query_id_old:
result = re.search(r'Document\:(.*?)Relevant\:', seg_line)
segment_text = result.group(1).strip()
passages.append([doc_id, segment_text])
else:
# Reranker using pygaggle
query = Query(query_text)
texts = [ Text(p[1], {'docid': p[0]}, 0) for p in passages]
start = time()
ranked_results = reranker.rerank(query, texts)
end = time()
time_elapsed = end - start
print("Time Elapsed: {:.1f}".format(time_elapsed))
# Get scores from the reranker
final_t5_scores = {}
for result in ranked_results:
if result.metadata["docid"] not in final_t5_scores:
final_t5_scores[result.metadata["docid"]] = result.score
else:
if final_t5_scores[result.metadata["docid"]] < result.score:
final_t5_scores[result.metadata["docid"]] = result.score
# Writes a run file in the TREC format
for rank, (docid, score) in enumerate(final_t5_scores.items()):
with open(monot5_results, mode='a') as writer:
writer.write(f'{query_id_old} Q0 {docid} {rank + 1} {1 / (rank + 1)} T5\n')
# Restart variables for a new query
passages = []
query_id_old = query_id
result = re.search(r'Query\:(.*?)Document\:', seg_line)
query_text = result.group(1).strip()
result = re.search(r'Document\:(.*?)Relevant\:', seg_line)
segment_text = result.group(1).strip()
passages.append([doc_id, segment_text])
# Reranker using pygaggle
query = Query(query_text)
texts = [ Text(p[1], {'docid': p[0]}, 0) for p in passages]
start = time()
ranked_results = reranker.rerank(query, texts)
end = time()
time_elapsed = end - start
print("Time Elapsed: {:.1f}".format(time_elapsed))
# Get scores from the reranker
final_t5_scores = {}
for result in ranked_results:
if result.metadata["docid"] not in final_t5_scores:
final_t5_scores[result.metadata["docid"]] = result.score
else:
if final_t5_scores[result.metadata["docid"]] < result.score:
final_t5_scores[result.metadata["docid"]] = result.score
# Writes a run file in the TREC format
for rank, (docid, score) in enumerate(final_t5_scores.items()):
with open(monot5_results, mode='a') as writer:
writer.write(f'{query_id_old} Q0 {docid} {rank + 1} {1 / (rank + 1)} T5\n')
if __name__ == "__main__":
main()
|
codigos/t5_reranker_robust04.py
|
| 0.331552 | 0.138958 |
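# --- Hedged usage sketch for the monoT5 reranking step above (illustrative only). ---
# It reuses only the pygaggle calls already present in the script; the helper name,
# the example query/docids and the output file name are made up. The one behavioural
# tweak is that per-document scores are sorted before the TREC run lines are written,
# so rank order always matches score order.
from pygaggle.rerank.base import Query, Text
from pygaggle.rerank.transformer import MonoT5

def write_trec_run(query_id, query_text, passages, out_path, reranker):
    # passages: list of (docid, segment_text) pairs belonging to one query
    texts = [Text(seg, {'docid': docid}, 0) for docid, seg in passages]
    ranked = reranker.rerank(Query(query_text), texts)
    best = {}
    for r in ranked:  # keep the best segment score per document (maxP-style aggregation)
        best[r.metadata['docid']] = max(best.get(r.metadata['docid'], float('-inf')), r.score)
    with open(out_path, mode='a') as writer:
        ordered = sorted(best.items(), key=lambda kv: kv[1], reverse=True)
        for rank, (docid, score) in enumerate(ordered):
            writer.write(f'{query_id} Q0 {docid} {rank + 1} {score} T5\n')

# Hypothetical example:
# write_trec_run('301', 'international organized crime',
#                [('FBIS3-0001', 'first segment text ...'), ('FBIS3-0002', 'second segment ...')],
#                'monot5_results_example.txt', MonoT5())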
from PySide2.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QFileDialog
from PySide2.QtCore import Qt
# parent UI
from ui_node_manager_mainwindow import Ui_MainWindow
# custom content
from Node import Node
from Node_ListWidget import Node_ListWidget
from NodeContentWidget import NodeContentWidget
from SaveDialog import SaveDialog
import json
import os
class MainWindow(QMainWindow):
nodes = []
node_list_widgets = []
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.nodes_list_widget.setFixedWidth(200)
self.setWindowTitle('Node Manager')
self.load_stylesheet('dark')
self.ui.add_new_node_pushButton.clicked.connect(self.add_new_node_pushButton_clicked)
self.ui.import_nodes_pushButton.clicked.connect(self.import_button_clicked)
self.ui.save_pushButton.clicked.connect(self.save_button_clicked)
def add_new_node_pushButton_clicked(self):
node_content_widget = NodeContentWidget()
new_node = Node(node_content_widget)
node_content_widget.node = new_node #load_node(new_node)
new_node.title_changed.connect(self.update_nodes_list_names) # this will update the list view
self.nodes.append(new_node)
self.rebuild_nodes_list()
self.set_current_node(self.nodes[-1])
def rebuild_nodes_list(self):
if self.ui.nodes_scrollArea.widget().layout().count() != 0:
# clear nodes layout
for i in reversed(range(self.ui.nodes_scrollArea.widget().layout().count())):
# 'The new widget is deleted when its parent is deleted' - Docs
self.ui.nodes_scrollArea.widget().layout().itemAt(i).widget().setParent(None)
        self.node_list_widgets = []  # drop stale references so update_nodes_list_names() only touches live widgets
        scroll_area_content_widget = QWidget()  # create new widget and layout for the scroll area
nodes_layout = QVBoxLayout()
nodes_layout.setAlignment(Qt.AlignTop)
for n in self.nodes: # create a new node list widget for every node
node_widget = Node_ListWidget(n)
node_widget.double_clicked.connect(self.node_widget_double_clicked)
nodes_layout.addWidget(node_widget)
self.node_list_widgets.append(node_widget)
scroll_area_content_widget.setLayout(nodes_layout)
self.ui.nodes_scrollArea.setWidget(scroll_area_content_widget)
def update_nodes_list_names(self):
for node_list_widget in self.node_list_widgets:
node_list_widget.update_display_title()
def set_current_node(self, node: Node):
# clear node_content_placeholder_widget
layout = self.ui.node_content_placeholder_widget.layout()
for i in reversed(range(layout.count())):
item = layout.itemAt(i)
item.widget().setParent(self) # removing the widget from the layout without deleting the widget
layout.addWidget(node.content_widget)
node.content_widget.show()
def node_widget_double_clicked(self, node):
self.set_current_node(node)
    def load_stylesheet(self, ss):
        ss_content = ''
        try:
            f = open('stuff/stylesheets/'+ss+'.txt')
            ss_content = f.read()
            f.close()
        except Exception:
            pass  # missing stylesheet file: fall back to an empty stylesheet instead of raising
        finally:
            self.setStyleSheet(ss_content)
def import_nodes(self, j_nodes, dir):
print('importing nodes')
o_nodes = json.loads(j_nodes)
nodes_list = o_nodes['nodes']
for n in nodes_list:
print('parsing node', n['title'])
new_node = Node()
new_node.title = n['title']
new_node.description = n['description']
new_node.type = n['type']
new_node.module_name = n['module name']
new_node.class_name = n['class name']
new_node.design_style = n['design style']
new_node.color = n['color']
new_node.has_main_widget = n['has main widget']
if new_node.has_main_widget:
new_node.widget_position = n['widget position']
new_node.custom_input_widgets = n['custom input widgets']
new_node.inputs = n['inputs']
new_node.outputs = n['outputs']
# load custom files
module_name_separator = '___'
# main code
node_path = dir+'/'+new_node.module_name
print(new_node.module_name)
f = open(node_path+'/'+new_node.module_name+module_name_separator+'METACODE.py')
new_node.meta_code = f.read()
f.close()
node_widgets_path = node_path + '/widgets'
# main widget code
if new_node.has_main_widget:
f = open(node_widgets_path+'/'+new_node.module_name+module_name_separator+'main_widget'+module_name_separator+'METACODE.py')
new_node.main_widget_content = f.read()
f.close()
# custom input widgets
for ciw in new_node.custom_input_widgets:
f = open(node_widgets_path+'/'+new_node.module_name+module_name_separator+ciw+module_name_separator+'METACODE.py')
new_node.custom_input_widget_contents.append(f.read())
f.close()
new_node_content_widget = NodeContentWidget()
new_node.title_changed.connect(self.update_nodes_list_names) # this will update the list view
new_node_content_widget.load_node(new_node)
new_node.content_widget = new_node_content_widget
self.nodes.append(new_node)
print('finished parsing')
print(self.nodes)
self.rebuild_nodes_list()
def import_button_clicked(self):
file_path = QFileDialog.getOpenFileName(self, 'select the json file you want to import', '../packages')[0]
f_content = ''
try:
f = open(file_path)
f_content = f.read()
f.close()
except Exception as e:
return
self.import_nodes(f_content, os.path.dirname(file_path) + '/nodes')
def save_button_clicked(self):
# the dialog does the whole saving process
save_dialog = SaveDialog(self, self.nodes)
save_dialog.exec_()
|
pyScript_NodeManager/MainWindow.py
|
| 0.211906 | 0.053874 |
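# --- Hedged bootstrap sketch for the Node Manager window above (illustrative only). ---
# MainWindow depends on the generated Ui_MainWindow and its sibling modules, so this
# only shows the usual PySide2 application entry point around it; the import path
# assumes the module above is importable as MainWindow.py.
import sys
from PySide2.QtWidgets import QApplication
from MainWindow import MainWindow

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())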
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
import numpy as np
import scipy.integrate as intg
class NaNException(Exception):
pass
def hmf_integral_gtm(M, dndm, mass_density=False):
"""
Cumulatively integrate dn/dm.
Parameters
----------
M : array_like
Array of masses.
dndm : array_like
Array of dn/dm (corresponding to M)
mass_density : bool, `False`
Whether to calculate mass density (or number density).
Returns
-------
ngtm : array_like
Cumulative integral of dndm.
Examples
--------
Using a simple power-law mass function:
>>> import numpy as np
>>> m = np.logspace(10,18,500)
>>> dndm = m**-2
>>> ngtm = hmf_integral_gtm(m,dndm)
>>> np.allclose(ngtm,1/m) #1/m is the analytic integral to infinity.
True
    The function always integrates to m=1e18, and extrapolates with a spline
    if the data are not provided up to that mass:
>>> m = np.logspace(10,12,500)
>>> dndm = m**-2
>>> ngtm = hmf_integral_gtm(m,dndm)
>>> np.allclose(ngtm,1/m) #1/m is the analytic integral to infinity.
True
"""
# Eliminate NaN's
m = M[np.logical_not(np.isnan(dndm))]
dndm = dndm[np.logical_not(np.isnan(dndm))]
dndlnm = m * dndm
if len(m) < 4:
raise NaNException("There are too few real numbers in dndm: len(dndm) = %s, #NaN's = %s" % (len(M), len(M) - len(dndm)))
# Calculate the mass function (and its integral) from the highest M up to 10**18
if m[-1] < m[0] * 10 ** 18 / m[3]:
m_upper = np.arange(np.log(m[-1]), np.log(10 ** 18), np.log(m[1]) - np.log(m[0]))
mf_func = _spline(np.log(m), np.log(dndlnm), k=1)
mf = mf_func(m_upper)
if not mass_density:
int_upper = intg.simps(np.exp(mf), dx=m_upper[2] - m_upper[1], even='first')
else:
int_upper = intg.simps(np.exp(m_upper + mf), dx=m_upper[2] - m_upper[1], even='first')
else:
int_upper = 0
# Calculate the cumulative integral (backwards) of [m*]dndlnm
if not mass_density:
ngtm = np.concatenate((intg.cumtrapz(dndlnm[::-1], dx=np.log(m[1]) - np.log(m[0]))[::-1], np.zeros(1)))
else:
ngtm = np.concatenate((intg.cumtrapz(m[::-1] * dndlnm[::-1], dx=np.log(m[1]) - np.log(m[0]))[::-1], np.zeros(1)))
return (ngtm + int_upper)
|
hmf/integrate_hmf.py
|
| 0.890491 | 0.524273 |
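# --- Hedged usage sketch for hmf_integral_gtm above (illustrative only). ---
# For dn/dm = m**-2 the analytic cumulative number density is 1/m, which is what the
# doctest in the docstring checks; passing mass_density=True returns the cumulative
# mass density instead. The import path assumes the module above is importable as
# hmf.integrate_hmf, and a SciPy version that still provides intg.simps/cumtrapz
# (renamed in newer releases) is assumed.
import numpy as np
from hmf.integrate_hmf import hmf_integral_gtm

m = np.logspace(10, 18, 500)
dndm = m ** -2.0
ngtm = hmf_integral_gtm(m, dndm)                         # number density n(>m), ~ 1/m here
rho_gtm = hmf_integral_gtm(m, dndm, mass_density=True)   # mass density locked up above m
print(np.allclose(ngtm, 1.0 / m))                        # mirrors the doctest: True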
import numpy as np
from optuna import Trial
from sklearn.decomposition import PCA
import mutation_prediction.data as data
import mutation_prediction.embeddings.aaindex as aaindex
from mutation_prediction.data import Dataset
from mutation_prediction.embeddings import Embedding, EmbeddingMatrix
class AcidsOneHot(EmbeddingMatrix):
def __init__(self):
super(AcidsOneHot, self).__init__(np.eye(data.num_acids()))
class ZScales(EmbeddingMatrix):
def __init__(self):
super(ZScales, self).__init__(
np.asarray(
[
[0.24, -2.32, 0.60, -0.14, 1.30],
[0.84, -1.67, 3.75, 0.18, -2.65],
[3.98, 0.93, 1.93, -2.46, 0.75],
[3.11, 0.26, -0.11, -3.04, -0.25],
[-4.22, 1.94, 1.06, 0.54, -0.62],
[2.05, -4.06, 0.36, -0.82, -0.38],
[2.47, 1.95, 0.26, 3.90, 0.09],
[-3.89, -1.73, -1.71, -0.84, 0.26],
[2.29, 0.89, -2.49, 1.49, 0.31],
[-4.28, -1.30, -1.49, -0.72, 0.84],
[-2.85, -0.22, 0.47, 1.94, -0.98],
[3.05, 1.62, 1.04, -1.15, 1.61],
[1.66, 0.27, 1.84, 0.70, 2.00],
[1.75, 0.50, -1.44, -1.34, 0.66],
[3.52, 2.50, -3.50, 1.99, -0.17],
[2.39, -1.07, 1.15, -1.39, 0.67],
[0.75, -2.18, -1.12, -1.46, -0.40],
[-2.59, -2.64, -1.54, -0.85, -0.02],
[-4.36, 3.94, 0.59, 3.44, -1.59],
[-2.54, 2.44, 0.43, 0.04, -1.47],
]
)
)
class VHSE(EmbeddingMatrix):
def __init__(self):
super(VHSE, self).__init__(
np.asarray(
[
[0.15, -1.11, -1.35, -0.92, 0.02, -0.91, 0.36, -0.48],
[0.18, -1.67, -0.46, -0.21, 0.0, 1.2, -1.61, -0.19],
[-1.15, 0.67, -0.41, -0.01, -2.68, 1.31, 0.03, 0.56],
[-1.18, 0.4, 0.1, 0.36, -2.16, -0.17, 0.91, 0.02],
[1.52, 0.61, 0.96, -0.16, 0.25, 0.28, -1.33, -0.2],
[-0.2, -1.53, -2.63, 2.28, -0.53, -1.18, 2.01, -1.34],
[-0.43, -0.25, 0.37, 0.19, 0.51, 1.28, 0.93, 0.65],
[1.27, -0.14, 0.3, -1.8, 0.3, -1.61, -0.16, -0.13],
[-1.17, 0.7, 0.7, 0.8, 1.64, 0.67, 1.63, 0.13],
[1.36, 0.07, 0.26, -0.8, 0.22, -1.37, 0.08, -0.62],
[1.01, -0.53, 0.43, 0.0, 0.23, 0.1, -0.86, -0.68],
[-0.99, 0.0, -0.37, 0.69, -0.55, 0.85, 0.73, -0.8],
[0.22, -0.17, -0.5, 0.05, -0.01, -1.34, -0.19, 3.56],
[-0.96, 0.12, 0.18, 0.16, 0.09, 0.42, -0.2, -0.41],
[-1.47, 1.45, 1.24, 1.27, 1.55, 1.47, 1.3, 0.83],
[-0.67, -0.86, -1.07, -0.41, -0.32, 0.27, -0.64, 0.11],
[-0.34, -0.51, -0.55, -1.06, -0.06, -0.01, -0.79, 0.39],
[0.76, -0.92, -0.17, -1.91, 0.22, -1.4, -0.24, -0.03],
[1.5, 2.06, 1.79, 0.75, 0.75, -0.13, -1.01, -0.85],
[0.61, 1.6, 1.17, 0.73, 0.53, 0.25, -0.96, -0.52],
]
)
)
class PcScores(EmbeddingMatrix):
matrix = None
def __init__(self):
if PcScores.matrix is None:
matrix = aaindex.read_aaindex1()
matrix = (matrix - matrix.mean(axis=0)) / matrix.std(axis=0)
pca = PCA(n_components=11)
PcScores.matrix = pca.fit_transform(matrix)
super(PcScores, self).__init__(PcScores.matrix)
class SScales(EmbeddingMatrix):
matrix = None
def __init__(self):
if SScales.matrix is None:
matrix = aaindex.read_aaindex1(
keys=[
"BIOV880101",
"BLAM930101",
"<KEY>",
"TSAJ990101",
"NAKH920106",
"NAKH920107",
"NAKH920108",
"CEDJ970104",
"LIFS790101",
"MIYS990104",
"ARGP820101",
"DAWD720101",
"FAUJ880109",
]
)
SScales.matrix = (matrix - matrix.mean(axis=0)) / matrix.std(axis=0)
super(SScales, self).__init__(SScales.matrix)
class AaIndex(EmbeddingMatrix):
matrix = None
def __init__(self, keys=None):
if AaIndex.matrix is None:
matrix = aaindex.read_aaindex1(keys=keys)
AaIndex.matrix = (matrix - matrix.mean(axis=0)) / matrix.std(axis=0)
super(AaIndex, self).__init__(AaIndex.matrix)
class ProtVec(Embedding):
matrix = None
def embed_update(self, dataset: Dataset, trial: Trial = None) -> np.ndarray:
return self.embed(dataset)
def embed(self, dataset: Dataset) -> np.ndarray:
if ProtVec.matrix is None:
ProtVec.matrix = _read_protvec()
embedded = np.zeros((len(dataset), 100))
sequence_length = len(dataset.get_sequence())
num_mutations = dataset.get_num_mutations()
positions = dataset.get_positions()
for i, sequence in enumerate(dataset.get_sequences()):
three_grams = set()
for p in positions[i, : num_mutations[i]]:
p = int(p)
if 2 <= p:
three_grams.add((p - 2, p - 1, p))
if 1 <= p < sequence_length - 1:
three_grams.add((p - 1, p, p + 1))
if p < sequence_length - 2:
three_grams.add((p, p + 1, p + 2))
vectors = [
ProtVec.matrix[sequence[g[0]], sequence[g[1]], sequence[g[2]]] for g in three_grams
]
if len(vectors) > 0:
embedded[i] = np.asarray(vectors).mean(axis=0)
return embedded
def get_matrix(self) -> np.ndarray:
return self.matrix
def _read_protvec():
with open("data/embeddings/protVec_100d_3grams.csv") as fd:
matrix = np.zeros((data.num_acids(), data.num_acids(), data.num_acids(), 100))
for row in fd:
row = row.replace('"', "")
parts = row.split("\t")
try:
i = data.acid_to_index(parts[0][0])
j = data.acid_to_index(parts[0][1])
k = data.acid_to_index(parts[0][2])
except KeyError:
continue
vector = np.asarray([float(v) for v in parts[1:]])
matrix[i, j, k] = vector
return matrix
|
mutation_prediction/embeddings/acids.py
|
| 0.691602 | 0.439807 |
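# --- Hedged sketch of the 3-gram window logic used by ProtVec.embed above ---
# (standalone illustration; it does not read the aaindex or ProtVec data files).
# For each mutated position p, embed() collects the (up to three) overlapping
# 3-grams of sequence indices that contain p and averages their ProtVec vectors;
# this helper reproduces just the window selection.
def three_gram_windows(p, sequence_length):
    grams = set()
    if 2 <= p:
        grams.add((p - 2, p - 1, p))
    if 1 <= p < sequence_length - 1:
        grams.add((p - 1, p, p + 1))
    if p < sequence_length - 2:
        grams.add((p, p + 1, p + 2))
    return sorted(grams)

# A mutation at position 5 of a length-10 sequence touches these index windows:
print(three_gram_windows(5, 10))   # [(3, 4, 5), (4, 5, 6), (5, 6, 7)]
# Positions near the ends get fewer windows:
print(three_gram_windows(0, 10))   # [(0, 1, 2)]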
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor, LongTensor
from torch_geometric.typing import OptTensor
from tsl.nn.functional import reverse_tensor
from tsl.nn.layers.graph_convs.grin_cell import GRIL
from tsl.utils.parser_utils import str_to_bool
from ...base.embedding import StaticGraphEmbedding
class GRINModel(nn.Module):
r"""
Graph Recurrent Imputation Network with DCRNN cells.
    From Cini et al., "Filling the G_ap_s: Multivariate Time Series Imputation by Graph Neural Networks", ICLR 2022.
Args:
input_size (int): Size of the input.
hidden_size (int): Number of units in the DCRNN hidden layer.
ff_size (int): Number of units in the nonlinear readout.
embedding_size (int, optional): Number of features in the optional node embeddings.
exog_size (int): Number of channels for the exogenous variables.
        n_layers (int): Number of DCRNN cells.
n_nodes (int, optional): Number of nodes in the input graph.
kernel_size (int): Order of the spatial diffusion process.
        layer_norm (bool, optional): Whether to use layer normalization.
dropout (float, optional): Dropout probability in the DCRNN cells.
ff_dropout (float, optional): Dropout probability in the readout.
merge_mode (str, optional): Strategy used to merge representations coming from the two branches of the bidirectional model.
(default: `mlp`)
"""
def __init__(self,
input_size: int,
hidden_size: int,
ff_size: int,
embedding_size: Optional[int] = None,
exog_size: Optional[int] = None,
n_layers: int = 1,
n_nodes: Optional[int] = None,
kernel_size: int = 2,
decoder_order: int = 1,
layer_norm: bool = False,
dropout: float = 0.,
ff_dropout: float = 0.,
merge_mode: str = 'mlp'):
super(GRINModel, self).__init__()
self.fwd_gril = GRIL(input_size=input_size,
hidden_size=hidden_size,
exog_size=exog_size,
n_layers=n_layers,
dropout=dropout,
kernel_size=kernel_size,
decoder_order=decoder_order,
n_nodes=n_nodes,
layer_norm=layer_norm)
self.bwd_gril = GRIL(input_size=input_size,
hidden_size=hidden_size,
exog_size=exog_size,
n_layers=n_layers,
dropout=dropout,
kernel_size=kernel_size,
decoder_order=decoder_order,
n_nodes=n_nodes,
layer_norm=layer_norm)
if embedding_size is not None:
assert n_nodes is not None
self.emb = StaticGraphEmbedding(n_nodes, embedding_size)
else:
self.register_parameter('emb', None)
self.merge_mode = merge_mode
        if merge_mode == 'mlp':
            # embedding_size may be None; a missing embedding contributes 0 channels
            in_channels = 4 * hidden_size + input_size + (embedding_size or 0)
self.out = nn.Sequential(nn.Linear(in_channels, ff_size),
nn.ReLU(),
nn.Dropout(ff_dropout),
nn.Linear(ff_size, input_size))
elif merge_mode in ['mean', 'sum', 'min', 'max']:
self.out = getattr(torch, merge_mode)
else:
raise ValueError("Merge option %s not allowed." % merge_mode)
def forward(self, x: Tensor, edge_index: LongTensor,
edge_weight: OptTensor = None, mask: OptTensor = None,
u: OptTensor = None):
# x: [batch, steps, nodes, channels]
fwd_out, fwd_pred, fwd_repr, _ = self.fwd_gril(x,
edge_index, edge_weight,
mask=mask, u=u)
# Backward
rev_x = reverse_tensor(x, dim=1)
rev_mask = reverse_tensor(mask, dim=1) if mask is not None else None
rev_u = reverse_tensor(u, dim=1) if u is not None else None
*bwd, _ = self.bwd_gril(rev_x, edge_index, edge_weight,
mask=rev_mask, u=rev_u)
bwd_out, bwd_pred, bwd_repr = [reverse_tensor(res, 1) for res in bwd]
if self.merge_mode == 'mlp':
inputs = [fwd_repr, bwd_repr, mask]
if self.emb is not None:
b, s, *_ = fwd_repr.size() # fwd_h: [b s n c]
inputs += [self.emb(expand=(b, s, -1, -1))]
imputation = torch.cat(inputs, dim=-1)
imputation = self.out(imputation)
else:
imputation = torch.stack([fwd_out, bwd_out], dim=-1)
imputation = self.out(imputation, dim=-1)
return imputation, (fwd_out, bwd_out, fwd_pred, bwd_pred)
@staticmethod
def add_model_specific_args(parser):
parser.add_argument('--hidden-size', type=int)
parser.add_argument('--ff-size', type=int)
parser.add_argument('--embedding-size', type=int, default=None)
parser.add_argument('--n-layers', type=int, default=1)
parser.add_argument('--n-nodes', type=int, default=None)
parser.add_argument('--kernel-size', type=int, default=2)
parser.add_argument('--decoder-order', type=int, default=1)
parser.add_argument('--layer-norm', type=str_to_bool, nargs='?',
const=True, default=False)
parser.add_argument('--dropout', type=float, default=0.)
parser.add_argument('--ff-dropout', type=float, default=0.)
parser.add_argument('--merge-mode', type=str, default='mlp',
choices=['mlp', 'mean', 'sum', 'min', 'max'])
return parser
|
tsl/nn/models/imputation/grin_model.py
|
| 0.963057 | 0.582135 |
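# --- Hedged instantiation sketch for GRINModel above (illustrative only). ---
# It assumes the class above and its tsl dependencies (GRIL etc.) are importable;
# all sizes and the toy line graph below are made up. Shapes follow the comment in
# forward(): x is [batch, steps, nodes, channels].
import torch

n_nodes, channels = 5, 1
model = GRINModel(input_size=channels, hidden_size=16, ff_size=32,
                  embedding_size=8, n_nodes=n_nodes)
x = torch.randn(2, 12, n_nodes, channels)                # [batch, steps, nodes, channels]
mask = torch.ones_like(x)                                # 1 = observed, 0 = missing
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])  # toy 5-node line graph
imputation, (fwd_out, bwd_out, fwd_pred, bwd_pred) = model(x, edge_index, mask=mask)
print(imputation.shape)  # expected: torch.Size([2, 12, 5, 1])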
import datetime, copy, os
import matchesDealEngine
class CBaseStrategy(object):
def __init__(self, stockCode):
super(CBaseStrategy, self).__init__()
self.stockCode = stockCode
self.customInit()
self.initCashe()
        #latest data snapshot
        self.currentData = {}
        #connection pool, used for sending signals
        self.requesHandlerObjList = []
        #data cache
        #current time: timestamp of the most recent market data tick
        self.currentMDDateTime = datetime.datetime(1990,1,1,0,0,0)
        self.MDList = [] #market data
        self.TDList = [] #tick-by-tick trade data
        self.ODList = [] #order queue data
        #trading data
self.dealEngine = matchesDealEngine.CMatchesDealEngine(stockCode, self)
    #------------------------------
    #listener interface
    #------------------------------
    #receive the connection handler objects
def getRequesHandlerObjList(self, requesHandlerObjList):
self.requesHandlerObjList = requesHandlerObjList
def dataListener(self, dataType, data):
        if dataType == 1: #tick-by-tick trade data
self.dealEngine.onRtnTradeSettlement(data)
self.onRtnTradeSettlement(data)
self.saveTradeSettlement(data)
        elif dataType == 2: #order queue data
self.onRtnOrderQueue(data)
self.saveOrderQuene(data)
else:
if data["dateTime"] > self.currentMDDateTime:
self.dealEngine.onRtnMarketData(data)
self.onRtnMarketData(data)
self.currentMDDateTime = copy.copy(data["dateTime"])
self.saveMarketData(data)
        #trigger the periodic cache auto-save
if (datetime.datetime.now() - self.preSaveCacheTime)> datetime.timedelta(minutes = 1):
self.autosaveCache()
#self.saveCacheAdd(MDList = self.MDList, TDList = self.TDList, ODList = self.ODList)
    #------------------------------
    #cache helpers
    #------------------------------
def initCashe(self):
self.cacheFilePath = "cache/%s%s.cache" %(self.stockCode, self.name)
self.preSaveCacheTime = datetime.datetime.now()
self.loadCache()
    #load the cache (the file is created if it does not exist yet)
    def loadCache(self):
        if not os.path.isfile(self.cacheFilePath):
            self.cacheFile = open(self.cacheFilePath, "w")
            self.cacheFile.close()
        execfile(self.cacheFilePath)
    #save the cache (overwrite)
def saveCache(self, **objDict):
self.cacheFile = open(self.cacheFilePath, "w")
content = ""
for key, value in objDict.items():
content += "self.%s = %s\n" %(key, str(value))
self.cacheFile.write(content)
self.cacheFile.close()
self.preSaveCacheTime = datetime.datetime.now()
    #save the cache (append)
def saveCacheAdd(self, **objDict):
self.cacheFile = open(self.cacheFilePath, "a")
content = ""
for key, value in objDict.items():
content += "self.%s = %s\n" %(key, str(value))
self.cacheFile.write(content)
self.cacheFile.close()
self.preSaveCacheTime = datetime.datetime.now()
    #------------------------------
    #data persistence helpers
    #------------------------------
def saveMarketData(self, data):
self.MDList.append(copy.copy(data))
if len(self.MDList) > 300:
del self.MDList[0]
def saveTradeSettlement(self, data):
self.TDList.append(copy.copy(data))
if len(self.TDList) > 300:
del self.TDList[0]
def saveOrderQuene(self, data):
self.ODList.append(copy.copy(data))
if len(self.ODList) > 300:
del self.ODList[0]
    #------------------------------
    #trading methods
    #------------------------------
def buy(self, mode, price, vol, dateTime):
return self.dealEngine.buy(mode, price, vol, dateTime)
def sell(self, mode, price, vol, dateTime):
return self.dealEngine.sell(mode, price, vol, dateTime)
    #order fill details
def dealed(self, dealObj, avePrice, vol, dateTime):
pass
    #order submission failed
    def quotedFailed(self, failedCode): #failure reason code
pass
    #------------------------------
    #methods intended to be overridden by subclasses
    #------------------------------
    #custom initialization
def customInit(self):
self.name = "baseStrategy"
    #market data callback
def onRtnMarketData(self, data):
pass
    #tick-by-tick trade callback
def onRtnTradeSettlement(self, data):
pass
    #best-bid order queue callback
def onRtnOrderQueue(self, data):
pass
    #end of trading day
def dayEnd(self):
pass
    #end of data stream
def dataEnd(self):
pass
    #cache auto-save hook
def autosaveCache(self):
#self.saveCache(data = self.data)
pass
|
baseStrategy.py
|
| 0.081695 | 0.124665 |
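# --- Hedged sketch of how CBaseStrategy above is meant to be subclassed ---
# (illustrative only; the strategy name, the "lastPrice" field and the logic are
# made up). The base class calls customInit() before anything else, and the
# onRtn* hooks are the callbacks intended to be overridden.
class CDemoStrategy(CBaseStrategy):
    def customInit(self):
        self.name = "demoStrategy"      # used in the cache file name
        self.lastPrice = None

    def onRtnMarketData(self, data):
        # react to each new market data tick
        self.lastPrice = data.get("lastPrice")

    def onRtnTradeSettlement(self, data):
        pass

#strategy = CDemoStrategy("600000")     # would also create cache/600000demoStrategy.cache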
from bresenham import bresenham
from scipy.spatial import Voronoi
import numpy as np
from queue import PriorityQueue
import networkx as nx
def closest_node(graph, current_position):
'''
Compute the closest node in the graph to the current position
'''
closest_node = None
dist = 100000
xy_position = (current_position[0], current_position[1])
for p in graph.nodes:
d = heuristic(xy_position, p)
if d < dist:
closest_node = p
dist = d
return closest_node
def create_grid_and_edges(data, drone_altitude, safety_distance):
'''
Create a grid representation of a 2D configuration space and a Voronoi Graph
'''
# minimum and maximum north coordinates
north_min = np.floor(np.min(data[:, 0] - data[:, 3]))
north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))
# minimum and maximum east coordinates
east_min = np.floor(np.min(data[:, 1] - data[:, 4]))
east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))
# given the minimum and maximum coordinates we can
# calculate the size of the grid.
north_size = int(np.ceil(north_max - north_min))
east_size = int(np.ceil(east_max - east_min))
# Initialize an empty grid
grid = np.zeros((north_size, east_size))
# Initialize an empty list for Voronoi points
points = []
# Populate the grid with obstacles
for i in range(data.shape[0]):
north, east, alt, d_north, d_east, d_alt = data[i, :]
if alt + d_alt + safety_distance > drone_altitude:
obstacle = [
int(np.clip(north - d_north - safety_distance - north_min, 0, north_size - 1)),
int(np.clip(north + d_north + safety_distance - north_min, 0, north_size - 1)),
int(np.clip(east - d_east - safety_distance - east_min, 0, east_size - 1)),
int(np.clip(east + d_east + safety_distance - east_min, 0, east_size - 1)),
]
grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1
# add center of obstacles to points list
points.append([north - north_min, east - east_min])
# create a voronoi graph based on location of obstacle centres
graph = Voronoi(points)
# check each edge from graph.ridge_vertices for collision
edges = []
for v in graph.ridge_vertices:
p1 = graph.vertices[v[0]]
p2 = graph.vertices[v[1]]
cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))
hit = False
for c in cells:
# First check if we're off the map
if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:
hit = True
break
# Next check if we're in collision
if grid[c[0], c[1]] == 1:
hit = True
break
        # If the edge does not hit an obstacle
        # add it to the list
        if not hit:
            # convert arrays to tuples for the graph creation step
p1 = (p1[0], p1[1])
p2 = (p2[0], p2[1])
edges.append((p1, p2))
return grid, edges, int(north_min), int(east_min)
def create_graph(edges):
graph = nx.Graph()
for elem in edges:
p1 = elem[0]
p2 = elem[1]
dist = heuristic(p1, p2)
graph.add_edge(p1, p2, weight=dist)
return graph
def heuristic(n1, n2):
return(np.linalg.norm(np.array(n2) - np.array(n1)))
def a_star_graph(graph, start, goal, h):
'''
A* working with NetworkX graphs
'''
path = []
path_cost = 0
queue = PriorityQueue()
queue.put((0, start))
    visited = set([start])  # set(start) would add the start's coordinates, not the start node itself
branch = {}
found = False
while not queue.empty():
item = queue.get()
current_node = item[1]
if current_node == start:
current_cost = 0.0
else:
current_cost = branch[current_node][0]
if current_node == goal:
print('Found a path.')
found = True
break
else:
for next_node in graph[current_node]:
cost = graph.edges[current_node, next_node]['weight']
branch_cost = current_cost + cost
queue_cost = branch_cost + h(next_node, goal)
if next_node not in visited:
visited.add(next_node)
branch[next_node] = (branch_cost, current_node)
queue.put((queue_cost, next_node))
if found:
# retrace steps
n = goal
path_cost = branch[n][0]
path.append(goal)
while branch[n][1] != start:
path.append(branch[n][1])
n = branch[n][1]
path.append(branch[n][1])
else:
print('**********************')
print('Failed to find a path!')
print('**********************')
return path[::-1], path_cost
|
lib/voronoi_utils.py
|
| 0.729905 | 0.574275 |
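# --- Hedged usage sketch for the graph-search utilities above (illustrative only). ---
# create_grid_and_edges needs real collider data, so this just exercises
# closest_node / create_graph / heuristic / a_star_graph from the module above
# on a tiny hand-built edge list.
edges = [((0.0, 0.0), (0.0, 1.0)),
         ((0.0, 1.0), (1.0, 1.0)),
         ((0.0, 0.0), (2.0, 0.0))]                # dead-end branch
graph = create_graph(edges)
start = closest_node(graph, (0.1, -0.1, 5.0))     # third component (altitude) is ignored
goal = (1.0, 1.0)
path, cost = a_star_graph(graph, start, goal, heuristic)
print(path)   # [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
print(cost)   # 2.0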
from cStringIO import StringIO
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.web import server, resource, util
from twisted.internet import defer, interfaces, error, task
from twisted.web import iweb, http, http_headers
from twisted.python import log
class DummyRequest:
"""
Represents a dummy or fake request.
@ivar _finishedDeferreds: C{None} or a C{list} of L{Deferreds} which will
be called back with C{None} when C{finish} is called or which will be
errbacked if C{processingFailed} is called.
@type headers: C{dict}
@ivar headers: A mapping of header name to header value for all request
headers.
@type outgoingHeaders: C{dict}
@ivar outgoingHeaders: A mapping of header name to header value for all
response headers.
@type responseCode: C{int}
@ivar responseCode: The response code which was passed to
C{setResponseCode}.
@type written: C{list} of C{str}
@ivar written: The bytes which have been written to the request.
"""
uri = 'http://dummy/'
method = 'GET'
client = None
def registerProducer(self, prod,s):
self.go = 1
while self.go:
prod.resumeProducing()
def unregisterProducer(self):
self.go = 0
def __init__(self, postpath, session=None):
self.sitepath = []
self.written = []
self.finished = 0
self.postpath = postpath
self.prepath = []
self.session = None
self.protoSession = session or server.Session(0, self)
self.args = {}
self.outgoingHeaders = {}
self.responseHeaders = http_headers.Headers()
self.responseCode = None
self.headers = {}
self._finishedDeferreds = []
def getHeader(self, name):
"""
Retrieve the value of a request header.
@type name: C{str}
@param name: The name of the request header for which to retrieve the
value. Header names are compared case-insensitively.
@rtype: C{str} or L{NoneType}
@return: The value of the specified request header.
"""
return self.headers.get(name.lower(), None)
def setHeader(self, name, value):
"""TODO: make this assert on write() if the header is content-length
"""
self.outgoingHeaders[name.lower()] = value
def getSession(self):
if self.session:
return self.session
assert not self.written, "Session cannot be requested after data has been written."
self.session = self.protoSession
return self.session
def write(self, data):
self.written.append(data)
def notifyFinish(self):
"""
Return a L{Deferred} which is called back with C{None} when the request
is finished. This will probably only work if you haven't called
C{finish} yet.
"""
finished = Deferred()
self._finishedDeferreds.append(finished)
return finished
def finish(self):
"""
        Record that the request is finished and call back any L{Deferred}s
        waiting for notification of this.
"""
self.finished = self.finished + 1
if self._finishedDeferreds is not None:
observers = self._finishedDeferreds
self._finishedDeferreds = None
for obs in observers:
obs.callback(None)
def processingFailed(self, reason):
"""
        Errback any L{Deferred}s waiting for finish notification.
"""
if self._finishedDeferreds is not None:
observers = self._finishedDeferreds
self._finishedDeferreds = None
for obs in observers:
obs.errback(reason)
def addArg(self, name, value):
self.args[name] = [value]
def setResponseCode(self, code, message=None):
"""
Set the HTTP status response code, but takes care that this is called
before any data is written.
"""
assert not self.written, "Response code cannot be set after data has been written: %s." % "@@@@".join(self.written)
self.responseCode = code
self.responseMessage = message
def setLastModified(self, when):
assert not self.written, "Last-Modified cannot be set after data has been written: %s." % "@@@@".join(self.written)
def setETag(self, tag):
assert not self.written, "ETag cannot be set after data has been written: %s." % "@@@@".join(self.written)
def getClientIP(self):
"""
Return the IPv4 address of the client which made this request, if there
is one, otherwise C{None}.
"""
if isinstance(self.client, IPv4Address):
return self.client.host
return None
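# A DummyRequest stands in for a real twisted.web request in the tests below:
# construct it with the postpath segments (e.g. DummyRequest([''])), hand it to
# Site.getResourceFor() or a resource's render(), then inspect request.written,
# request.responseCode and request.outgoingHeaders instead of a real transport.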
class ResourceTestCase(unittest.TestCase):
def testListEntities(self):
r = resource.Resource()
self.failUnlessEqual([], r.listEntities())
class SimpleResource(resource.Resource):
def render(self, request):
if http.CACHED in (request.setLastModified(10),
request.setETag('MatchingTag')):
return ''
else:
return "correct"
class DummyChannel:
class TCP:
port = 80
def __init__(self):
self.written = StringIO()
self.producers = []
def getPeer(self):
return IPv4Address("TCP", '192.168.1.1', 12344)
def write(self, bytes):
assert isinstance(bytes, str)
self.written.write(bytes)
def writeSequence(self, iovec):
map(self.write, iovec)
def getHost(self):
return IPv4Address("TCP", '10.0.0.1', self.port)
def registerProducer(self, producer, streaming):
self.producers.append((producer, streaming))
class SSL(TCP):
implements(interfaces.ISSLTransport)
site = server.Site(resource.Resource())
def __init__(self):
self.transport = self.TCP()
def requestDone(self, request):
pass
class SiteTest(unittest.TestCase):
def test_simplestSite(self):
"""
L{Site.getResourceFor} returns the C{""} child of the root resource it
is constructed with when processing a request for I{/}.
"""
sres1 = SimpleResource()
sres2 = SimpleResource()
sres1.putChild("",sres2)
site = server.Site(sres1)
self.assertIdentical(
site.getResourceFor(DummyRequest([''])),
sres2, "Got the wrong resource.")
class SessionTest(unittest.TestCase):
"""
Tests for L{server.Session}.
"""
def setUp(self):
"""
Create a site with one active session using a deterministic, easily
controlled clock.
"""
self.clock = task.Clock()
self.uid = 'unique'
self.site = server.Site(resource.Resource())
self.session = server.Session(self.site, self.uid, self.clock)
self.site.sessions[self.uid] = self.session
def test_defaultReactor(self):
"""
        If no value is passed to L{server.Session.__init__}, the global
reactor is used.
"""
session = server.Session(server.Site(resource.Resource()), '123')
self.assertIdentical(session._reactor, reactor)
def test_startCheckingExpiration(self):
"""
L{server.Session.startCheckingExpiration} causes the session to expire
after L{server.Session.sessionTimeout} seconds without activity.
"""
self.session.startCheckingExpiration()
# Advance to almost the timeout - nothing should happen.
self.clock.advance(self.session.sessionTimeout - 1)
self.assertIn(self.uid, self.site.sessions)
# Advance to the timeout, the session should expire.
self.clock.advance(1)
self.assertNotIn(self.uid, self.site.sessions)
# There should be no calls left over, either.
self.assertFalse(self.clock.calls)
def test_expire(self):
"""
L{server.Session.expire} expires the session.
"""
self.session.expire()
# It should be gone from the session dictionary.
self.assertNotIn(self.uid, self.site.sessions)
# And there should be no pending delayed calls.
self.assertFalse(self.clock.calls)
def test_expireWhileChecking(self):
"""
L{server.Session.expire} expires the session even if the timeout call
isn't due yet.
"""
self.session.startCheckingExpiration()
self.test_expire()
def test_notifyOnExpire(self):
"""
A function registered with L{server.Session.notifyOnExpire} is called
when the session expires.
"""
callbackRan = [False]
def expired():
callbackRan[0] = True
self.session.notifyOnExpire(expired)
self.session.expire()
self.assertTrue(callbackRan[0])
def test_touch(self):
"""
L{server.Session.touch} updates L{server.Session.lastModified} and
delays session timeout.
"""
# Make sure it works before startCheckingExpiration
self.clock.advance(3)
self.session.touch()
self.assertEqual(self.session.lastModified, 3)
# And after startCheckingExpiration
self.session.startCheckingExpiration()
self.clock.advance(self.session.sessionTimeout - 1)
self.session.touch()
self.clock.advance(self.session.sessionTimeout - 1)
self.assertIn(self.uid, self.site.sessions)
# It should have advanced it by just sessionTimeout, no more.
self.clock.advance(1)
self.assertNotIn(self.uid, self.site.sessions)
def test_startCheckingExpirationParameterDeprecated(self):
"""
L{server.Session.startCheckingExpiration} emits a deprecation warning
if it is invoked with a parameter.
"""
self.session.startCheckingExpiration(123)
warnings = self.flushWarnings([
self.test_startCheckingExpirationParameterDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"The lifetime parameter to startCheckingExpiration is deprecated "
"since Twisted 9.0. See Session.sessionTimeout instead.")
def test_checkExpiredDeprecated(self):
"""
L{server.Session.checkExpired} is deprecated.
"""
self.session.checkExpired()
warnings = self.flushWarnings([self.test_checkExpiredDeprecated])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Session.checkExpired is deprecated since Twisted 9.0; sessions "
"check themselves now, you don't need to.")
self.assertEqual(len(warnings), 1)
# Conditional requests:
# If-None-Match, If-Modified-Since
# make conditional request:
# normal response if condition succeeds
# if condition fails:
# response code
# no body
def httpBody(whole):
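    """Return the body of a raw HTTP response string (everything after the header block)."""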
return whole.split('\r\n\r\n', 1)[1]
def httpHeader(whole, key):
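    """Return the value of header C{key} from a raw HTTP response string, or C{None} if it is absent."""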
key = key.lower()
headers = whole.split('\r\n\r\n', 1)[0]
for header in headers.split('\r\n'):
if header.lower().startswith(key):
return header.split(':', 1)[1].strip()
return None
def httpCode(whole):
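    """Return the integer status code from the status line of a raw HTTP response string."""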
l1 = whole.split('\r\n', 1)[0]
return int(l1.split()[1])
class ConditionalTest(unittest.TestCase):
"""
web.server's handling of conditional requests for cache validation.
"""
# XXX: test web.distrib.
def setUp(self):
self.resrc = SimpleResource()
self.resrc.putChild('', self.resrc)
self.site = server.Site(self.resrc)
self.site.logFile = log.logfile
# HELLLLLLLLLLP! This harness is Very Ugly.
self.channel = self.site.buildProtocol(None)
self.transport = http.StringTransport()
self.transport.close = lambda *a, **kw: None
self.transport.disconnecting = lambda *a, **kw: 0
self.transport.getPeer = lambda *a, **kw: "peer"
self.transport.getHost = lambda *a, **kw: "host"
self.channel.makeConnection(self.transport)
for l in ["GET / HTTP/1.1",
"Accept: text/html"]:
self.channel.lineReceived(l)
def tearDown(self):
self.channel.connectionLost(None)
def _modifiedTest(self, modifiedSince):
"""
Given the value C{modifiedSince} for the I{If-Modified-Since}
header, verify that a response with a 200 code and the resource as
the body is returned.
"""
self.channel.lineReceived("If-Modified-Since: " + modifiedSince)
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.OK)
self.failUnlessEqual(httpBody(result), "correct")
def test_modified(self):
"""
If a request is made with an I{If-Modified-Since} header value with
a timestamp indicating a time before the last modification of the
requested resource, a 200 response is returned along with a response
body containing the resource.
"""
self._modifiedTest(http.datetimeToString(1))
def test_unmodified(self):
"""
If a request is made with an I{If-Modified-Since} header value with
a timestamp indicating a time after the last modification of the
        requested resource, a 304 response is returned along with an empty
response body.
"""
self.channel.lineReceived("If-Modified-Since: %s"
% http.datetimeToString(100))
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
self.failUnlessEqual(httpBody(result), "")
def test_invalidTimestamp(self):
"""
If a request is made with an I{If-Modified-Since} header value which
cannot be parsed, the header is treated as not having been present
and a normal 200 response is returned with a response body
containing the resource.
"""
self._modifiedTest("like, maybe a week ago, I guess?")
def test_invalidTimestampYear(self):
"""
If a request is made with an I{If-Modified-Since} header value which
contains a string in the year position which is not an integer, the
header is treated as not having been present and a normal 200
response is returned with a response body containing the resource.
"""
self._modifiedTest("Thu, 01 Jan blah 00:00:10 GMT")
def test_invalidTimestampTooLongAgo(self):
"""
If a request is made with an I{If-Modified-Since} header value which
contains a year before the epoch, the header is treated as not
having been present and a normal 200 response is returned with a
response body containing the resource.
"""
self._modifiedTest("Thu, 01 Jan 1899 00:00:10 GMT")
def test_invalidTimestampMonth(self):
"""
If a request is made with an I{If-Modified-Since} header value which
contains a string in the month position which is not a recognized
month abbreviation, the header is treated as not having been present
and a normal 200 response is returned with a response body
containing the resource.
"""
self._modifiedTest("Thu, 01 Blah 1970 00:00:10 GMT")
def test_etagMatchedNot(self):
"""If-None-Match ETag cache validator (positive)"""
self.channel.lineReceived("If-None-Match: unmatchedTag")
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.OK)
self.failUnlessEqual(httpBody(result), "correct")
def test_etagMatched(self):
"""If-None-Match ETag cache validator (negative)"""
self.channel.lineReceived("If-None-Match: MatchingTag")
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpHeader(result, "ETag"), "MatchingTag")
self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
self.failUnlessEqual(httpBody(result), "")
from twisted.web import google
class GoogleTestCase(unittest.TestCase):
def testCheckGoogle(self):
raise unittest.SkipTest("no violation of google ToS")
d = google.checkGoogle('site:www.twistedmatrix.com twisted')
d.addCallback(self.assertEquals, 'http://twistedmatrix.com/')
return d
class RequestTests(unittest.TestCase):
"""
Tests for the HTTP request class, L{server.Request}.
"""
def test_interface(self):
"""
L{server.Request} instances provide L{iweb.IRequest}.
"""
self.assertTrue(
verifyObject(iweb.IRequest, server.Request(DummyChannel(), True)))
def testChildLink(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.childLink('baz'), 'bar/baz')
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar/', 'HTTP/1.0')
self.assertEqual(request.childLink('baz'), 'baz')
def testPrePathURLSimple(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
request.setHost('example.com', 80)
self.assertEqual(request.prePathURL(), 'http://example.com/foo/bar')
def testPrePathURLNonDefault(self):
d = DummyChannel()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com:81/foo/bar')
def testPrePathURLSSLPort(self):
d = DummyChannel()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost('example.com', 443)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com:443/foo/bar')
def testPrePathURLSSLPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost('example.com', 443)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com/foo/bar')
def testPrePathURLHTTPPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 80
request = server.Request(d, 1)
request.setHost('example.com', 80)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com:80/foo/bar')
def testPrePathURLSSLNonDefault(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com:81/foo/bar')
def testPrePathURLSetSSLHost(self):
d = DummyChannel()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('foo.com', 81, 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://foo.com:81/foo/bar')
def test_prePathURLQuoting(self):
"""
L{Request.prePathURL} quotes special characters in the URL segments to
preserve the original meaning.
"""
d = DummyChannel()
request = server.Request(d, 1)
request.setHost('example.com', 80)
request.gotLength(0)
request.requestReceived('GET', '/foo%2Fbar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com/foo%2Fbar')
class RootResource(resource.Resource):
isLeaf=0
def getChildWithDefault(self, name, request):
request.rememberRootURL()
return resource.Resource.getChildWithDefault(self, name, request)
def render(self, request):
return ''
class RememberURLTest(unittest.TestCase):
def createServer(self, r):
chan = DummyChannel()
chan.site = server.Site(r)
return chan
def testSimple(self):
r = resource.Resource()
r.isLeaf=0
rr = RootResource()
r.putChild('foo', rr)
rr.putChild('', rr)
rr.putChild('bar', resource.Resource())
chan = self.createServer(r)
for url in ['/foo/', '/foo/bar', '/foo/bar/baz', '/foo/bar/']:
request = server.Request(chan, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', url, 'HTTP/1.0')
self.assertEqual(request.getRootURL(), "http://example.com/foo")
def testRoot(self):
rr = RootResource()
rr.putChild('', rr)
rr.putChild('bar', resource.Resource())
chan = self.createServer(rr)
for url in ['/', '/bar', '/bar/baz', '/bar/']:
request = server.Request(chan, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', url, 'HTTP/1.0')
self.assertEqual(request.getRootURL(), "http://example.com/")
class NewRenderResource(resource.Resource):
def render_GET(self, request):
return "hi hi"
def render_HEH(self, request):
return "ho ho"
class NewRenderTestCase(unittest.TestCase):
def _getReq(self):
d = DummyChannel()
d.site.resource.putChild('newrender', NewRenderResource())
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
return request
def testGoodMethods(self):
req = self._getReq()
req.requestReceived('GET', '/newrender', 'HTTP/1.0')
self.assertEquals(req.transport.getvalue().splitlines()[-1], 'hi hi')
req = self._getReq()
req.requestReceived('HEH', '/newrender', 'HTTP/1.0')
self.assertEquals(req.transport.getvalue().splitlines()[-1], 'ho ho')
def testBadMethods(self):
req = self._getReq()
req.requestReceived('CONNECT', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 501)
req = self._getReq()
req.requestReceived('hlalauguG', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 501)
def testImplicitHead(self):
req = self._getReq()
req.requestReceived('HEAD', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 200)
self.assertEquals(-1, req.transport.getvalue().find('hi hi'))
class SDResource(resource.Resource):
def __init__(self,default): self.default=default
def getChildWithDefault(self,name,request):
d=defer.succeed(self.default)
return util.DeferredResource(d).getChildWithDefault(name, request)
class SDTest(unittest.TestCase):
def testDeferredResource(self):
r = resource.Resource()
r.isLeaf = 1
s = SDResource(r)
d = DummyRequest(['foo', 'bar', 'baz'])
resource.getChildForRequest(s, d)
self.assertEqual(d.postpath, ['bar', 'baz'])
class DummyRequestForLogTest(DummyRequest):
uri = '/dummy' # parent class uri has "http://", which doesn't really happen
code = 123
clientproto = 'HTTP/1.0'
sentLength = None
client = IPv4Address('TCP', '1.2.3.4', 12345)
class TestLogEscaping(unittest.TestCase):
def setUp(self):
self.site = http.HTTPFactory()
self.site.logFile = StringIO()
self.request = DummyRequestForLogTest(self.site, False)
def testSimple(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "-"\n')
def testMethodQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.method = 'G"T'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')
def testRequestQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.uri='/dummy"withquote'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')
def testProtoQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.clientproto='HT"P/1.0'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')
def testRefererQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers['referer'] = 'http://malicious" ".website.invalid'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "http://malicious\\" \\".website.invalid" "-"\n')
def testUserAgentQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers['user-agent'] = 'Malicious Web" Evil'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
|
twisted/web/test/test_web.py
|
| 0.437583 | 0.188884 |
import sys
import logging
from threading import Thread, Lock
from impacket.structure import Structure
lock = Lock()
# Last chunk written to the remote stdin pipe; the stdout reader compares
# against it to avoid echoing our own input back. Initialise it here so the
# reader threads can reference it before anything has been sent.
LastDataSent = ''
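# RemComMessage and RemComResponse describe the request/response wire format of
# the RemCom service, and the RemCom_* names below are the named pipes through
# which the remote process's stdin/stdout/stderr are relayed back to the client.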
class RemComMessage(Structure):
structure = (
('Command','4096s=""'),
('WorkingDir','260s=""'),
('Priority','<L=0x20'),
('ProcessID','<L=0x01'),
('Machine','260s=""'),
('NoWait','<L=0'),
)
class RemComResponse(Structure):
structure = (
('ErrorCode','<L=0'),
('ReturnCode','<L=0'),
)
RemComSTDOUT = "RemCom_stdout"
RemComSTDIN = "RemCom_stdin"
RemComSTDERR = "RemCom_stderr"
class CommanderPipes(Thread):
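    """Base worker thread that opens one of the RemCom named pipes on the remote host's IPC$ share."""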
def __init__(self, connection, pipe, permissions, share=None):
Thread.__init__(self)
self.server = connection
self.tid = 0
self.fid = 0
self.share = share
#self.port = transport.get_dport()
self.pipe = pipe
self.permissions = permissions
self.daemon = True
def connectPipe(self):
try:
lock.acquire()
self.tid = self.server.connectTree('IPC$')
self.server.waitNamedPipe(self.tid, self.pipe)
self.fid = self.server.openFile(self.tid,self.pipe,self.permissions,
creationOption = 0x40,
fileAttributes = 0x80)
self.server.setTimeout(1000000)
        except:
            import traceback
            traceback.print_exc()
            logging.error("Something went wrong connecting the pipes (%s), try again",
                          self.__class__)
        finally:
            # Release the lock even on failure so the other pipe threads can connect.
            lock.release()
class CommanderRemoteStdInPipe(CommanderPipes):
    def __init__(self, connection, pipe, permissions, share=None):
        self.shell = None
        CommanderPipes.__init__(self, connection, pipe, permissions, share)
def run(self):
self.connectPipe()
#self.shell = RemoteShell(self.server,
# self.port,
# self.credentials,
# self.tid,
# self.fid,
# self.share,
# self.transport)
#self.shell.cmdloop()
class CommanderRemoteStdOutPipe(CommanderPipes):
    def __init__(self, connection, pipe, permissions):
        CommanderPipes.__init__(self, connection, pipe, permissions)
def run(self):
self.connectPipe()
while True:
try:
ans = self.server.readFile(self.tid,self.fid, 0, 1024)
except:
pass
else:
try:
                    global LastDataSent
                    if ans != LastDataSent:
                        sys.stdout.write(ans.decode('cp437'))
                        sys.stdout.flush()
                    else:
                        LastDataSent = ''
                    # Reset the echo-suppression buffer if it somehow grows too large.
                    if len(LastDataSent) > 10:
                        LastDataSent = ''
except:
pass
class CommanderRemoteStdErrPipe(CommanderPipes):
    def __init__(self, connection, pipe, permissions):
        CommanderPipes.__init__(self, connection, pipe, permissions)
def run(self):
self.connectPipe()
while True:
try:
ans = self.server.readFile(self.tid,self.fid, 0, 1024)
except:
pass
else:
try:
sys.stderr.write(str(ans))
sys.stderr.flush()
except:
pass
|
lib/psexec.py
|
| 0.361616 | 0.039862 |
import torch
import math
import numpy as np
class LabelGuessor(object):
def __init__(self, thresh):
self.thresh = thresh
def __call__(self, model, ims, balance, delT):
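        """
        Produce pseudo-labels for the batch ``ims``: run ``model`` on it, keep
        predictions whose softmax confidence exceeds ``self.thresh`` and return
        ``(labels, selection index, per-sample weights, std of class counts)``.
        ``balance`` selects one of the class-balancing strategies implemented
        below; ``delT`` relaxes the threshold for under-represented classes.
        """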
org_state = {
k: v.clone().detach()
for k, v in model.state_dict().items()
}
is_train = model.training
with torch.no_grad():
model.train()
all_probs = []
logits = model(ims)
probs = torch.softmax(logits, dim=1)
scores, lbs = torch.max(probs, dim=1)
# print("lbs ", lbs)
# print("scores ", scores)
mask = torch.ones_like(lbs,dtype=torch.float)
labels, counts = np.unique(lbs.cpu(),return_counts=True)
# print("labels", labels)
# print("counts ", counts)
# count_unlabels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# for i in range(len(labels)):
# count_unlabels[labels[i]] = counts[i]
mxCount = max(counts)
stdClass = np.std(counts)
# print("stdClass ", stdClass)
if balance > 0:
if balance == 1 or balance == 4:
idx = (mask == 0.0)
else:
idx = (scores > self.thresh)
delT = 0
if mxCount > 0:
ratios = [x/mxCount for x in counts]
for i in range(len(labels)):
tmp = (scores*(lbs==labels[i]).float()).ge(self.thresh - delT*(1-ratios[i])) # Which elements
idx = idx | tmp
if balance > 2:
labels, counts = np.unique(lbs[idx].cpu(),return_counts=True)
ratio = torch.zeros_like(mask,dtype=torch.float)
for i in range(len(labels)):
ratio += ((1/counts[i])*(lbs==labels[i]).float()) # Magnitude of mask elements
Z = torch.sum(mask[idx])
# print("ratio ",ratio)
mask = ratio[idx]
if Z > 0:
mask = Z * mask / torch.sum(mask)
else:
idx = (scores > self.thresh)
mask[idx] = 1.0
else:
idx = scores > self.thresh
mask = mask[idx]
lbs = lbs[idx]
# print("1. lbs ", lbs)
# print("2. mask ", mask)
model.load_state_dict(org_state)
if is_train:
model.train()
else:
model.eval()
return lbs.detach(), idx, mask, stdClass
|
PT-BOSS/label_guessor.py
|
| 0.247714 | 0.384768 |
import matplotlib.pyplot
import seaborn
import pandas
import numpy
class TfidfPlotter:
color = seaborn.color_palette()
def plot(
self,
dataset,
tfidf_mat,
tfidf_features,
labels,
top_n=10,
):
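        """
        Draw one bar chart per label showing its ``top_n`` highest-scoring
        TF-IDF features (3x2 subplot grid, so at most six labels are expected).
        """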
print('calculating top tfidf scores per label')
matplotlib.pyplot.figure(1)
matplotlib.pyplot.suptitle(
'TF-IDF top words per label',
fontsize=20,
)
for label_index, label in enumerate(labels):
print('\tlabel - {label}'.format(
label=label,
))
comments_label_index = dataset.index[dataset[label] == 1]
top_tfidf_features = self.get_top_tfidf_features(
tfidf_mat=tfidf_mat[comments_label_index, :],
tfidf_features=tfidf_features,
top_n=top_n,
)
matplotlib.pyplot.subplot(
3,
2,
label_index + 1,
)
seaborn.barplot(
top_tfidf_features.feature.iloc[0:top_n],
top_tfidf_features.tfidf_score.iloc[0:top_n],
color=self.color[label_index],
alpha=0.8,
)
matplotlib.pyplot.title(
'label : {label}'.format(
label=label,
),
fontsize=15,
)
matplotlib.pyplot.xlabel('', fontsize=12)
matplotlib.pyplot.show()
def get_top_tfidf_features(
self,
tfidf_mat,
tfidf_features,
top_n,
):
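        """
        Return a DataFrame with the ``top_n`` features that have the highest
        mean TF-IDF score over the given rows of ``tfidf_mat``.
        """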
tfidf_mean_values = numpy.mean(
tfidf_mat.toarray(),
axis=0,
)
top_n_ids = numpy.argsort(tfidf_mean_values)[::-1][:top_n]
top_n_features = [
(
tfidf_features[index],
tfidf_mean_values[index],
)
for index in top_n_ids
]
top_tfidf_features = pandas.DataFrame(top_n_features)
top_tfidf_features.columns = [
'feature',
'tfidf_score',
]
return top_tfidf_features
|
plotting_utils/plot_tfidf.py
|
| 0.385375 | 0.333273 |
import json
from Utils import RAR
from Utils import Download
from rarfile import RarFile
from Utils import Json
import os
class ModelMan:
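    """
    Downloads a model archive from Google Drive and unpacks it: the RAR is
    expected to contain exactly one .h5 weights file (written to ./Models/)
    and one .json label map (written to ./Labels/).
    """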
    def __init__(self, googleDriveId: str, name: str, fileName: str):
self.name = name
self.fileName = fileName
self.googleDriveId = googleDriveId
def DownloadRAR(self):
flag:bool = True
try :
flag = not RAR.Check(f'./Imports/{self.fileName}.rar')
except:
pass
while flag:
print(f'Download Model for {self.fileName}')
Download.DownloadFromDrive(self.googleDriveId , f'./Imports/{self.fileName}.rar')
flag = flag and not RAR.Check(f'./Imports/{self.fileName}.rar')
def Extract(self):
rarF = RarFile(f'Imports/{self.fileName}.rar')
if len(rarF.namelist()) !=2 :
warn = f'Error in rar file of model {self.name}'
os.remove(f'Imports/{self.fileName}.rar')
raise ValueError(warn)
else :
cnt =0
for inner in rarF.namelist():
ext = os.path.splitext(inner)[1]
if ext.lower() == '.json' :
cnt +=1
fileInBytes = rarF.read(inner)
dic = Json.ConvertFromBinToJson(fileInBytes , True)
self.shape = len(dic)
with open(f"./Labels/{self.fileName}.json", "w") as f:
json.dump(dic , f)
elif ext.lower() == '.h5' :
cnt+=1
fileInBytes = rarF.read(inner)
with open(f'./Models/{self.fileName}.h5' ,"ab") as f :
f.write(fileInBytes)
            if cnt != 2:
                warn = f'Error in rar file of model {self.name}: expected exactly one .json and one .h5 file'
                os.remove(f'Imports/{self.fileName}.rar')
                raise ValueError(warn)
def CreateRar(self):
self.DownloadRAR()
self.Extract()
|
Src/ModelM.py
|
| 0.103942 | 0.078395 |
import os
import sys
import time
import text_utils as tu
from rfid_mifare_cloner import (
is_tag_reader_connected,
create_dump_tag,
write_new_tag,
check_dependencies_installled,
)
from exceptions import TagNotFoundException, NotClassifMifareTagException
def welcome_screen():
""" Only shown when the user starts the CLI"""
ascii_art = """
____ ______________ __ ____ ____ ________
/ __ \/ ____/ _/ __ \ / |/ (_) __/___ _________ / ____/ /___ ____ ___ _____
/ /_/ / /_ / // / / / / /|_/ / / /_/ __ `/ ___/ _ \ / / / / __ \/ __ \/ _ \/ ___/
/ _, _/ __/ _/ // /_/ / / / / / / __/ /_/ / / / __/ / /___/ / /_/ / / / / __/ /
/_/ |_/_/ /___/_____/ /_/ /_/_/_/ \__,_/_/ \___/ \____/_/\____/_/ /_/\___/_/
"""
print(tu.write_text_color(ascii_art, tu.bright_color(tu.YELLOW)))
print(
tu.write_text_color("#Sorasful " + "\n" * 2, tu.bright_color(tu.BLUE))
)
def first_screen():
""" Screen where the user should plug his reader or has already. """
if not is_tag_reader_connected():
sys.stdout.write(
tu.write_text_color(
"Please connect your tag reader", tu.bright_color(tu.RED)
)
)
sys.stdout.flush()
while not is_tag_reader_connected():
time.sleep(0.4)
sys.stdout.write(u"\u001b[1000D") # move cursor to left to erase previous message
sys.stdout.write(
tu.write_text_color(
"Tag reader detected and connected ! ", tu.bright_color(tu.GREEN)
)
)
sys.stdout.write("\n" * 5)
sys.stdout.flush()
time.sleep(0.5)
def dump_card_screen(tag_to_copy=True):
""" The screen where the user will put his card he wants to duplicate on the reader. """
initial_text = (
"Put the tag you want to duplicate on the reader. "
if tag_to_copy
else "Put the destination tag to clone to. "
)
sys.stdout.write(tu.write_text_color(initial_text, tu.bright_color(tu.RED)))
sys.stdout.flush()
valid_tag = False
while not valid_tag:
try:
create_dump_tag(
"to-copy" if tag_to_copy else "destination"
            )  # should succeed or raise an exception.
valid_tag = True
except TagNotFoundException:
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(
" " * 80
            )  # Clear the previous line, which is longer than this one.
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(tu.write_text_color("No tag found :" + initial_text, tu.bright_color(tu.RED)))
sys.stdout.flush()
time.sleep(0.2)
except NotClassifMifareTagException:
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(
tu.write_text_color(
"This tag is not a classic mifare tag, it won't work... Please try another",
tu.bright_color(tu.RED),
)
)
sys.stdout.flush()
time.sleep(0.2)
except KeyboardInterrupt:
sys.exit()
except:
            pass  # Occasional errors when the device is accessed too many times in a row.
sys.stdout.write(u"\u001b[1000D") # move cursor to left to erase previous message
sys.stdout.write(
tu.write_text_color("Card successfully copied ! ", tu.bright_color(tu.GREEN))
)
if tag_to_copy:
sys.stdout.write(
tu.write_text_color(
"Please, remove the tag from the reader within 10 seconds",
tu.bright_color(tu.CYAN),
)
)
else:
sys.stdout.write(
tu.write_text_color(
"Don't remove the tag, we are processing to the copy of the tag. This shouldn't be long ",
tu.bright_color(tu.CYAN),
)
)
sys.stdout.write("\n" * 2)
sys.stdout.flush()
if tag_to_copy:
time.sleep(10)
def write_tag_screen():
""" The screen when the card is being duplicated to the destination file """
if write_new_tag(tag_to_copy="to-copy", destination="destination"):
sys.stdout.write(
tu.write_text_color(
"Card succesfully duplicated ! You can now test it !\n Enjoy !",
tu.bright_color(tu.GREEN),
)
)
else:
sys.stdout.write(
tu.write_text_color(
"An error has occured, please retry.", tu.bright_color(tu.RED)
)
)
sys.stdout.flush()
def command_line():
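    """Run the interactive flow: wait for the reader, dump the source tag,
    dump the destination tag, write the clone, then remove the dump files."""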
check_dependencies_installled()
welcome_screen()
first_screen()
dump_card_screen(tag_to_copy=True) # We first copy data we want to duplicate
dump_card_screen(tag_to_copy=False) # then we copy on the destination tag
write_tag_screen()
# clean files
os.remove("to-copy.dmp")
os.remove("destination.dmp")
if __name__ == "__main__":
command_line()
|
RFID_mifare_cloner/cli.py
|
| 0.268174 | 0.074703 |
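The CLI assumes a companion text_utils module exposing color constants plus bright_color and write_text_color. That module is not shown here; a minimal ANSI-escape sketch that would satisfy the calls above (an assumption, not the project's actual code) could look like this:

# Hypothetical text_utils sketch (assumption, not the real module).
RED, GREEN, YELLOW, BLUE, CYAN = 31, 32, 33, 34, 36  # standard ANSI foreground codes

def bright_color(color_code):
    # Bright variants sit 60 codes above the normal foreground colors (e.g. 31 -> 91).
    return color_code + 60

def write_text_color(text, color_code):
    # Wrap the text in an ANSI escape sequence and reset attributes afterwards.
    return "\u001b[{}m{}\u001b[0m".format(color_code, text)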
from __future__ import unicode_literals
import core.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('proposals', '0020_auto_20160202_1048'),
]
operations = [
migrations.AlterField(
model_name='talkproposal',
name='abstract',
            field=core.models.EAWTextField(help_text='The overview of what the talk is about. If the talk assumes some domain knowledge, please state it here. If your talk is accepted, this will be displayed on both the website and the handbook. Should be one paragraph.', max_length=500, verbose_name='abstract'),
),
migrations.AlterField(
model_name='talkproposal',
name='objective',
field=core.models.EAWTextField(help_text='Who is the intended audience for your talk? (Be specific, "Python users" is not a good answer). And what will the attendees get out of your talk? When they leave the room, what will they learn that they didn\'t know before? This is NOT made public and for REVIEW ONLY.', max_length=500, verbose_name='objective'),
),
migrations.AlterField(
model_name='tutorialproposal',
name='abstract',
            field=core.models.EAWTextField(help_text='The overview of what the talk is about. If the talk assumes some domain knowledge, please state it here. If your talk is accepted, this will be displayed on both the website and the handbook. Should be one paragraph.', max_length=500, verbose_name='abstract'),
),
migrations.AlterField(
model_name='tutorialproposal',
name='objective',
field=core.models.EAWTextField(help_text='Who is the intended audience for your talk? (Be specific, "Python users" is not a good answer). And what will the attendees get out of your talk? When they leave the room, what will they learn that they didn\'t know before? This is NOT made public and for REVIEW ONLY.', max_length=500, verbose_name='objective'),
),
]
|
src/proposals/migrations/0021_auto_20160222_0709.py
|
| 0.58522 | 0.17989 |
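These AlterField operations only change help_text, so applying the migration produces no schema change. The custom field core.models.EAWTextField is not shown in this file; presumably it is a TextField whose length accounting treats East Asian wide characters as double width. A hypothetical sketch (an assumption in modern Django style, not the project's implementation):

# Hypothetical East-Asian-Width-aware text field; the real core.models.EAWTextField may differ.
import unicodedata

from django.core.exceptions import ValidationError
from django.db import models


def eaw_length(text):
    # Wide ("W") and fullwidth ("F") characters count as two columns.
    return sum(2 if unicodedata.east_asian_width(ch) in ("W", "F") else 1 for ch in text)


class EAWTextField(models.TextField):
    def validate(self, value, model_instance):
        super().validate(value, model_instance)
        if self.max_length is not None and eaw_length(value) > self.max_length:
            raise ValidationError(
                "Ensure this value has at most %(limit)d EAW-aware characters."
                % {"limit": self.max_length}
            )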
from discord import Guild
from discord.errors import NotFound
from contextlib import suppress
from src.utils import channel_to_dict, member_to_dict, role_to_dict, user_to_dict, guild_to_dict
class Pools:
def __init__(self, bot):
self.nonexistant = []
self.bot = bot
async def get_all_emoji(self, guild: Guild):
try:
emoji_manager = self.bot.get_cog("Emoji Manager").managers[guild.id]
except KeyError:
return []
return [await e.as_dict_url() for e in emoji_manager.emojis]
async def get_emoji(self, guild: Guild, emoji_id, fetch=False):
if fetch:
return {}
try:
emoji_manager = self.bot.get_cog("Emoji Manager").managers[guild.id]
except KeyError:
raise
emoji = emoji_manager.find_emoji(a_id=emoji_id)
return await emoji.as_dict_url()
def get_all_channels(self, guild: Guild):
return [channel_to_dict(ch) for ch in guild.channels]
def get_all_roles(self, guild: Guild):
return [role_to_dict(r) for r in guild.roles]
def get_all_members(self, guild: Guild):
return [member_to_dict(m) for m in guild.members]
async def get_member(self, guild: Guild, member_id, fetch=False):
if member_id in self.nonexistant:
raise NotFound(f"unknown member {member_id}")
member = guild.get_member(int(member_id))
if member is None and fetch:
try:
member = await guild.fetch_member(int(member_id))
except NotFound:
self.nonexistant.append(member_id)
if member is None:
raise NotFound(f"unknown member {member_id}")
return member_to_dict(member)
async def get_user(self, user_id, fetch=False):
if user_id in self.nonexistant:
raise NotFound(f"unknown user {user_id}")
user = self.bot.get_user(int(user_id))
if user is None and fetch:
try:
user = await self.bot.fetch_user(int(user_id))
except NotFound:
self.nonexistant.append(user_id)
if user is None:
raise NotFound(f"unknown user {user_id}")
return user_to_dict(user)
def get_all_responses(self, guild: Guild):
try:
auto_responses = self.bot.get_cog("Auto Responses").responses[guild.id]
except KeyError:
return []
return [r.as_dict() for r in auto_responses.auto_responses]
async def get_guild(self, member_id: int, guild_id: int, fetch=False):
guild = self.bot.get_guild(int(guild_id))
if guild is None and fetch:
with suppress(NotFound):
guild = await self.bot.fetch_guild(int(guild_id))
if guild is None:
raise NotFound(f"unknown guild {guild_id}, attempted fetch: {fetch}")
member = guild.get_member(int(member_id))
settings = self.bot.settings[guild]
g = guild_to_dict(guild)
g.update({
'owner': int(g['owner_id']) == int(member_id),
'has_architus': True,
'architus_admin': int(member_id) in settings.admins_ids,
'permissions': member.guild_permissions.value,
})
return g
|
shard/src/api/pools.py
|
| 0.494629 | 0.097262 |
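A hypothetical caller of Pools.get_member, showing the intended cache-first, fetch-on-miss usage; the resolve_member helper below is invented for illustration and is not part of the repo:

# Cache lookup first (cheap, no rate limits), then a REST fetch; Pools memoizes
# IDs that the fetch confirmed do not exist.
from discord.errors import NotFound

async def resolve_member(pools, guild, member_id):
    try:
        return await pools.get_member(guild, member_id, fetch=False)
    except NotFound:
        pass
    try:
        return await pools.get_member(guild, member_id, fetch=True)
    except NotFound:
        return None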
import sys
import pandas as pd
from sqlalchemy import create_engine
def transform_categories(categories_df):
"""
Makes transformations in the categories data.
Arguments:
categories_df: the categories dataframe
Output:
categories_df_trans: the transformed categories dataframe
"""
categories_df_trans = categories_df.copy()
#Store rows ids
ids = categories_df_trans['id']
categories_df_trans = categories_df_trans['categories'].str.split(";",expand=True)
row = categories_df_trans.iloc[0,:]
category_colnames = row.apply(lambda x: x.split('-')[0]).values
categories_df_trans.columns = category_colnames
for column in categories_df_trans:
        # keep only the value after the '-' separator, e.g. 'related-1' -> '1'
categories_df_trans[column] = categories_df_trans[column].apply(lambda x: x.split('-')[1])
# convert column from string to numeric
categories_df_trans[column] = categories_df_trans[column].apply(lambda x: int(x))
#Re-assign ids
categories_df_trans['id'] = ids
return categories_df_trans
def transform_data(categories_df, messages_df):
"""
Performs all the necessary data transformations.
Arguments:
categories_df: categories dataframe
messages_df: messages dataframe
Output:
df: the transformed dataframe
"""
categories_df_trans = transform_categories(categories_df)
df = pd.merge(messages_df, categories_df_trans, how='inner', on='id')
df = df[~df.duplicated()]
return df
def load_data(messages_filepath, categories_filepath):
"""
    Reads the raw data saved as csv files and converts it
into dataframes.
Arguments:
messages_filepath: path to the messages.csv file
categories_filepath: path to the categories.csv file
Output:
messages_df: messages dataframe
categories_df: categories dataframe
"""
messages_df = pd.read_csv(messages_filepath)
categories_df = pd.read_csv(categories_filepath)
return messages_df, categories_df
def save_data(df, database_filename):
"""
Saves the clean modeling data into a SQLite database.
Arguments:
df: the transformed dataframe
database_filename: the resulting database name
Output:
None
"""
engine = create_engine(f"sqlite:///{database_filename}")
df.to_sql(name=database_filename.split('/')[1].split('.')[0],
con=engine,
if_exists = 'replace',
index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
messages_df, categories_df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = transform_data(categories_df, messages_df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
|
data/process_data.py
|
| 0.639624 | 0.51251 |
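For reference, a tiny illustration of what transform_categories does to the semicolon-packed categories column. The category names and values below are invented for the example, and the function is assumed to be importable from data/process_data.py:

# Illustrative only; not part of the pipeline.
import pandas as pd
from process_data import transform_categories  # assumes data/ is on sys.path

toy = pd.DataFrame({
    "id": [1, 2],
    "categories": ["related-1;request-0", "related-0;request-1"],
})
print(transform_categories(toy)[["related", "request", "id"]])
# expected:
#    related  request  id
# 0        1        0   1
# 1        0        1   2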
class Entry:
def __init__(self, tagged, untagged):
self.tagged = tagged
self.untagged = untagged
def get_tagged(self):
return self.tagged
def get_untagged(self):
return self.untagged
@staticmethod
def decode(data):
f_tagged = None
if "tagged" in data:
f_tagged = data["tagged"]
if f_tagged is not None:
f_tagged = Tagged.decode(f_tagged)
f_untagged = None
if "untagged" in data:
f_untagged = data["untagged"]
if f_untagged is not None:
f_untagged = Untagged.decode(f_untagged)
return Entry(f_tagged, f_untagged)
def encode(self):
data = dict()
if self.tagged is not None:
data["tagged"] = self.tagged.encode()
if self.untagged is not None:
data["untagged"] = self.untagged.encode()
return data
def __repr__(self):
return "<Entry tagged:{!r}, untagged:{!r}>".format(self.tagged, self.untagged)
class Tagged:
@staticmethod
def decode(data):
f_tag = data["@type"]
if f_tag == "foo":
return Tagged_A.decode(data)
if f_tag == "b":
return Tagged_B.decode(data)
if f_tag == "Bar":
return Tagged_Bar.decode(data)
if f_tag == "Baz":
return Tagged_Baz.decode(data)
raise Exception("bad type: " + f_tag)
class Tagged_A:
TYPE = "foo"
def __init__(self, shared):
self.shared = shared
def get_shared(self):
return self.shared
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
return Tagged_A(f_shared)
def encode(self):
data = dict()
data["@type"] = "foo"
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
return data
def __repr__(self):
return "<Tagged_A shared:{!r}>".format(self.shared)
class Tagged_B:
TYPE = "b"
def __init__(self, shared):
self.shared = shared
def get_shared(self):
return self.shared
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
return Tagged_B(f_shared)
def encode(self):
data = dict()
data["@type"] = "b"
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
return data
def __repr__(self):
return "<Tagged_B shared:{!r}>".format(self.shared)
class Tagged_Bar:
TYPE = "Bar"
def __init__(self, shared):
self.shared = shared
def get_shared(self):
return self.shared
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
return Tagged_Bar(f_shared)
def encode(self):
data = dict()
data["@type"] = "Bar"
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
return data
def __repr__(self):
return "<Tagged_Bar shared:{!r}>".format(self.shared)
class Tagged_Baz:
TYPE = "Baz"
def __init__(self, shared):
self.shared = shared
def get_shared(self):
return self.shared
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
return Tagged_Baz(f_shared)
def encode(self):
data = dict()
data["@type"] = "Baz"
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
return data
def __repr__(self):
return "<Tagged_Baz shared:{!r}>".format(self.shared)
class Untagged:
@staticmethod
def decode(data):
keys = set(data.keys())
if keys >= set(("a", "b")):
return Untagged_A.decode(data)
if keys >= set(("a",)):
return Untagged_B.decode(data)
if keys >= set(("b",)):
return Untagged_C.decode(data)
raise Exception("no sub type matching the given fields: " + repr(keys))
class Untagged_A:
TYPE = "A"
def __init__(self, shared, shared_ignore, a, b, ignore):
self.shared = shared
self.shared_ignore = shared_ignore
self.a = a
self.b = b
self.ignore = ignore
def get_shared(self):
return self.shared
def get_shared_ignore(self):
return self.shared_ignore
def get_a(self):
return self.a
def get_b(self):
return self.b
def get_ignore(self):
return self.ignore
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
f_shared_ignore = None
if "shared_ignore" in data:
f_shared_ignore = data["shared_ignore"]
if f_shared_ignore is not None:
if not isinstance(f_shared_ignore, unicode):
raise Exception("not a string")
f_a = data["a"]
if not isinstance(f_a, unicode):
raise Exception("not a string")
f_b = data["b"]
if not isinstance(f_b, unicode):
raise Exception("not a string")
f_ignore = None
if "ignore" in data:
f_ignore = data["ignore"]
if f_ignore is not None:
if not isinstance(f_ignore, unicode):
raise Exception("not a string")
return Untagged_A(f_shared, f_shared_ignore, f_a, f_b, f_ignore)
def encode(self):
data = dict()
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
if self.shared_ignore is not None:
data["shared_ignore"] = self.shared_ignore
if self.a is None:
raise Exception("a: is a required field")
data["a"] = self.a
if self.b is None:
raise Exception("b: is a required field")
data["b"] = self.b
if self.ignore is not None:
data["ignore"] = self.ignore
return data
def __repr__(self):
return "<Untagged_A shared:{!r}, shared_ignore:{!r}, a:{!r}, b:{!r}, ignore:{!r}>".format(self.shared, self.shared_ignore, self.a, self.b, self.ignore)
class Untagged_B:
TYPE = "B"
def __init__(self, shared, shared_ignore, a, ignore):
self.shared = shared
self.shared_ignore = shared_ignore
self.a = a
self.ignore = ignore
def get_shared(self):
return self.shared
def get_shared_ignore(self):
return self.shared_ignore
def get_a(self):
return self.a
def get_ignore(self):
return self.ignore
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
f_shared_ignore = None
if "shared_ignore" in data:
f_shared_ignore = data["shared_ignore"]
if f_shared_ignore is not None:
if not isinstance(f_shared_ignore, unicode):
raise Exception("not a string")
f_a = data["a"]
if not isinstance(f_a, unicode):
raise Exception("not a string")
f_ignore = None
if "ignore" in data:
f_ignore = data["ignore"]
if f_ignore is not None:
if not isinstance(f_ignore, unicode):
raise Exception("not a string")
return Untagged_B(f_shared, f_shared_ignore, f_a, f_ignore)
def encode(self):
data = dict()
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
if self.shared_ignore is not None:
data["shared_ignore"] = self.shared_ignore
if self.a is None:
raise Exception("a: is a required field")
data["a"] = self.a
if self.ignore is not None:
data["ignore"] = self.ignore
return data
def __repr__(self):
return "<Untagged_B shared:{!r}, shared_ignore:{!r}, a:{!r}, ignore:{!r}>".format(self.shared, self.shared_ignore, self.a, self.ignore)
class Untagged_C:
TYPE = "C"
def __init__(self, shared, shared_ignore, b, ignore):
self.shared = shared
self.shared_ignore = shared_ignore
self.b = b
self.ignore = ignore
def get_shared(self):
return self.shared
def get_shared_ignore(self):
return self.shared_ignore
def get_b(self):
return self.b
def get_ignore(self):
return self.ignore
@staticmethod
def decode(data):
f_shared = data["shared"]
if not isinstance(f_shared, unicode):
raise Exception("not a string")
f_shared_ignore = None
if "shared_ignore" in data:
f_shared_ignore = data["shared_ignore"]
if f_shared_ignore is not None:
if not isinstance(f_shared_ignore, unicode):
raise Exception("not a string")
f_b = data["b"]
if not isinstance(f_b, unicode):
raise Exception("not a string")
f_ignore = None
if "ignore" in data:
f_ignore = data["ignore"]
if f_ignore is not None:
if not isinstance(f_ignore, unicode):
raise Exception("not a string")
return Untagged_C(f_shared, f_shared_ignore, f_b, f_ignore)
def encode(self):
data = dict()
if self.shared is None:
raise Exception("shared: is a required field")
data["shared"] = self.shared
if self.shared_ignore is not None:
data["shared_ignore"] = self.shared_ignore
if self.b is None:
raise Exception("b: is a required field")
data["b"] = self.b
if self.ignore is not None:
data["ignore"] = self.ignore
return data
def __repr__(self):
return "<Untagged_C shared:{!r}, shared_ignore:{!r}, b:{!r}, ignore:{!r}>".format(self.shared, self.shared_ignore, self.b, self.ignore)
|
it/interfaces/structures/default/python/test.py
|
| 0.781247 | 0.221793 |
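An illustrative round-trip through the generated classes above, assuming they are in scope (the isinstance(..., unicode) checks imply Python 2, so the literals use u''); the payload is invented for the example:

# Decode a payload, check which subtypes were selected, and re-encode it.
payload = {
    "tagged": {"@type": "foo", "shared": u"hello"},
    "untagged": {"shared": u"s", "a": u"1", "b": u"2"},
}
entry = Entry.decode(payload)
assert isinstance(entry.get_tagged(), Tagged_A)      # "@type": "foo" selects Tagged_A
assert isinstance(entry.get_untagged(), Untagged_A)  # has both "a" and "b", so Untagged_A
assert entry.encode() == payload                     # optional fields were absent, so it round-trips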
__author__ = '<NAME>'
__date__ = '2019-11-17'
__copyright__ = '(C) 2019, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterString,
QgsProcessingParameterPoint,
QgsProcessingParameterNumber,
QgsProcessingParameterCrs,
QgsProcessingParameterFileDestination,
QgsFields,
QgsField,
QgsWkbTypes,
QgsFeature,
QgsGeometry,
QgsPointXY,
QgsPoint,
QgsApplication
)
from numpy import radians, arctan, pi, sin, cos, matrix, sqrt, degrees, array, diag, ones, zeros, floor
from numpy.linalg import norm, pinv, inv
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.topogeo import *
import os
from qgis.PyQt.QtGui import QIcon
class TraverseAdjustment(QgsProcessingAlgorithm):
A = 'A'
B = 'B'
Y = 'Y'
Z = 'Z'
DIST = 'DIST'
ANGS = 'ANGS'
DIST_PREC = 'DIST_PREC'
PPM = 'PPM'
ANGS_PREC = 'ANGS_PREC'
CRS = 'CRS'
OUTPUT = 'OUTPUT'
HTML = 'HTML'
rho = 180*3600/pi
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
        # Translate to Portuguese when needed: arg[0] - English (default), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return TraverseAdjustment()
def name(self):
return 'traverseadjustment'
def displayName(self):
return self.tr('Traverse adjustment', 'Poligonal enquadrada')
def group(self):
return self.tr('Survey', 'Agrimensura')
def groupId(self):
return 'survey'
def tags(self):
return self.tr('survey,agrimensura,polygonal,adjustment,total,station,angle,least square').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/total_station.png'))
    txt_en = 'This algorithm performs the adjustment of a framed traverse by the least squares method, where the distance, angle, and direction observations are adjusted simultaneously, providing the most probable values for the given data set. Furthermore, the observations can be rigorously weighted based on their estimated errors and adjusted accordingly.'
txt_pt = 'Este algoritmo realiza o ajustamento de poligonal enquadrada pelo método dos mínimos quadrados, onde as observações de distâncias, ângulos e direções são ajustadas simultaneamente, fornecendo os valores mais prováveis para o conjunto de dados. Além disso, as observações podem ser rigorosamente ponderadas considerando os erros estimados e ajustados.'
figure = 'images/tutorial/survey_traverse.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
nota_en = '''Note: Sample data obtained from class notes of the Geodetic Survey discipline at UFPE.
'''
nota_pt = '''Nota: Dados de exemplo obtidos das notas de aula da disciplina de Levantamento Geodésicos na UFPE.
'''
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<div>''' + self.tr(nota_en, nota_pt) + '''
''' +'</a><br><b>'+ self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterPoint(
self.A,
self.tr('A: first (E,N) coordinates','A: 1º ponto (E,N)'),
defaultValue = QgsPointXY(150000, 250000)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.B,
self.tr('B: second (E,N) coordinates','B: 2º ponto (E,N)'),
defaultValue = QgsPointXY(149922.119, 249875.269)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.Y,
self.tr('Y: penultimate (E,N) coordinates', 'Y: penúltimo ponto (E,N)'),
defaultValue = QgsPointXY(150347.054, 249727.281)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.Z,
self.tr('Z: final (E,N) coordinates', 'Z: último ponto (E,N)'),
defaultValue = QgsPointXY(150350.201, 249622.000)
)
)
self.addParameter(
QgsProcessingParameterString(
self.DIST,
self.tr('List of Horizontal Distances (m)', 'Lista de Distâncias Horizontais (m)'),
defaultValue = '110.426, 72.375, 186.615, 125.153, 78.235, 130.679, 110.854',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterString(
self.ANGS,
self.tr('List of Angles', 'Lista de Ângulos'),
defaultValue = '''75°23'34", 202°4'36", 56°51'15", 283°31'32", 242°57'31", 185°5'12", 94°11'35", 266°13'20" ''',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.DIST_PREC,
self.tr('Initial distance precision (mm)', 'Precisão linear inicial (mm)'),
type = 1,
defaultValue = 3
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.PPM,
self.tr('PPM distance precision', 'Precisão linear em PPM'),
type = 1,
defaultValue = 3
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.ANGS_PREC,
self.tr('Angular precision (seconds)', 'Precisão angular (em segundos)'),
type = 1,
defaultValue = 10
)
)
self.addParameter(
QgsProcessingParameterCrs(
self.CRS,
self.tr('CRS','SRC'),
'ProjectCrs'))
# OUTPUT
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Adjusted Points', 'Pontos da Poligonal')
)
)
self.addParameter(
QgsProcessingParameterFileDestination(
'HTML',
self.tr('Report of the closed traverse', 'Relatório de ajuste da Poligonal'),
self.tr('HTML files (*.html)')
)
)
    # F(Xo) for distances:
def F_X_d(self, pnts, B, Y):
F_X = [[sqrt((B[0]-pnts[0][0])**2 + (B[1]-pnts[0][1])**2)]]
for k in range(len(pnts)-1):
x1 = pnts[k][0]
y1 = pnts[k][1]
x2 = pnts[k+1][0]
y2 = pnts[k+1][1]
F_X += [[sqrt((x1-x2)**2 + (y1-y2)**2)]]
F_X += [[sqrt((Y[0]-pnts[-1][0])**2 + (Y[1]-pnts[-1][1])**2)]]
return F_X
    # F(Xo) for angles:
def F_X_a(self, pnts, A, B, Y, Z):
pnts2 = [B] + pnts + [Y]
        # angles are read clockwise
F_X = [[3600*degrees(DifAz(azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0], azimute(QgsPointXY(B[0], B[1]),QgsPointXY(pnts[0][0], pnts[0][1]))[0]))]]
for k in range(len(pnts2)-2):
pnt0 = QgsPointXY(pnts2[k][0], pnts2[k][1])
pnt1 = QgsPointXY(pnts2[k+1][0], pnts2[k+1][1])
pnt2 = QgsPointXY(pnts2[k+2][0], pnts2[k+2][1])
F_X += [[3600*degrees(DifAz(azimute(pnt1,pnt0)[0], azimute(pnt1, pnt2)[0]))]]
F_X += [[3600*degrees(DifAz(azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(pnts2[-2][0], pnts2[-2][1]))[0], azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(Z[0], Z[1]))[0]))]]
return F_X
def Jacobiana_d(self, pnts, B, Y, n_d, n_par):
Jac = zeros([n_d, n_par])
pnts2 = [B] + pnts + [Y]
for k in range(n_d):
I = pnts2[k]
J = pnts2[k+1]
IJ = norm(array(J) - array(I))
linha = [(I[0]-J[0])/IJ, (I[1]-J[1])/IJ, (J[0]-I[0])/IJ, (J[1]-I[1])/IJ]
if k == 0:
Jac[k, 0:2] = linha[2:]
elif k < (n_d-1):
Jac[k, (2*k-2):(2*k-2 + 4)] = linha
else:
Jac[k, (2*k-2):(2*k-2 + 2)] = linha[:2]
return list(Jac)
def Jacobiana_a(self, pnts, A, B, Y, Z, n_angs, n_par):
Jac = zeros([n_angs, n_par])
pnts2 = [A, B] + pnts + [Y, Z]
for k in range(n_angs):
B = pnts2[k]
I = pnts2[k+1]
F = pnts2[k+2]
IB = norm(array(B) - array(I))
IF = norm(array(F) - array(I))
linha = [(I[1]-B[1])/IB**2, (B[0]-I[0])/IB**2, (B[1]-I[1])/IB**2 - (F[1]-I[1])/IF**2,
(I[0]-B[0])/IB**2 - (I[0]-F[0])/IF**2, (F[1]-I[1])/IF**2, (I[0]-F[0])/IF**2]
linha = list(self.rho*array(linha))
if n_par > 2:
if k == 0:
Jac[k, 0:2] = linha[4:]
elif k==1:
Jac[k, 0:4] = linha[2:]
elif k < (n_angs-2):
Jac[k, (2*k-4):(2*k-4 + 6)] = linha
elif k == n_angs-2:
Jac[k, (2*k-4):(2*k-4 + 4)] = linha[:4]
else:
Jac[k, (2*k-4):(2*k-4 + 2)] = linha[:2]
else:
if k == 0:
Jac[0, 0:2] = linha[4:]
elif k == 1:
Jac[1, 0:2] = linha[2:4]
elif k == 2:
Jac[2, 0:2] = linha[:2]
return list(Jac)
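    # Least-squares adjustment overview: with observed distances (m) and angles
    # (arc-seconds) stacked in Lb, approximate parameters Xo and weight matrix
    # P = diag(1/sigma_i**2), processAlgorithm() below iterates the Gauss-Newton step
    #   delta = (J.T * P * J)^+ * J.T * P * (Lb - F(Xo)),   Xa = Xo + delta,
    # where J stacks Jacobiana_d and Jacobiana_a, stopping when max|delta| drops
    # below the tolerance or the iteration cap is reached.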
def processAlgorithm(self, parameters, context, feedback):
A = self.parameterAsPoint(
parameters,
self.A,
context
)
A = [A.x(), A.y()]
B = self.parameterAsPoint(
parameters,
self.B,
context
)
B = [B.x(), B.y()]
Y = self.parameterAsPoint(
parameters,
self.Y,
context
)
Y = [Y.x(), Y.y()]
Z = self.parameterAsPoint(
parameters,
self.Z,
context
)
Z = [Z.x(), Z.y()]
d = self.parameterAsString(
parameters,
self.DIST,
context
)
d = String2NumberList(d)
#feedback.pushInfo('Distances list: ' + str(d))
angs = self.parameterAsString(
parameters,
self.ANGS,
context
)
angs = String2StringList(angs)
#feedback.pushInfo('Angles list: ' + str(angs))
lista = []
for ang in angs:
lista += [3600*float(dms2dd(ang))]
angs = lista
dist_precision = self.parameterAsDouble(
parameters,
self.DIST_PREC,
context
)
        dist_precision *= 1e-3 # millimeters to meters
ppm = self.parameterAsDouble(
parameters,
self.PPM,
context
)
ppm *= 1e-6 # ppm
ang_precision = self.parameterAsDouble(
parameters,
self.ANGS_PREC,
context
)
CRS = self.parameterAsCrs(
parameters,
self.CRS,
context
)
if CRS.isGeographic():
raise QgsProcessingException(self.tr('The output CRS must be Projected!', 'O SRC da camada de saída deve ser Projetado!'))
# OUTPUT
Fields = QgsFields()
Fields.append(QgsField('id', QVariant.Int))
GeomType = QgsWkbTypes.Point
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
Fields,
GeomType,
CRS
)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
html_output = self.parameterAsFileOutput(
parameters,
self.HTML,
context
)
        # Precisions (a priori standard deviations)
        sd_d = list(dist_precision + array(d)*ppm)
        sd_a = list(ang_precision*ones(len(angs)))
        # Observations vector
        Lb = matrix(d + angs).reshape([len(d)+len(angs),1])
        # Computation of the initial approximations
Xo = []
pnts = []
Az0 = azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0]
p0 = B
for k in range(len(d)-1):
            ang = pi/2 - Az0 - radians(angs[k]/3600) # angle read clockwise
x = p0[0] + d[k]*cos(ang)
y = p0[1] + d[k]*sin(ang)
Xo += [[x], [y]]
pnts += [(x, y)]
Az0 = -pi/2 - ang
p0 = (x, y)
pnts_ini = pnts
        # Linear closure error
ang = pi/2 - Az0 - radians(angs[-2]/3600)
x = p0[0] + d[-1]*cos(ang)
y = p0[1] + d[-1]*sin(ang)
Y_ = (x, y)
Erro = array(Y_)-array(Y)
feedback.pushInfo('Linear closure error: ' + str(round(norm(array(Y_)-array(Y)),4)) + ' m')
feedback.pushInfo('E and N errors: ' + str((round(Erro[0],4),round(Erro[1],4))) + ' m')
        # Angular (azimuth) closure error
Az0 = azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0]
for k in range(len(angs)):
            ang = pi/2 - Az0 - radians(angs[k]/3600) # angle read clockwise
Az = pi/2 - ang
Az0 = Az -pi
if Az<0 or Az>2*pi:
if (Az<0):
Az=Az+2*pi
else:
Az=Az-2*pi
feedback.pushInfo('Angular closure error: ' + str(round(3600*(degrees(Az - azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(Z[0], Z[1]))[0])),2)) + ' sec')
        # Data for the Jacobian matrix
        n_par = len(pnts)*2
        n_d = len(d)
        n_angs = len(angs)
        n_obs = n_d + n_angs
        # Weight matrix
        P = matrix(diag(array(sd_d + sd_a)**(-2)))
        # Iterative computation of the coordinates (parameters)
cont = 0
cont_max = 10
tol = 1e-4
while cont < cont_max:
F_Xo = self.F_X_d(pnts, B, Y) + self.F_X_a(pnts, A, B, Y, Z)
J = matrix(list(self.Jacobiana_d(pnts, B, Y, n_d, n_par)) + list(self.Jacobiana_a(pnts, A, B, Y, Z, n_angs, n_par)))
L = matrix(Lb - F_Xo)
delta = pinv(J.T*P*J)*J.T*P*L
Xa = array(Xo) + array(delta)
cont += 1
            #print('Iteration:', cont, '\ncoord:', Xa.T, '\ndelta:', delta.T)
            feedback.pushInfo('Iteration: ' + str(cont) + '\nCoord: ' + str(Xa.T) + '\nDelta: ' + str(delta.T))
if max(abs(delta))[0,0] > tol:
Xo = Xa
pnts = []
for k in range(int(len(Xo)/2)):
pnts += [(float(Xo[2*k][0]), float(Xo[2*k+1][0]))]
else:
break
        # Residuals
        V = Lb - F_Xo
        # A posteriori variance factor
        n = len(Lb) # number of observations
        u = len(Xa) # number of parameters
        sigma2 = V.T*P*V/(n-u)
        # Adjusted observations (La)
        La = Lb + V
        # Covariance matrix of Xa
        SigmaXa = sigma2[0,0]*pinv(J.T*P*J)
        # Covariance matrix of La
        SigmaLa = J*SigmaXa*J.T
        # Covariance matrix of Lb
        var_priori = 1.0
        SigmaLb = var_priori*inv(P)
        # Covariance matrix of the residuals
        SigmaV = SigmaLa + SigmaLb
feature = QgsFeature()
total = 100.0 /len(pnts) if len(pnts) else 0
for current, pnt in enumerate(pnts):
geom = QgsGeometry(QgsPoint(float(pnt[0]), float(pnt[1])))
feature.setGeometry(geom)
feature.setAttributes([current+1])
sink.addFeature(feature, QgsFeatureSink.FastInsert)
if feedback.isCanceled():
break
feedback.setProgress(int(current * total))
        # Report
tabela1 = '''<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[EST]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[E]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">[N]</span><o:p></o:p></p>
</td>
</tr>
'''
tabela2 = '''<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[EST]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[E]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">[N]</span><o:p></o:p></p>
</td>
<td style="text-align: center;"><i> [S_E] </i></td>
<td style="text-align: center;"><i> [S_N] </i></td>
</tr>
'''
tabela3 = '''<tr style="">
<td
style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[obs]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[r]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 117.6pt;"
valign="top" width="157">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[adj_obs]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 102.05pt;"
valign="top" width="136">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[sd]<o:p></o:p></span></p>
</td>
</tr>
'''
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
<title>'''+ self.tr('Traverse Adjustment Report', 'Relatório de Ajuste de Poligonal') + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon">
</head>
<body
style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);"
alink="#000099" link="#000099" vlink="#990099">
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span
style="font-size: 12pt; line-height: 107%;">''' + self.tr('TRAVERSE ADJUSTMENT', 'POLIGONAL ENQUADRADA') + '''<o:p></o:p></span></b></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><span style="font-style: italic;">''' + self.tr('Method of Least Squares', str2HTML('Método dos Mínimos Quadrados')) + '''</span></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><b><u>''' + self.tr('REPORT', str2HTML('RELATÓRIO')) + '''<o:p></o:p></u></b></p>
<div align="center">
<table style="text-align: center; width: 100%;" border="1"
cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td width="50%"><b><span style=""
>'''+ self.tr('Initial approximation', str2HTML('Aproximação Incial')) + '''</span></b></td>
<td width="50%"><b><span style=""
>'''+ self.tr('Adjusted Coordinates', 'Coordenadas Ajustadas') + '''<o:p></o:p></span></b></td>
</tr>
<tr>
<td style="text-align: center;">
<div align="center">
<table class="MsoTableGrid"
style="border: medium none ; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station',
str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">E</span><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">N</span><o:p></o:p></p>
</td>
</tr>
[TABLE 1]
</tbody>
</table>
</div>
</td>
<td style="text-align: center;">
<div align="center"></br>
<table class="MsoTableGrid"
style="border: medium none ; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station',
str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">E</span><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">N</span><o:p></o:p></p>
</td>
<td style="text-align: center;">
σ<span style="font-style: italic;">E </span></td>
<td style="text-align: center;">
σ<span style="font-style: italic;">N </span></td>
</tr>
[TABLE 2]
</tbody>
</table>
<i><span style="" >''' + self.tr('Posteriori variance', str2HTML('Variância a posteriori')) + '''</span></i><span style="" >: </span><span
style="" > <span
style="color: red;">[sigma2]</span></span>
</div>
</td>
</tr>
<tr>
<td colspan="2" rowspan="1"><b><span
style="" >''' + self.tr('Observations', str2HTML('Observações')) + '''<o:p></o:p></span></b></td>
</tr>
<tr>
<td colspan="2" rowspan="1"
style="text-align: center;">
<div align="center">
<table class="MsoTableGrid"
style="border: medium none ; width: 389.5pt; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0"
width="519">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Observation', str2HTML('Observação')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Residual', str2HTML('Resíduo')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 117.6pt;"
valign="top" width="157">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Adjusted Observation', str2HTML('Observação Ajustada')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 102.05pt;"
valign="top" width="136">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Standard Deviation', str2HTML('Desvio Padrão')) + '''<o:p></o:p></span></p>
</td>
</tr>
[TABLE 3]
</tbody>
</table></br>
</div>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align: left;"
align="left"><i><span
style="font-size: 10pt; line-height: 100%; color: rgb(127, 127, 127);">''' + self.tr(str2HTML('*The unit of measurement of the adjusted coordinates is the same as the input coordinates.'), str2HTML('*A unidade de medida das coordenadas ajustadas é a mesma da coordenadas de entrada.')) + '''<o:p></o:p></span></i></p>
</div>
    <footer>
<p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: right;" align="right"><b>''' + self.tr('<NAME>', str2HTML('<NAME>')) + '''
</br>''' + self.tr('Cartographic Engineer', 'Eng. Cartógrafo') + '''<o:p></o:p></b></p>
</br>
<div align="right">'''+ Imgs().social_table_color + '''
</div>
      <o:p></o:p>
</footer>
</body>
</html>
'''
        # Initial approximation
cont = 0
table1 = ''
for k, pnt in enumerate(pnts_ini):
X = pnt[0]
Y = pnt[1]
cont += 1
tableRowN = tabela1
itens = {
'[EST]' : str(k+1),
'[E]': self.tr('{:,.3f}'.format(X), '{:,.3f}'.format(X).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[N]': self.tr('{:,.3f}'.format(Y), '{:,.3f}'.format(Y).replace(',', 'X').replace('.', ',').replace('X', '.')),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table1 += tableRowN
        # Adjustment
cont = 0
table2 = ''
SD = SigmaXa.diagonal()
for k in range(len(pnts_ini)):
X = Xa[2*k, 0]
Y = Xa[2*k+1, 0]
sdx = sqrt(SD[0, 2*k])
sdy = sqrt(SD[0, 2*k+1])
cont += 1
tableRowN = tabela2
itens = {
'[EST]' : str(k+1),
'[E]': self.tr('{:,.3f}'.format(X), '{:,.3f}'.format(X).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[N]': self.tr('{:,.3f}'.format(Y), '{:,.3f}'.format(Y).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[S_E]': self.tr('{:,.3f}'.format(sdx), '{:,.3f}'.format(sdx).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[S_N]': self.tr('{:,.3f}'.format(sdy), '{:,.3f}'.format(sdy).replace(',', 'X').replace('.', ',').replace('X', '.')),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table2 += tableRowN
        # Observations
table3 = ''
SD = SigmaLa.diagonal()
        for k in range(n_d): # Distances
obs = Lb[k, 0]
adj_obs = La[k, 0]
sd = sqrt(SD[0, k])
r = V[k, 0]
cont += 1
tableRowN = tabela3
itens = {
'[obs]' : str(round(obs,3)),
'[r]': str(round(r,4)),
'[adj_obs]': str(round(adj_obs,3)),
'[sd]': str(round(sd,3))
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table3 += tableRowN
        for t in range(k+1, k+1+ n_angs): # Angles
obs = Lb[t, 0]
adj_obs = La[t, 0]
sd = sqrt(SD[0, t])
r = V[t, 0]
cont += 1
tableRowN = tabela3
itens = {
'[obs]' : str2HTML(dd2dms(obs/3600,3)),
'[r]': str(round(r,4)) + '"',
'[adj_obs]': str2HTML(dd2dms(adj_obs/3600,3)),
'[sd]': str(round(sd,3)) + '"'
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table3 += tableRowN
        # Main document
itens = {
'[TABLE 1]': table1,
'[TABLE 2]': table2,
'[TABLE 3]': table3,
'[sigma2]': str(round(sigma2[0,0],3))
}
for item in itens:
texto = texto.replace(item, itens[item])
        # Export HTML
arq = open(html_output, 'w')
arq.write(texto)
arq.close()
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
return {self.OUTPUT: dest_id,
self.HTML: html_output}
|
processing_provider/Survey_traverseAdjustment.py
|
__author__ = '<NAME>'
__date__ = '2019-11-17'
__copyright__ = '(C) 2019, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterString,
QgsProcessingParameterPoint,
QgsProcessingParameterNumber,
QgsProcessingParameterCrs,
QgsProcessingParameterFileDestination,
QgsFields,
QgsField,
QgsWkbTypes,
QgsFeature,
QgsGeometry,
QgsPointXY,
QgsPoint,
QgsApplication
)
from numpy import radians, arctan, pi, sin, cos, matrix, sqrt, degrees, array, diag, ones, zeros, floor
from numpy.linalg import norm, pinv, inv
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.topogeo import *
import os
from qgis.PyQt.QtGui import QIcon
class TraverseAdjustment(QgsProcessingAlgorithm):
A = 'A'
B = 'B'
Y = 'Y'
Z = 'Z'
DIST = 'DIST'
ANGS = 'ANGS'
DIST_PREC = 'DIST_PREC'
PPM = 'PPM'
ANGS_PREC = 'ANGS_PREC'
CRS = 'CRS'
OUTPUT = 'OUTPUT'
HTML = 'HTML'
rho = 180*3600/pi
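    # rho converts radians to arc-seconds (~206264.8"); it is used to express the
    # angular rows of the Jacobian in the same unit as the angle observations.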
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
        # Translate to Portuguese: arg[0] - English (translate), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return TraverseAdjustment()
def name(self):
return 'traverseadjustment'
def displayName(self):
return self.tr('Traverse adjustment', 'Poligonal enquadrada')
def group(self):
return self.tr('Survey', 'Agrimensura')
def groupId(self):
return 'survey'
def tags(self):
return self.tr('survey,agrimensura,polygonal,adjustment,total,station,angle,least square').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/total_station.png'))
    txt_en = 'This algorithm performs the traverse adjustment of a framed polygonal by the least squares method, where the distance, angle, and direction observations are adjusted simultaneously, providing the most probable values for the given data set. Furthermore, the observations can be rigorously weighted based on their estimated errors and adjusted accordingly.'
txt_pt = 'Este algoritmo realiza o ajustamento de poligonal enquadrada pelo método dos mínimos quadrados, onde as observações de distâncias, ângulos e direções são ajustadas simultaneamente, fornecendo os valores mais prováveis para o conjunto de dados. Além disso, as observações podem ser rigorosamente ponderadas considerando os erros estimados e ajustados.'
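    # Summary of the model implemented in processAlgorithm() (parametric least
    # squares solved by Gauss-Newton iteration):
    #   Lb : observations (distances in meters, angles in arc-seconds)
    #   Xo : approximate coordinates of the unknown stations
    #   J  : Jacobian of the functional model F(X) at Xo, P : weight matrix
    #   delta = (J^T P J)^-1 J^T P (Lb - F(Xo)),  Xa = Xo + delta,
    #   iterated until max|delta| <= tol.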
figure = 'images/tutorial/survey_traverse.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
nota_en = '''Note: Sample data obtained from class notes of the Geodetic Survey discipline at UFPE.
'''
nota_pt = '''Nota: Dados de exemplo obtidos das notas de aula da disciplina de Levantamento Geodésicos na UFPE.
'''
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<div>''' + self.tr(nota_en, nota_pt) + '''
''' +'</a><br><b>'+ self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterPoint(
self.A,
self.tr('A: first (E,N) coordinates','A: 1º ponto (E,N)'),
defaultValue = QgsPointXY(150000, 250000)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.B,
self.tr('B: second (E,N) coordinates','B: 2º ponto (E,N)'),
defaultValue = QgsPointXY(149922.119, 249875.269)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.Y,
self.tr('Y: penultimate (E,N) coordinates', 'Y: penúltimo ponto (E,N)'),
defaultValue = QgsPointXY(150347.054, 249727.281)
)
)
self.addParameter(
QgsProcessingParameterPoint(
self.Z,
self.tr('Z: final (E,N) coordinates', 'Z: último ponto (E,N)'),
defaultValue = QgsPointXY(150350.201, 249622.000)
)
)
self.addParameter(
QgsProcessingParameterString(
self.DIST,
self.tr('List of Horizontal Distances (m)', 'Lista de Distâncias Horizontais (m)'),
defaultValue = '110.426, 72.375, 186.615, 125.153, 78.235, 130.679, 110.854',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterString(
self.ANGS,
self.tr('List of Angles', 'Lista de Ângulos'),
defaultValue = '''75°23'34", 202°4'36", 56°51'15", 283°31'32", 242°57'31", 185°5'12", 94°11'35", 266°13'20" ''',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.DIST_PREC,
self.tr('Initial distance precision (mm)', 'Precisão linear inicial (mm)'),
type = 1,
defaultValue = 3
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.PPM,
self.tr('PPM distance precision', 'Precisão linear em PPM'),
type = 1,
defaultValue = 3
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.ANGS_PREC,
self.tr('Angular precision (seconds)', 'Precisão angular (em segundos)'),
type = 1,
defaultValue = 10
)
)
self.addParameter(
QgsProcessingParameterCrs(
self.CRS,
self.tr('CRS','SRC'),
'ProjectCrs'))
# OUTPUT
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Adjusted Points', 'Pontos da Poligonal')
)
)
self.addParameter(
QgsProcessingParameterFileDestination(
'HTML',
self.tr('Report of the closed traverse', 'Relatório de ajuste da Poligonal'),
self.tr('HTML files (*.html)')
)
)
    # F(Xo) for distances:
def F_X_d(self, pnts, B, Y):
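        # Functional model of the distance observations: Euclidean distances
        # B-P1, P1-P2, ..., Pn-Y evaluated at the approximate coordinates.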
F_X = [[sqrt((B[0]-pnts[0][0])**2 + (B[1]-pnts[0][1])**2)]]
for k in range(len(pnts)-1):
x1 = pnts[k][0]
y1 = pnts[k][1]
x2 = pnts[k+1][0]
y2 = pnts[k+1][1]
F_X += [[sqrt((x1-x2)**2 + (y1-y2)**2)]]
F_X += [[sqrt((Y[0]-pnts[-1][0])**2 + (Y[1]-pnts[-1][1])**2)]]
return F_X
    # F(Xo) for angles:
def F_X_a(self, pnts, A, B, Y, Z):
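        # Functional model of the angle observations: each angle is the clockwise
        # rotation from the backsight azimuth to the foresight azimuth (DifAz),
        # converted from degrees to arc-seconds (factor 3600).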
pnts2 = [B] + pnts + [Y]
        # angle read clockwise
F_X = [[3600*degrees(DifAz(azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0], azimute(QgsPointXY(B[0], B[1]),QgsPointXY(pnts[0][0], pnts[0][1]))[0]))]]
for k in range(len(pnts2)-2):
pnt0 = QgsPointXY(pnts2[k][0], pnts2[k][1])
pnt1 = QgsPointXY(pnts2[k+1][0], pnts2[k+1][1])
pnt2 = QgsPointXY(pnts2[k+2][0], pnts2[k+2][1])
F_X += [[3600*degrees(DifAz(azimute(pnt1,pnt0)[0], azimute(pnt1, pnt2)[0]))]]
F_X += [[3600*degrees(DifAz(azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(pnts2[-2][0], pnts2[-2][1]))[0], azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(Z[0], Z[1]))[0]))]]
return F_X
def Jacobiana_d(self, pnts, B, Y, n_d, n_par):
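        # Each row holds the partial derivatives of one distance d_IJ with respect
        # to (E_I, N_I, E_J, N_J), e.g. d(d_IJ)/dE_I = (E_I - E_J)/d_IJ. The first
        # and last rows drop the columns of the fixed control points B and Y,
        # which are not parameters of the adjustment.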
Jac = zeros([n_d, n_par])
pnts2 = [B] + pnts + [Y]
for k in range(n_d):
I = pnts2[k]
J = pnts2[k+1]
IJ = norm(array(J) - array(I))
linha = [(I[0]-J[0])/IJ, (I[1]-J[1])/IJ, (J[0]-I[0])/IJ, (J[1]-I[1])/IJ]
if k == 0:
Jac[k, 0:2] = linha[2:]
elif k < (n_d-1):
Jac[k, (2*k-2):(2*k-2 + 4)] = linha
else:
Jac[k, (2*k-2):(2*k-2 + 2)] = linha[:2]
return list(Jac)
def Jacobiana_a(self, pnts, A, B, Y, Z, n_angs, n_par):
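        # Each row holds the partial derivatives of one angle with respect to the
        # coordinates of its back point (B), station (I) and forward point (F),
        # scaled by rho so the units are arc-seconds per meter. Columns of the
        # fixed points A, B, Y and Z are dropped from the Jacobian.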
Jac = zeros([n_angs, n_par])
pnts2 = [A, B] + pnts + [Y, Z]
for k in range(n_angs):
B = pnts2[k]
I = pnts2[k+1]
F = pnts2[k+2]
IB = norm(array(B) - array(I))
IF = norm(array(F) - array(I))
linha = [(I[1]-B[1])/IB**2, (B[0]-I[0])/IB**2, (B[1]-I[1])/IB**2 - (F[1]-I[1])/IF**2,
(I[0]-B[0])/IB**2 - (I[0]-F[0])/IF**2, (F[1]-I[1])/IF**2, (I[0]-F[0])/IF**2]
linha = list(self.rho*array(linha))
if n_par > 2:
if k == 0:
Jac[k, 0:2] = linha[4:]
elif k==1:
Jac[k, 0:4] = linha[2:]
elif k < (n_angs-2):
Jac[k, (2*k-4):(2*k-4 + 6)] = linha
elif k == n_angs-2:
Jac[k, (2*k-4):(2*k-4 + 4)] = linha[:4]
else:
Jac[k, (2*k-4):(2*k-4 + 2)] = linha[:2]
else:
if k == 0:
Jac[0, 0:2] = linha[4:]
elif k == 1:
Jac[1, 0:2] = linha[2:4]
elif k == 2:
Jac[2, 0:2] = linha[:2]
return list(Jac)
def processAlgorithm(self, parameters, context, feedback):
A = self.parameterAsPoint(
parameters,
self.A,
context
)
A = [A.x(), A.y()]
B = self.parameterAsPoint(
parameters,
self.B,
context
)
B = [B.x(), B.y()]
Y = self.parameterAsPoint(
parameters,
self.Y,
context
)
Y = [Y.x(), Y.y()]
Z = self.parameterAsPoint(
parameters,
self.Z,
context
)
Z = [Z.x(), Z.y()]
d = self.parameterAsString(
parameters,
self.DIST,
context
)
d = String2NumberList(d)
#feedback.pushInfo('Distances list: ' + str(d))
angs = self.parameterAsString(
parameters,
self.ANGS,
context
)
angs = String2StringList(angs)
#feedback.pushInfo('Angles list: ' + str(angs))
lista = []
for ang in angs:
lista += [3600*float(dms2dd(ang))]
angs = lista
dist_precision = self.parameterAsDouble(
parameters,
self.DIST_PREC,
context
)
        dist_precision *= 1e-3 # millimeters to meters
ppm = self.parameterAsDouble(
parameters,
self.PPM,
context
)
ppm *= 1e-6 # ppm
ang_precision = self.parameterAsDouble(
parameters,
self.ANGS_PREC,
context
)
CRS = self.parameterAsCrs(
parameters,
self.CRS,
context
)
if CRS.isGeographic():
raise QgsProcessingException(self.tr('The output CRS must be Projected!', 'O SRC da camada de saída deve ser Projetado!'))
# OUTPUT
Fields = QgsFields()
Fields.append(QgsField('id', QVariant.Int))
GeomType = QgsWkbTypes.Point
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
Fields,
GeomType,
CRS
)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
html_output = self.parameterAsFileOutput(
parameters,
self.HTML,
context
)
        # Precisions
sd_d = list(dist_precision + array(d)*ppm)
sd_a = list(ang_precision*ones(len(angs)))
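        # Stochastic model: each distance gets sigma = a + ppm*d (constant part plus
        # a distance-proportional part); every angle gets the same sigma in arc-seconds.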
        # Observations
Lb = matrix(d + angs).reshape([len(d)+len(angs),1])
        # Computation of the initial approximations
Xo = []
pnts = []
Az0 = azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0]
p0 = B
for k in range(len(d)-1):
            ang = pi/2 - Az0 - radians(angs[k]/3600) # angle read clockwise
x = p0[0] + d[k]*cos(ang)
y = p0[1] + d[k]*sin(ang)
Xo += [[x], [y]]
pnts += [(x, y)]
Az0 = -pi/2 - ang
p0 = (x, y)
pnts_ini = pnts
        # Linear closure error
ang = pi/2 - Az0 - radians(angs[-2]/3600)
x = p0[0] + d[-1]*cos(ang)
y = p0[1] + d[-1]*sin(ang)
Y_ = (x, y)
Erro = array(Y_)-array(Y)
feedback.pushInfo('Linear closure error: ' + str(round(norm(array(Y_)-array(Y)),4)) + ' m')
feedback.pushInfo('E and N errors: ' + str((round(Erro[0],4),round(Erro[1],4))) + ' m')
        # Angular (azimuth) closure error
Az0 = azimute(QgsPointXY(B[0], B[1]), QgsPointXY(A[0], A[1]))[0]
for k in range(len(angs)):
            ang = pi/2 - Az0 - radians(angs[k]/3600) # angle read clockwise
Az = pi/2 - ang
Az0 = Az -pi
if Az<0 or Az>2*pi:
if (Az<0):
Az=Az+2*pi
else:
Az=Az-2*pi
feedback.pushInfo('Angular closure error: ' + str(round(3600*(degrees(Az - azimute(QgsPointXY(Y[0], Y[1]), QgsPointXY(Z[0], Z[1]))[0])),2)) + ' sec')
        # Data for the Jacobian matrix
n_par = len(pnts)*2
n_d = len(d)
n_angs = len(angs)
n_obs = n_d + n_angs
        # Weight matrix
P = matrix(diag(array(sd_d + sd_a)**(-2)))
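        # Weight matrix P = diag(1/sigma_i^2), i.e. the inverse of the a priori
        # covariance matrix of the observations (unit a priori variance).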
        # Iterative computation of the coordinates (parameters)
cont = 0
cont_max = 10
tol = 1e-4
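        # Gauss-Newton iteration: linearize F at the current Xo, solve the normal
        # equations delta = (J^T P J)^-1 J^T P (Lb - F(Xo)) (pinv is used for the
        # inverse), update the coordinates and stop when max|delta| <= tol or after
        # cont_max iterations.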
while cont < cont_max:
F_Xo = self.F_X_d(pnts, B, Y) + self.F_X_a(pnts, A, B, Y, Z)
J = matrix(list(self.Jacobiana_d(pnts, B, Y, n_d, n_par)) + list(self.Jacobiana_a(pnts, A, B, Y, Z, n_angs, n_par)))
L = matrix(Lb - F_Xo)
delta = pinv(J.T*P*J)*J.T*P*L
Xa = array(Xo) + array(delta)
cont += 1
#print('Iteração:', cont, '\ncoord:', Xa.T, '\ndelta:', delta.T)
feedback.pushInfo('Iteração: ' + str( cont) + '\nCoord: ' + str(Xa.T) + '\nDelta:' + str(delta.T))
if max(abs(delta))[0,0] > tol:
Xo = Xa
pnts = []
for k in range(int(len(Xo)/2)):
pnts += [(float(Xo[2*k][0]), float(Xo[2*k+1][0]))]
else:
break
        # Residuals
V = Lb - F_Xo
        # A posteriori variance factor
        n = len(Lb) # number of observations
        u = len(Xa) # number of parameters
sigma2 = V.T*P*V/(n-u)
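        # A posteriori variance factor sigma0^2 = (V^T P V)/(n - u); it scales the
        # covariance matrices of the parameters and of the adjusted observations below.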
        # Adjusted observations (La)
La = Lb + V
        # Variance-covariance matrix of Xa
SigmaXa = sigma2[0,0]*pinv(J.T*P*J)
        # Variance-covariance matrix of La
SigmaLa = J*SigmaXa*J.T
        # Variance-covariance matrix of Lb
var_priori = 1.0
SigmaLb = var_priori*inv(P)
        # Variance-covariance matrix of the residuals
SigmaV = SigmaLa + SigmaLb
feature = QgsFeature()
total = 100.0 /len(pnts) if len(pnts) else 0
for current, pnt in enumerate(pnts):
geom = QgsGeometry(QgsPoint(float(pnt[0]), float(pnt[1])))
feature.setGeometry(geom)
feature.setAttributes([current+1])
sink.addFeature(feature, QgsFeatureSink.FastInsert)
if feedback.isCanceled():
break
feedback.setProgress(int(current * total))
        # Report
tabela1 = '''<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[EST]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[E]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">[N]</span><o:p></o:p></p>
</td>
</tr>
'''
tabela2 = '''<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[EST]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>[E]</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">[N]</span><o:p></o:p></p>
</td>
<td style="text-align: center;"><i> [S_E] </i></td>
<td style="text-align: center;"><i> [S_N] </i></td>
</tr>
'''
tabela3 = '''<tr style="">
<td
style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[obs]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[r]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 117.6pt;"
valign="top" width="157">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[adj_obs]<o:p></o:p></span></p>
</td>
<td
style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 102.05pt;"
valign="top" width="136">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >[sd]<o:p></o:p></span></p>
</td>
</tr>
'''
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
<title>'''+ self.tr('Traverse Adjustment Report', 'Relatório de Ajuste de Poligonal') + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon">
</head>
<body
style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);"
alink="#000099" link="#000099" vlink="#990099">
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span
style="font-size: 12pt; line-height: 107%;">''' + self.tr('TRAVERSE ADJUSTMENT', 'POLIGONAL ENQUADRADA') + '''<o:p></o:p></span></b></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><span style="font-style: italic;">''' + self.tr('Method of Least Squares', str2HTML('Método dos Mínimos Quadrados')) + '''</span></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><b><u>''' + self.tr('REPORT', str2HTML('RELATÓRIO')) + '''<o:p></o:p></u></b></p>
<div align="center">
<table style="text-align: center; width: 100%;" border="1"
cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td width="50%"><b><span style=""
                    >'''+ self.tr('Initial approximation', str2HTML('Aproximação Inicial')) + '''</span></b></td>
<td width="50%"><b><span style=""
>'''+ self.tr('Adjusted Coordinates', 'Coordenadas Ajustadas') + '''<o:p></o:p></span></b></td>
</tr>
<tr>
<td style="text-align: center;">
<div align="center">
<table class="MsoTableGrid"
style="border: medium none ; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station',
str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">E</span><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">N</span><o:p></o:p></p>
</td>
</tr>
[TABLE 1]
</tbody>
</table>
</div>
</td>
<td style="text-align: center;">
<div align="center"></br>
<table class="MsoTableGrid"
style="border: medium none ; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station',
str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">E</span><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="font-style: italic;">N</span><o:p></o:p></p>
</td>
<td style="text-align: center;">
σ<span style="font-style: italic;">E </span></td>
<td style="text-align: center;">
σ<span style="font-style: italic;">N </span></td>
</tr>
[TABLE 2]
</tbody>
</table>
<i><span style="" >''' + self.tr('Posteriori variance', str2HTML('Variância a posteriori')) + '''</span></i><span style="" >: </span><span
style="" > <span
style="color: red;">[sigma2]</span></span>
</div>
</td>
</tr>
<tr>
<td colspan="2" rowspan="1"><b><span
style="" >''' + self.tr('Observations', str2HTML('Observações')) + '''<o:p></o:p></span></b></td>
</tr>
<tr>
<td colspan="2" rowspan="1"
style="text-align: center;">
<div align="center">
<table class="MsoTableGrid"
style="border: medium none ; width: 389.5pt; border-collapse: collapse;"
border="1" cellpadding="0" cellspacing="0"
width="519">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Observation', str2HTML('Observação')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Residual', str2HTML('Resíduo')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 117.6pt;"
valign="top" width="157">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Adjusted Observation', str2HTML('Observação Ajustada')) + '''<o:p></o:p></span></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 102.05pt;"
valign="top" width="136">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="" >''' + self.tr('Standard Deviation', str2HTML('Desvio Padrão')) + '''<o:p></o:p></span></p>
</td>
</tr>
[TABLE 3]
</tbody>
</table></br>
</div>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align: left;"
align="left"><i><span
                        style="font-size: 10pt; line-height: 100%; color: rgb(127, 127, 127);">''' + self.tr(str2HTML('*The unit of measurement of the adjusted coordinates is the same as the input coordinates.'), str2HTML('*A unidade de medida das coordenadas ajustadas é a mesma das coordenadas de entrada.')) + '''<o:p></o:p></span></i></p>
</div>
    <footer>
<p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: right;" align="right"><b>''' + self.tr('<NAME>', str2HTML('<NAME>')) + '''
</br>''' + self.tr('Cartographic Engineer', 'Eng. Cartógrafo') + '''<o:p></o:p></b></p>
</br>
<div align="right">'''+ Imgs().social_table_color + '''
</div>
      <o:p></o:p>
</footer>
</body>
</html>
'''
        # Initial approximation
cont = 0
table1 = ''
for k, pnt in enumerate(pnts_ini):
X = pnt[0]
Y = pnt[1]
cont += 1
tableRowN = tabela1
itens = {
'[EST]' : str(k+1),
'[E]': self.tr('{:,.3f}'.format(X), '{:,.3f}'.format(X).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[N]': self.tr('{:,.3f}'.format(Y), '{:,.3f}'.format(Y).replace(',', 'X').replace('.', ',').replace('X', '.')),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table1 += tableRowN
        # Adjustment
cont = 0
table2 = ''
SD = SigmaXa.diagonal()
for k in range(len(pnts_ini)):
X = Xa[2*k, 0]
Y = Xa[2*k+1, 0]
sdx = sqrt(SD[0, 2*k])
sdy = sqrt(SD[0, 2*k+1])
cont += 1
tableRowN = tabela2
itens = {
'[EST]' : str(k+1),
'[E]': self.tr('{:,.3f}'.format(X), '{:,.3f}'.format(X).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[N]': self.tr('{:,.3f}'.format(Y), '{:,.3f}'.format(Y).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[S_E]': self.tr('{:,.3f}'.format(sdx), '{:,.3f}'.format(sdx).replace(',', 'X').replace('.', ',').replace('X', '.')),
'[S_N]': self.tr('{:,.3f}'.format(sdy), '{:,.3f}'.format(sdy).replace(',', 'X').replace('.', ',').replace('X', '.')),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table2 += tableRowN
        # Observations
table3 = ''
SD = SigmaLa.diagonal()
        for k in range(n_d): # Distances
obs = Lb[k, 0]
adj_obs = La[k, 0]
sd = sqrt(SD[0, k])
r = V[k, 0]
cont += 1
tableRowN = tabela3
itens = {
'[obs]' : str(round(obs,3)),
'[r]': str(round(r,4)),
'[adj_obs]': str(round(adj_obs,3)),
'[sd]': str(round(sd,3))
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table3 += tableRowN
        for t in range(k+1, k+1+ n_angs): # Angles
obs = Lb[t, 0]
adj_obs = La[t, 0]
sd = sqrt(SD[0, t])
r = V[t, 0]
cont += 1
tableRowN = tabela3
itens = {
'[obs]' : str2HTML(dd2dms(obs/3600,3)),
'[r]': str(round(r,4)) + '"',
'[adj_obs]': str2HTML(dd2dms(adj_obs/3600,3)),
'[sd]': str(round(sd,3)) + '"'
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
table3 += tableRowN
        # Main document
itens = {
'[TABLE 1]': table1,
'[TABLE 2]': table2,
'[TABLE 3]': table3,
'[sigma2]': str(round(sigma2[0,0],3))
}
for item in itens:
texto = texto.replace(item, itens[item])
        # Export HTML
arq = open(html_output, 'w')
arq.write(texto)
arq.close()
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
return {self.OUTPUT: dest_id,
self.HTML: html_output}
| 0.623721 | 0.207757 |
import unittest
import jinja2
import app
class OSTemplateViewTestCase(unittest.TestCase):
def setUp(self):
config = app.ConfigWrapper()
self.app = app.app(config).test_client()
def test_latest_defaults(self):
obs = self.app.get('/')
exp = jinja2.Template(TEMPLATE).render(tag='stable').encode('utf-8')
assert obs.data == exp
def test_latest_version(self):
obs = self.app.get('/?version=latest')
exp = jinja2.Template(TEMPLATE).render(tag='stable').encode('utf-8')
assert obs.data == exp
def test_404_version(self):
obs = self.app.get('/?version=NON-EXISTANT')
assert obs.status == '404 NOT FOUND'
def test_latest_tag(self):
obs = self.app.get('/?tag=testXX.YY.ZZ')
exp = jinja2.Template(TEMPLATE).render(tag='testXX.YY.ZZ').encode('utf-8')
assert obs.data == exp
TEMPLATE = """kind: List
apiVersion: v1
metadata: {}
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: oshinko
- apiVersion: v1
kind: RoleBinding
metadata:
name: oshinko-edit
roleRef:
name: edit
subjects:
- kind: ServiceAccount
name: oshinko
- apiVersion: v1
kind: Template
labels:
application: oshinko-pyspark
createdBy: template-oshinko-pyspark-build-dc
metadata:
annotations:
openshift.io/display-name: PySpark
description: Create a buildconfig, imagestream and deploymentconfig using source-to-image and pyspark source hosted in git
name: oshinko-pyspark-build-dc
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
dockerImageRepository: ${APPLICATION_NAME}
tags:
- name: latest
- apiVersion: v1
kind: BuildConfig
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
source:
git:
ref: ${GIT_REF}
uri: ${GIT_URI}
type: Git
strategy:
sourceStrategy:
type: Source
env:
- name: APP_FILE
value: ${APP_FILE}
forcePull: true
from:
kind: DockerImage
name: radanalyticsio/radanalytics-pyspark:{{ tag }}
triggers:
- type: ConfigChange
- type: ImageChange
imageChange: {}
- type: GitHub
github:
secret: ${APPLICATION_NAME}
- type: Generic
generic:
secret: ${APPLICATION_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
deploymentconfig: ${APPLICATION_NAME}
strategy:
type: Rolling
template:
metadata:
labels:
deploymentconfig: ${APPLICATION_NAME}
app: ${APPLICATION_NAME}
spec:
containers:
- name: ${APPLICATION_NAME}
image: ${APPLICATION_NAME}
imagePullPolicy: Always
resources: {}
terminationMessagePath: /dev/termination-log
env:
- name: OSHINKO_CLUSTER_NAME
value: ${OSHINKO_CLUSTER_NAME}
- name: APP_ARGS
value: ${APP_ARGS}
- name: SPARK_OPTIONS
value: ${SPARK_OPTIONS}
- name: OSHINKO_DEL_CLUSTER
value: ${OSHINKO_DEL_CLUSTER}
- name: APP_EXIT
value: "true"
- name: OSHINKO_NAMED_CONFIG
value: ${OSHINKO_NAMED_CONFIG}
- name: OSHINKO_SPARK_DRIVER_CONFIG
value: ${OSHINKO_SPARK_DRIVER_CONFIG}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- mountPath: /etc/podinfo
name: podinfo
readOnly: false
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: oshinko
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
name: podinfo
triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
- apiVersion: v1
kind: Service
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8080-tcp
port: 8080
protocol: TCP
targetPort: 8080
selector:
deploymentconfig: ${APPLICATION_NAME}
parameters:
- name: APPLICATION_NAME
displayName: Application Name
description: The name to use for the BuildConfig, ImageStream and DeploymentConfig components
from: pyspark-[a-z0-9]{4}
generate: expression
- name: GIT_URI
displayName: Git Repository URL
description: The URL of the repository with your application source code
required: true
- name: APP_ARGS
displayName: Application Arguments
description: Command line arguments to pass to the application
- name: SPARK_OPTIONS
displayName: spark-submit Options
    description: List of additional options to pass to spark-submit (for example --conf property=value or --package ...). --master and --class are set by the launcher and should not be set here.
- name: GIT_REF
displayName: Git Reference
description: Optional branch, tag or commit
- name: OSHINKO_CLUSTER_NAME
description: The name of the spark cluster to run against. The cluster will be created if it does not exist, and a random cluster name will be chosen if this value is left blank.
- name: OSHINKO_NAMED_CONFIG
description: The name of a stored cluster configuration to use if a cluster is created, default is 'default'.
- name: OSHINKO_SPARK_DRIVER_CONFIG
description: The name of a configmap to use for the spark configuration of the driver. If this configmap is empty the default spark configuration will be used.
- name: OSHINKO_DEL_CLUSTER
description: If a cluster is created on-demand, delete the cluster when the application finishes if this option is set to 'true'
value: "true"
required: true
- name: APP_FILE
    description: The name of the main py file to run. If this is not specified and there is a single py file at top level of the git repository, that file will be chosen.
- apiVersion: v1
kind: Template
labels:
application: oshinko-java-spark
createdBy: template-oshinko-java-build-dc
metadata:
annotations:
openshift.io/display-name: JavaSpark
description: Create a buildconfig, imagestream and deploymentconfig using source-to-image and java spark source hosted in git
name: oshinko-java-spark-build-dc
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
dockerImageRepository: ${APPLICATION_NAME}
tags:
- name: latest
- apiVersion: v1
kind: BuildConfig
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
source:
git:
ref: ${GIT_REF}
uri: ${GIT_URI}
type: Git
strategy:
sourceStrategy:
type: Source
env:
- name: APP_FILE
value: ${APP_FILE}
forcePull: true
from:
kind: DockerImage
name: radanalyticsio/radanalytics-java-spark:{{ tag }}
triggers:
- type: ConfigChange
- type: ImageChange
imageChange: {}
- type: GitHub
github:
secret: ${APPLICATION_NAME}
- type: Generic
generic:
secret: ${APPLICATION_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
deploymentconfig: ${APPLICATION_NAME}
strategy:
type: Rolling
template:
metadata:
labels:
deploymentconfig: ${APPLICATION_NAME}
app: ${APPLICATION_NAME}
spec:
containers:
- name: ${APPLICATION_NAME}
image: ${APPLICATION_NAME}
imagePullPolicy: Always
resources: {}
terminationMessagePath: /dev/termination-log
env:
- name: OSHINKO_CLUSTER_NAME
value: ${OSHINKO_CLUSTER_NAME}
- name: APP_ARGS
value: ${APP_ARGS}
- name: SPARK_OPTIONS
value: ${SPARK_OPTIONS}
- name: APP_MAIN_CLASS
value: ${APP_MAIN_CLASS}
- name: OSHINKO_DEL_CLUSTER
value: ${OSHINKO_DEL_CLUSTER}
- name: APP_EXIT
value: "true"
- name: OSHINKO_NAMED_CONFIG
value: ${OSHINKO_NAMED_CONFIG}
- name: OSHINKO_SPARK_DRIVER_CONFIG
value: ${OSHINKO_SPARK_DRIVER_CONFIG}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- mountPath: /etc/podinfo
name: podinfo
readOnly: false
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: oshinko
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
name: podinfo
triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
- apiVersion: v1
kind: Service
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8080-tcp
port: 8080
protocol: TCP
targetPort: 8080
selector:
deploymentconfig: ${APPLICATION_NAME}
parameters:
- name: APPLICATION_NAME
displayName: Application Name
description: |-
The name to use for the BuildConfig, ImageStream and
DeploymentConfig components
from: java-[a-z0-9]{4}
generate: expression
- name: GIT_URI
displayName: Git Repository URL
description: |-
The URL of the repository with your application source code
required: true
- name: APP_MAIN_CLASS
description: Application main class for jar-based applications
- name: APP_ARGS
displayName: Application Arguments
description: Command line arguments to pass to the application
- name: SPARK_OPTIONS
displayName: spark-submit Options
description: |-
      List of additional options to pass to spark-submit (for example
--conf property=value or --package ...). --master and --class are
set by the launcher and should not be set here.
- name: GIT_REF
displayName: Git Reference
description: Optional branch, tag or commit
- name: OSHINKO_CLUSTER_NAME
description: |-
The name of the spark cluster to run against. The cluster will be
created if it does not exist, and a random cluster name will be
chosen if this value is left blank.
- name: OSHINKO_NAMED_CONFIG
description: |-
The name of a stored cluster configuration to use if a cluster is
created, default is 'default'.
- name: OSHINKO_SPARK_DRIVER_CONFIG
description: |-
The name of a configmap to use for the spark configuration of the
driver. If this configmap is empty the default spark configuration
will be used.
- name: OSHINKO_DEL_CLUSTER
description: |-
If a cluster is created on-demand, delete the cluster when the
application finishes if this option is set to 'true'
value: "true"
required: true
- name: APP_FILE
description: |-
The name of the main JAR file. If this is not specified and there is
a single JAR produced by the build, that JAR will be chosen.
- apiVersion: v1
kind: Template
labels:
application: oshinko-scala-spark
createdBy: template-oshinko-scala-spark-build-dc
metadata:
annotations:
description: Create a buildconfig, imagestream and deploymentconfig using source-to-image and Scala Spark source hosted in git
name: oshinko-scala-spark-build-dc
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: ${APPLICATION_NAME}
spec:
dockerImageRepository: ${APPLICATION_NAME}
tags:
- name: latest
- apiVersion: v1
kind: BuildConfig
metadata:
name: ${APPLICATION_NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
source:
git:
ref: ${GIT_REF}
uri: ${GIT_URI}
type: Git
strategy:
sourceStrategy:
type: Source
env:
- name: APP_FILE
value: ${APP_FILE}
forcePull: true
from:
kind: DockerImage
name: radanalyticsio/radanalytics-scala-spark:{{ tag }}
triggers:
- type: ConfigChange
- type: ImageChange
imageChange: {}
- type: GitHub
github:
secret: ${APPLICATION_NAME}
- type: Generic
generic:
secret: ${APPLICATION_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
labels:
deploymentConfig: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
deploymentConfig: ${APPLICATION_NAME}
strategy:
type: Rolling
template:
metadata:
labels:
deploymentConfig: ${APPLICATION_NAME}
spec:
containers:
- name: ${APPLICATION_NAME}
image: ${APPLICATION_NAME}
imagePullPolicy: Always
resources: {}
terminationMessagePath: /dev/termination-log
env:
- name: OSHINKO_CLUSTER_NAME
value: ${OSHINKO_CLUSTER_NAME}
- name: APP_ARGS
value: ${APP_ARGS}
- name: SPARK_OPTIONS
value: ${SPARK_OPTIONS}
- name: APP_MAIN_CLASS
value: ${APP_MAIN_CLASS}
- name: OSHINKO_DEL_CLUSTER
value: ${OSHINKO_DEL_CLUSTER}
- name: APP_EXIT
value: "true"
- name: OSHINKO_NAMED_CONFIG
value: ${OSHINKO_NAMED_CONFIG}
- name: OSHINKO_SPARK_DRIVER_CONFIG
value: ${OSHINKO_SPARK_DRIVER_CONFIG}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- mountPath: /etc/podinfo
name: podinfo
readOnly: false
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: oshinko
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
name: podinfo
triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
- apiVersion: v1
kind: Service
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8080-tcp
port: 8080
protocol: TCP
targetPort: 8080
selector:
deploymentconfig: ${APPLICATION_NAME}
parameters:
- name: APPLICATION_NAME
displayName: Application Name
description: |-
The name to use for the BuildConfig, ImageStream and
DeploymentConfig components
from: scala-[a-z0-9]{4}
generate: expression
- name: GIT_URI
displayName: Git Repository URL
description: |-
The URL of the repository with your application source code
required: true
- name: APP_MAIN_CLASS
description: Application main class for jar-based applications
- name: APP_ARGS
displayName: Application Arguments
description: Command line arguments to pass to the application
- name: SPARK_OPTIONS
displayName: spark-submit Options
description: |-
      List of additional options to pass to spark-submit (for example
--conf property=value or --package ...). --master and --class are
set by the launcher and should not be set here.
- name: GIT_REF
displayName: Git Reference
description: Optional branch, tag or commit
- name: OSHINKO_CLUSTER_NAME
description: |-
The name of the spark cluster to run against. The cluster will be
created if it does not exist, and a random cluster name will be
chosen if this value is left blank.
- name: OSHINKO_NAMED_CONFIG
description: |-
The name of a stored cluster configuration to use if a cluster is
created, default is 'default'.
- name: OSHINKO_SPARK_DRIVER_CONFIG
description: |-
The name of a configmap to use for the spark configuration of the
driver. If this configmap is empty the default spark configuration
will be used.
- name: OSHINKO_DEL_CLUSTER
description: |-
If a cluster is created on-demand, delete the cluster when the
application finishes if this option is set to 'true'
value: "true"
required: true
- name: APP_FILE
description: |-
The name of the main JAR file. If this is not specified and there is
a single JAR produced by the build, that JAR will be chosen.
- apiVersion: v1
kind: Template
template: oshinko-webui
metadata:
name: oshinko-webui
objects:
- kind: Service
apiVersion: v1
metadata:
name: ${OSHINKO_WEB_NAME}
labels:
name: ${OSHINKO_WEB_NAME}
spec:
ports:
- name: o-web-port
protocol: TCP
port: 8080
targetPort: 8080
selector:
name: ${OSHINKO_WEB_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
name: ${OSHINKO_DEPLOYMENT_NAME}
spec:
strategy:
type: Rolling
triggers:
- type: ConfigChange
replicas: 1
selector:
name: ${OSHINKO_WEB_NAME}
template:
metadata:
labels:
name: ${OSHINKO_WEB_NAME}
spec:
containers:
- name: ${OSHINKO_WEB_NAME}
image: ${OSHINKO_WEB_IMAGE}
ports:
- name: o-web-port
containerPort: 8080
protocol: TCP
env:
- name: OSHINKO_SPARK_IMAGE
value: ${OSHINKO_CLUSTER_IMAGE}
- name: OSHINKO_REFRESH_INTERVAL
value: ${OSHINKO_REFRESH_INTERVAL}
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 8080
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 8080
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
serviceAccount: oshinko
- kind: Route
apiVersion: v1
metadata:
name: ${OSHINKO_WEB_NAME}
spec:
host: ${OSHINKO_WEB_ROUTE_HOSTNAME}
to:
kind: Service
name: ${OSHINKO_WEB_NAME}
parameters:
- name: OSHINKO_CLUSTER_IMAGE
description: Full name of the spark image to use when creating clusters
required: false
- name: OSHINKO_WEB_NAME
description: Name of the oshinko web service
value: "oshinko-web"
- name: OSHINKO_WEB_IMAGE
description: Full name of the oshinko web image
required: true
value: radanalyticsio/oshinko-webui:{{ tag }}
- name: OSHINKO_WEB_ROUTE_HOSTNAME
description: The hostname used to create the external route for the webui
- name: OSHINKO_DEPLOYMENT_NAME
description: Name of the oshinko deployment
value: "oshinko"
- name: OSHINKO_REFRESH_INTERVAL
value: "5"
description: Refresh interval for updating cluster list in seconds"""
|
tests.py
|
import unittest
import jinja2
import app
class OSTemplateViewTestCase(unittest.TestCase):
def setUp(self):
config = app.ConfigWrapper()
self.app = app.app(config).test_client()
def test_latest_defaults(self):
obs = self.app.get('/')
exp = jinja2.Template(TEMPLATE).render(tag='stable').encode('utf-8')
assert obs.data == exp
def test_latest_version(self):
obs = self.app.get('/?version=latest')
exp = jinja2.Template(TEMPLATE).render(tag='stable').encode('utf-8')
assert obs.data == exp
def test_404_version(self):
obs = self.app.get('/?version=NON-EXISTANT')
assert obs.status == '404 NOT FOUND'
def test_latest_tag(self):
obs = self.app.get('/?tag=testXX.YY.ZZ')
exp = jinja2.Template(TEMPLATE).render(tag='testXX.YY.ZZ').encode('utf-8')
assert obs.data == exp
TEMPLATE = """kind: List
apiVersion: v1
metadata: {}
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: oshinko
- apiVersion: v1
kind: RoleBinding
metadata:
name: oshinko-edit
roleRef:
name: edit
subjects:
- kind: ServiceAccount
name: oshinko
- apiVersion: v1
kind: Template
labels:
application: oshinko-pyspark
createdBy: template-oshinko-pyspark-build-dc
metadata:
annotations:
openshift.io/display-name: PySpark
description: Create a buildconfig, imagestream and deploymentconfig using source-to-image and pyspark source hosted in git
name: oshinko-pyspark-build-dc
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
dockerImageRepository: ${APPLICATION_NAME}
tags:
- name: latest
- apiVersion: v1
kind: BuildConfig
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
source:
git:
ref: ${GIT_REF}
uri: ${GIT_URI}
type: Git
strategy:
sourceStrategy:
type: Source
env:
- name: APP_FILE
value: ${APP_FILE}
forcePull: true
from:
kind: DockerImage
name: radanalyticsio/radanalytics-pyspark:{{ tag }}
triggers:
- type: ConfigChange
- type: ImageChange
imageChange: {}
- type: GitHub
github:
secret: ${APPLICATION_NAME}
- type: Generic
generic:
secret: ${APPLICATION_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
deploymentconfig: ${APPLICATION_NAME}
strategy:
type: Rolling
template:
metadata:
labels:
deploymentconfig: ${APPLICATION_NAME}
app: ${APPLICATION_NAME}
spec:
containers:
- name: ${APPLICATION_NAME}
image: ${APPLICATION_NAME}
imagePullPolicy: Always
resources: {}
terminationMessagePath: /dev/termination-log
env:
- name: OSHINKO_CLUSTER_NAME
value: ${OSHINKO_CLUSTER_NAME}
- name: APP_ARGS
value: ${APP_ARGS}
- name: SPARK_OPTIONS
value: ${SPARK_OPTIONS}
- name: OSHINKO_DEL_CLUSTER
value: ${OSHINKO_DEL_CLUSTER}
- name: APP_EXIT
value: "true"
- name: OSHINKO_NAMED_CONFIG
value: ${OSHINKO_NAMED_CONFIG}
- name: OSHINKO_SPARK_DRIVER_CONFIG
value: ${OSHINKO_SPARK_DRIVER_CONFIG}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- mountPath: /etc/podinfo
name: podinfo
readOnly: false
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: oshinko
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
name: podinfo
triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
- apiVersion: v1
kind: Service
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8080-tcp
port: 8080
protocol: TCP
targetPort: 8080
selector:
deploymentconfig: ${APPLICATION_NAME}
parameters:
- name: APPLICATION_NAME
displayName: Application Name
description: The name to use for the BuildConfig, ImageStream and DeploymentConfig components
from: pyspark-[a-z0-9]{4}
generate: expression
- name: GIT_URI
displayName: Git Repository URL
description: The URL of the repository with your application source code
required: true
- name: APP_ARGS
displayName: Application Arguments
description: Command line arguments to pass to the application
- name: SPARK_OPTIONS
displayName: spark-submit Options
    description: List of additional options to pass to spark-submit (for example --conf property=value or --package ...). --master and --class are set by the launcher and should not be set here.
- name: GIT_REF
displayName: Git Reference
description: Optional branch, tag or commit
- name: OSHINKO_CLUSTER_NAME
description: The name of the spark cluster to run against. The cluster will be created if it does not exist, and a random cluster name will be chosen if this value is left blank.
- name: OSHINKO_NAMED_CONFIG
description: The name of a stored cluster configuration to use if a cluster is created, default is 'default'.
- name: OSHINKO_SPARK_DRIVER_CONFIG
description: The name of a configmap to use for the spark configuration of the driver. If this configmap is empty the default spark configuration will be used.
- name: OSHINKO_DEL_CLUSTER
description: If a cluster is created on-demand, delete the cluster when the application finishes if this option is set to 'true'
value: "true"
required: true
- name: APP_FILE
    description: The name of the main py file to run. If this is not specified and there is a single py file at top level of the git repository, that file will be chosen.
- apiVersion: v1
kind: Template
labels:
application: oshinko-java-spark
createdBy: template-oshinko-java-build-dc
metadata:
annotations:
openshift.io/display-name: JavaSpark
description: Create a buildconfig, imagestream and deploymentconfig using source-to-image and java spark source hosted in git
name: oshinko-java-spark-build-dc
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
dockerImageRepository: ${APPLICATION_NAME}
tags:
- name: latest
- apiVersion: v1
kind: BuildConfig
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
source:
git:
ref: ${GIT_REF}
uri: ${GIT_URI}
type: Git
strategy:
sourceStrategy:
type: Source
env:
- name: APP_FILE
value: ${APP_FILE}
forcePull: true
from:
kind: DockerImage
name: radanalyticsio/radanalytics-java-spark:{{ tag }}
triggers:
- type: ConfigChange
- type: ImageChange
imageChange: {}
- type: GitHub
github:
secret: ${APPLICATION_NAME}
- type: Generic
generic:
secret: ${APPLICATION_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
deploymentconfig: ${APPLICATION_NAME}
strategy:
type: Rolling
template:
metadata:
labels:
deploymentconfig: ${APPLICATION_NAME}
app: ${APPLICATION_NAME}
spec:
containers:
- name: ${APPLICATION_NAME}
image: ${APPLICATION_NAME}
imagePullPolicy: Always
resources: {}
terminationMessagePath: /dev/termination-log
env:
- name: OSHINKO_CLUSTER_NAME
value: ${OSHINKO_CLUSTER_NAME}
- name: APP_ARGS
value: ${APP_ARGS}
- name: SPARK_OPTIONS
value: ${SPARK_OPTIONS}
- name: APP_MAIN_CLASS
value: ${APP_MAIN_CLASS}
- name: OSHINKO_DEL_CLUSTER
value: ${OSHINKO_DEL_CLUSTER}
- name: APP_EXIT
value: "true"
- name: OSHINKO_NAMED_CONFIG
value: ${OSHINKO_NAMED_CONFIG}
- name: OSHINKO_SPARK_DRIVER_CONFIG
value: ${OSHINKO_SPARK_DRIVER_CONFIG}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- mountPath: /etc/podinfo
name: podinfo
readOnly: false
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: oshinko
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
name: podinfo
triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
- apiVersion: v1
kind: Service
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8080-tcp
port: 8080
protocol: TCP
targetPort: 8080
selector:
deploymentconfig: ${APPLICATION_NAME}
parameters:
- name: APPLICATION_NAME
displayName: Application Name
description: |-
The name to use for the BuildConfig, ImageStream and
DeploymentConfig components
from: java-[a-z0-9]{4}
generate: expression
- name: GIT_URI
displayName: Git Repository URL
description: |-
The URL of the repository with your application source code
required: true
- name: APP_MAIN_CLASS
description: Application main class for jar-based applications
- name: APP_ARGS
displayName: Application Arguments
description: Command line arguments to pass to the application
- name: SPARK_OPTIONS
displayName: spark-submit Options
description: |-
      List of additional options to pass to spark-submit (for example
--conf property=value or --package ...). --master and --class are
set by the launcher and should not be set here.
- name: GIT_REF
displayName: Git Reference
description: Optional branch, tag or commit
- name: OSHINKO_CLUSTER_NAME
description: |-
The name of the spark cluster to run against. The cluster will be
created if it does not exist, and a random cluster name will be
chosen if this value is left blank.
- name: OSHINKO_NAMED_CONFIG
description: |-
The name of a stored cluster configuration to use if a cluster is
created, default is 'default'.
- name: OSHINKO_SPARK_DRIVER_CONFIG
description: |-
The name of a configmap to use for the spark configuration of the
driver. If this configmap is empty the default spark configuration
will be used.
- name: OSHINKO_DEL_CLUSTER
description: |-
If a cluster is created on-demand, delete the cluster when the
application finishes if this option is set to 'true'
value: "true"
required: true
- name: APP_FILE
description: |-
The name of the main JAR file. If this is not specified and there is
a single JAR produced by the build, that JAR will be chosen.
- apiVersion: v1
kind: Template
labels:
application: oshinko-scala-spark
createdBy: template-oshinko-scala-spark-build-dc
metadata:
annotations:
description: Create a buildconfig, imagestream and deploymentconfig using source-to-image and Scala Spark source hosted in git
name: oshinko-scala-spark-build-dc
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: ${APPLICATION_NAME}
spec:
dockerImageRepository: ${APPLICATION_NAME}
tags:
- name: latest
- apiVersion: v1
kind: BuildConfig
metadata:
name: ${APPLICATION_NAME}
spec:
output:
to:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
source:
git:
ref: ${GIT_REF}
uri: ${GIT_URI}
type: Git
strategy:
sourceStrategy:
type: Source
env:
- name: APP_FILE
value: ${APP_FILE}
forcePull: true
from:
kind: DockerImage
name: radanalyticsio/radanalytics-scala-spark:{{ tag }}
triggers:
- type: ConfigChange
- type: ImageChange
imageChange: {}
- type: GitHub
github:
secret: ${APPLICATION_NAME}
- type: Generic
generic:
secret: ${APPLICATION_NAME}
- apiVersion: v1
kind: DeploymentConfig
metadata:
labels:
deploymentConfig: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
deploymentConfig: ${APPLICATION_NAME}
strategy:
type: Rolling
template:
metadata:
labels:
deploymentConfig: ${APPLICATION_NAME}
spec:
containers:
- name: ${APPLICATION_NAME}
image: ${APPLICATION_NAME}
imagePullPolicy: Always
resources: {}
terminationMessagePath: /dev/termination-log
env:
- name: OSHINKO_CLUSTER_NAME
value: ${OSHINKO_CLUSTER_NAME}
- name: APP_ARGS
value: ${APP_ARGS}
- name: SPARK_OPTIONS
value: ${SPARK_OPTIONS}
- name: APP_MAIN_CLASS
value: ${APP_MAIN_CLASS}
- name: OSHINKO_DEL_CLUSTER
value: ${OSHINKO_DEL_CLUSTER}
- name: APP_EXIT
value: "true"
- name: OSHINKO_NAMED_CONFIG
value: ${OSHINKO_NAMED_CONFIG}
- name: OSHINKO_SPARK_DRIVER_CONFIG
value: ${OSHINKO_SPARK_DRIVER_CONFIG}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
volumeMounts:
- mountPath: /etc/podinfo
name: podinfo
readOnly: false
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: oshinko
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.labels
path: labels
name: podinfo
triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:latest
- apiVersion: v1
kind: Service
metadata:
name: ${APPLICATION_NAME}
labels:
app: ${APPLICATION_NAME}
spec:
ports:
- name: 8080-tcp
port: 8080
protocol: TCP
targetPort: 8080
selector:
        deploymentConfig: ${APPLICATION_NAME}
parameters:
- name: APPLICATION_NAME
displayName: Application Name
description: |-
The name to use for the BuildConfig, ImageStream and
DeploymentConfig components
from: scala-[a-z0-9]{4}
generate: expression
- name: GIT_URI
displayName: Git Repository URL
description: |-
The URL of the repository with your application source code
required: true
- name: APP_MAIN_CLASS
description: Application main class for jar-based applications
- name: APP_ARGS
displayName: Application Arguments
description: Command line arguments to pass to the application
- name: SPARK_OPTIONS
displayName: spark-submit Options
description: |-
      List of additional options to pass to spark-submit (for example
      --conf property=value or --packages ...). --master and --class are
set by the launcher and should not be set here.
- name: GIT_REF
displayName: Git Reference
description: Optional branch, tag or commit
- name: OSHINKO_CLUSTER_NAME
description: |-
The name of the spark cluster to run against. The cluster will be
created if it does not exist, and a random cluster name will be
chosen if this value is left blank.
- name: OSHINKO_NAMED_CONFIG
description: |-
The name of a stored cluster configuration to use if a cluster is
created, default is 'default'.
- name: OSHINKO_SPARK_DRIVER_CONFIG
description: |-
The name of a configmap to use for the spark configuration of the
driver. If this configmap is empty the default spark configuration
will be used.
- name: OSHINKO_DEL_CLUSTER
description: |-
If a cluster is created on-demand, delete the cluster when the
application finishes if this option is set to 'true'
value: "true"
required: true
- name: APP_FILE
description: |-
The name of the main JAR file. If this is not specified and there is
a single JAR produced by the build, that JAR will be chosen.
- apiVersion: v1
kind: Template
template: oshinko-webui
metadata:
name: oshinko-webui
objects:
- kind: Service
apiVersion: v1
metadata:
name: ${OSHINKO_WEB_NAME}
labels:
name: ${OSHINKO_WEB_NAME}
spec:
ports:
- name: o-web-port
protocol: TCP
port: 8080
targetPort: 8080
selector:
name: ${OSHINKO_WEB_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
name: ${OSHINKO_DEPLOYMENT_NAME}
spec:
strategy:
type: Rolling
triggers:
- type: ConfigChange
replicas: 1
selector:
name: ${OSHINKO_WEB_NAME}
template:
metadata:
labels:
name: ${OSHINKO_WEB_NAME}
spec:
containers:
- name: ${OSHINKO_WEB_NAME}
image: ${OSHINKO_WEB_IMAGE}
ports:
- name: o-web-port
containerPort: 8080
protocol: TCP
env:
- name: OSHINKO_SPARK_IMAGE
value: ${OSHINKO_CLUSTER_IMAGE}
- name: OSHINKO_REFRESH_INTERVAL
value: ${OSHINKO_REFRESH_INTERVAL}
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 8080
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 8080
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
serviceAccount: oshinko
- kind: Route
apiVersion: v1
metadata:
name: ${OSHINKO_WEB_NAME}
spec:
host: ${OSHINKO_WEB_ROUTE_HOSTNAME}
to:
kind: Service
name: ${OSHINKO_WEB_NAME}
parameters:
- name: OSHINKO_CLUSTER_IMAGE
description: Full name of the spark image to use when creating clusters
required: false
- name: OSHINKO_WEB_NAME
description: Name of the oshinko web service
value: "oshinko-web"
- name: OSHINKO_WEB_IMAGE
description: Full name of the oshinko web image
required: true
value: radanalyticsio/oshinko-webui:{{ tag }}
- name: OSHINKO_WEB_ROUTE_HOSTNAME
description: The hostname used to create the external route for the webui
- name: OSHINKO_DEPLOYMENT_NAME
description: Name of the oshinko deployment
value: "oshinko"
- name: OSHINKO_REFRESH_INTERVAL
value: "5"
description: Refresh interval for updating cluster list in seconds"""
| 0.54819 | 0.271783 |
from unittest import TestCase
from unittest.mock import MagicMock, patch, Mock
from samcli.lib.sync.sync_flow_factory import SyncFlowFactory
from samcli.lib.utils.cloudformation import CloudFormationResourceSummary
class TestSyncFlowFactory(TestCase):
def create_factory(self, auto_dependency_layer: bool = False):
stack_resource = MagicMock()
stack_resource.resources = {
"Resource1": {
"Type": "TypeA",
"Properties": {"Body1"},
},
"Resource2": {
"Type": "TypeB",
"Properties": {"Body2"},
"Metadata": {
"SamResourceId": "CDKResource2",
},
},
}
factory = SyncFlowFactory(
build_context=MagicMock(),
deploy_context=MagicMock(),
stacks=[stack_resource, MagicMock()],
auto_dependency_layer=auto_dependency_layer,
)
return factory
@patch("samcli.lib.sync.sync_flow_factory.get_resource_summaries")
@patch("samcli.lib.sync.sync_flow_factory.get_boto_resource_provider_with_config")
@patch("samcli.lib.sync.sync_flow_factory.get_boto_client_provider_with_config")
def test_load_physical_id_mapping(
self, get_boto_client_provider_mock, get_boto_resource_provider_mock, get_resource_summaries_mock
):
resource_summary_1 = CloudFormationResourceSummary(
resource_type="", logical_resource_id="", physical_resource_id="PhysicalResource1"
)
resource_summary_2 = CloudFormationResourceSummary(
resource_type="", logical_resource_id="", physical_resource_id="PhysicalResource2"
)
# get_resource_summaries_mock.return_value = {"Resource1": "PhysicalResource1", "Resource2": "PhysicalResource2"}
get_resource_summaries_mock.return_value = {"Resource1": resource_summary_1, "Resource2": resource_summary_2}
factory = self.create_factory()
factory.load_physical_id_mapping()
self.assertEqual(len(factory._physical_id_mapping), 2)
self.assertEqual(
factory._physical_id_mapping,
{"Resource1": "PhysicalResource1", "Resource2": "PhysicalResource2"},
)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
def test_create_lambda_flow_zip(self, zip_function_mock, image_function_mock):
factory = self.create_factory()
resource = {"Properties": {"PackageType": "Zip"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, zip_function_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.AutoDependencyLayerParentSyncFlow")
def test_create_lambda_flow_zip_with_auto_dependency_layer(
self, auto_dependency_layer_mock, zip_function_mock, image_function_mock
):
factory = self.create_factory(True)
resource = {"Properties": {"PackageType": "Zip", "Runtime": "python3.8"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, auto_dependency_layer_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.AutoDependencyLayerParentSyncFlow")
def test_create_lambda_flow_zip_with_unsupported_runtime_auto_dependency_layer(
self, auto_dependency_layer_mock, zip_function_mock, image_function_mock
):
factory = self.create_factory(True)
resource = {"Properties": {"PackageType": "Zip", "Runtime": "ruby2.7"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, zip_function_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
def test_create_lambda_flow_image(self, zip_function_mock, image_function_mock):
factory = self.create_factory()
resource = {"Properties": {"PackageType": "Image"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, image_function_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.LayerSyncFlow")
def test_create_layer_flow(self, layer_sync_mock):
factory = self.create_factory()
result = factory._create_layer_flow("Layer1", {})
self.assertEqual(result, layer_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
def test_create_lambda_flow_other(self, zip_function_mock, image_function_mock):
factory = self.create_factory()
resource = {"Properties": {"PackageType": "Other"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, None)
@patch("samcli.lib.sync.sync_flow_factory.RestApiSyncFlow")
def test_create_rest_api_flow(self, rest_api_sync_mock):
factory = self.create_factory()
result = factory._create_rest_api_flow("API1", {})
self.assertEqual(result, rest_api_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.HttpApiSyncFlow")
def test_create_api_flow(self, http_api_sync_mock):
factory = self.create_factory()
result = factory._create_api_flow("API1", {})
self.assertEqual(result, http_api_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.StepFunctionsSyncFlow")
def test_create_stepfunctions_flow(self, stepfunctions_sync_mock):
factory = self.create_factory()
result = factory._create_stepfunctions_flow("StateMachine1", {})
self.assertEqual(result, stepfunctions_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.get_resource_by_id")
def test_create_sync_flow(self, get_resource_by_id_mock):
factory = self.create_factory()
sync_flow = MagicMock()
resource_identifier = MagicMock()
get_resource_by_id = MagicMock()
get_resource_by_id_mock.return_value = get_resource_by_id
generator_mock = MagicMock()
generator_mock.return_value = sync_flow
get_generator_function_mock = MagicMock()
get_generator_function_mock.return_value = generator_mock
factory._get_generator_function = get_generator_function_mock
result = factory.create_sync_flow(resource_identifier)
self.assertEqual(result, sync_flow)
generator_mock.assert_called_once_with(factory, resource_identifier, get_resource_by_id)
@patch("samcli.lib.sync.sync_flow_factory.get_resource_by_id")
def test_create_unknown_resource_sync_flow(self, get_resource_by_id_mock):
get_resource_by_id_mock.return_value = None
factory = self.create_factory()
self.assertIsNone(factory.create_sync_flow(MagicMock()))
@patch("samcli.lib.sync.sync_flow_factory.get_resource_by_id")
def test_create_none_generator_sync_flow(self, get_resource_by_id_mock):
factory = self.create_factory()
resource_identifier = MagicMock()
get_resource_by_id = MagicMock()
get_resource_by_id_mock.return_value = get_resource_by_id
get_generator_function_mock = MagicMock()
get_generator_function_mock.return_value = None
factory._get_generator_function = get_generator_function_mock
self.assertIsNone(factory.create_sync_flow(resource_identifier))
|
tests/unit/lib/sync/test_sync_flow_factory.py
|
from unittest import TestCase
from unittest.mock import MagicMock, patch, Mock
from samcli.lib.sync.sync_flow_factory import SyncFlowFactory
from samcli.lib.utils.cloudformation import CloudFormationResourceSummary
class TestSyncFlowFactory(TestCase):
def create_factory(self, auto_dependency_layer: bool = False):
stack_resource = MagicMock()
stack_resource.resources = {
"Resource1": {
"Type": "TypeA",
"Properties": {"Body1"},
},
"Resource2": {
"Type": "TypeB",
"Properties": {"Body2"},
"Metadata": {
"SamResourceId": "CDKResource2",
},
},
}
factory = SyncFlowFactory(
build_context=MagicMock(),
deploy_context=MagicMock(),
stacks=[stack_resource, MagicMock()],
auto_dependency_layer=auto_dependency_layer,
)
return factory
@patch("samcli.lib.sync.sync_flow_factory.get_resource_summaries")
@patch("samcli.lib.sync.sync_flow_factory.get_boto_resource_provider_with_config")
@patch("samcli.lib.sync.sync_flow_factory.get_boto_client_provider_with_config")
def test_load_physical_id_mapping(
self, get_boto_client_provider_mock, get_boto_resource_provider_mock, get_resource_summaries_mock
):
resource_summary_1 = CloudFormationResourceSummary(
resource_type="", logical_resource_id="", physical_resource_id="PhysicalResource1"
)
resource_summary_2 = CloudFormationResourceSummary(
resource_type="", logical_resource_id="", physical_resource_id="PhysicalResource2"
)
# get_resource_summaries_mock.return_value = {"Resource1": "PhysicalResource1", "Resource2": "PhysicalResource2"}
get_resource_summaries_mock.return_value = {"Resource1": resource_summary_1, "Resource2": resource_summary_2}
factory = self.create_factory()
factory.load_physical_id_mapping()
self.assertEqual(len(factory._physical_id_mapping), 2)
self.assertEqual(
factory._physical_id_mapping,
{"Resource1": "PhysicalResource1", "Resource2": "PhysicalResource2"},
)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
def test_create_lambda_flow_zip(self, zip_function_mock, image_function_mock):
factory = self.create_factory()
resource = {"Properties": {"PackageType": "Zip"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, zip_function_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.AutoDependencyLayerParentSyncFlow")
def test_create_lambda_flow_zip_with_auto_dependency_layer(
self, auto_dependency_layer_mock, zip_function_mock, image_function_mock
):
factory = self.create_factory(True)
resource = {"Properties": {"PackageType": "Zip", "Runtime": "python3.8"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, auto_dependency_layer_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.AutoDependencyLayerParentSyncFlow")
def test_create_lambda_flow_zip_with_unsupported_runtime_auto_dependency_layer(
self, auto_dependency_layer_mock, zip_function_mock, image_function_mock
):
factory = self.create_factory(True)
resource = {"Properties": {"PackageType": "Zip", "Runtime": "ruby2.7"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, zip_function_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
def test_create_lambda_flow_image(self, zip_function_mock, image_function_mock):
factory = self.create_factory()
resource = {"Properties": {"PackageType": "Image"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, image_function_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.LayerSyncFlow")
def test_create_layer_flow(self, layer_sync_mock):
factory = self.create_factory()
result = factory._create_layer_flow("Layer1", {})
self.assertEqual(result, layer_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.ImageFunctionSyncFlow")
@patch("samcli.lib.sync.sync_flow_factory.ZipFunctionSyncFlow")
def test_create_lambda_flow_other(self, zip_function_mock, image_function_mock):
factory = self.create_factory()
resource = {"Properties": {"PackageType": "Other"}}
result = factory._create_lambda_flow("Function1", resource)
self.assertEqual(result, None)
@patch("samcli.lib.sync.sync_flow_factory.RestApiSyncFlow")
def test_create_rest_api_flow(self, rest_api_sync_mock):
factory = self.create_factory()
result = factory._create_rest_api_flow("API1", {})
self.assertEqual(result, rest_api_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.HttpApiSyncFlow")
def test_create_api_flow(self, http_api_sync_mock):
factory = self.create_factory()
result = factory._create_api_flow("API1", {})
self.assertEqual(result, http_api_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.StepFunctionsSyncFlow")
def test_create_stepfunctions_flow(self, stepfunctions_sync_mock):
factory = self.create_factory()
result = factory._create_stepfunctions_flow("StateMachine1", {})
self.assertEqual(result, stepfunctions_sync_mock.return_value)
@patch("samcli.lib.sync.sync_flow_factory.get_resource_by_id")
def test_create_sync_flow(self, get_resource_by_id_mock):
factory = self.create_factory()
sync_flow = MagicMock()
resource_identifier = MagicMock()
get_resource_by_id = MagicMock()
get_resource_by_id_mock.return_value = get_resource_by_id
generator_mock = MagicMock()
generator_mock.return_value = sync_flow
get_generator_function_mock = MagicMock()
get_generator_function_mock.return_value = generator_mock
factory._get_generator_function = get_generator_function_mock
result = factory.create_sync_flow(resource_identifier)
self.assertEqual(result, sync_flow)
generator_mock.assert_called_once_with(factory, resource_identifier, get_resource_by_id)
@patch("samcli.lib.sync.sync_flow_factory.get_resource_by_id")
def test_create_unknown_resource_sync_flow(self, get_resource_by_id_mock):
get_resource_by_id_mock.return_value = None
factory = self.create_factory()
self.assertIsNone(factory.create_sync_flow(MagicMock()))
@patch("samcli.lib.sync.sync_flow_factory.get_resource_by_id")
def test_create_none_generator_sync_flow(self, get_resource_by_id_mock):
factory = self.create_factory()
resource_identifier = MagicMock()
get_resource_by_id = MagicMock()
get_resource_by_id_mock.return_value = get_resource_by_id
get_generator_function_mock = MagicMock()
get_generator_function_mock.return_value = None
factory._get_generator_function = get_generator_function_mock
self.assertIsNone(factory.create_sync_flow(resource_identifier))
| 0.693992 | 0.308737 |
from __future__ import print_function
import itertools
from PhysicsTools.Heppy.analyzers.core.VertexHistograms import VertexHistograms
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.HeppyCore.statistics.average import Average
from PhysicsTools.Heppy.physicsutils.PileUpSummaryInfo import PileUpSummaryInfo
import PhysicsTools.HeppyCore.framework.config as cfg
class VertexAnalyzer( Analyzer ):
"""selects a list of good primary vertices,
    and optionally adds a pile-up weight to MC events.
    The list of good primary vertices is put in event.goodVertices.
    If no good vertex is found, the process function returns False.
    The weight is put in event.vertexWeight, and is multiplied into
    the global event weight, event.eventWeight.
Example:
vertexAna = cfg.Analyzer(
'VertexAnalyzer',
goodVertices = 'goodPVFilter',
vertexWeight = 'vertexWeightFall112011AB',
# uncomment the following line if you want a vertex weight = 1 (no weighting)
# fixedWeight = 1,
verbose = False
)
If fixedWeight is set to None, the vertex weight is read from the EDM collection with module name
'vertexWeightFall112011AB'.
Otherwise, the weight is set to fixedWeight.
The vertex weight collection was at some point produced in the PAT+CMG step,
and could directly be accessed from the PAT or CMG tuple.
In the most recent versions of the PAT+CMG tuple, this collection is not present anymore,
    and an additional full framework process must be run to produce this collection,
so that this analyzer can read it. An example cfg to do that can be found here:
http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/UserCode/CMG/CMGTools/H2TauTau/prod/vertexWeight2011_cfg.py?view=markup
"""
def __init__(self, cfg_ana, cfg_comp, looperName):
super(VertexAnalyzer, self).__init__(cfg_ana, cfg_comp, looperName)
self.doHists=True
if (hasattr(self.cfg_ana,'makeHists')) and (not self.cfg_ana.makeHists):
self.doHists=False
if self.doHists:
self.pileup = VertexHistograms('/'.join([self.dirName,
'pileup.root']))
self.allVertices = self.cfg_ana.allVertices if (hasattr(self.cfg_ana,'allVertices')) else "_AUTO_"
def declareHandles(self):
super(VertexAnalyzer, self).declareHandles()
if self.allVertices == '_AUTO_':
self.handles['vertices'] = AutoHandle( "offlineSlimmedPrimaryVertices", 'std::vector<reco::Vertex>', fallbackLabel="offlinePrimaryVertices" )
else:
self.handles['vertices'] = AutoHandle( self.allVertices, 'std::vector<reco::Vertex>' )
self.fixedWeight = None
if self.cfg_comp.isMC:
if hasattr( self.cfg_ana, 'fixedWeight'):
self.fixedWeight = self.cfg_ana.fixedWeight
else:
self.mchandles['vertexWeight'] = AutoHandle( self.cfg_ana.vertexWeight,
'double' )
self.mchandles['pusi'] = AutoHandle(
'slimmedAddPileupInfo',
'std::vector<PileupSummaryInfo>',
fallbackLabel='addPileupInfo',
)
self.handles['rho'] = AutoHandle(
('fixedGridRhoFastjetAll',''),
'double'
)
self.handles['rhoCN'] = AutoHandle(
('fixedGridRhoFastjetCentralNeutral',''),
'double'
)
self.handles['sigma'] = AutoHandle(
('fixedGridSigmaFastjetAll',''),
'double',
mayFail=True
)
def beginLoop(self, setup):
super(VertexAnalyzer,self).beginLoop(setup)
self.averages.add('vertexWeight', Average('vertexWeight') )
self.counters.addCounter('GoodVertex')
self.count = self.counters.counter('GoodVertex')
self.count.register('All Events')
self.count.register('Events With Good Vertex')
def process(self, event):
self.readCollections(event.input )
event.rho = self.handles['rho'].product()[0]
event.rhoCN = self.handles['rhoCN'].product()[0]
event.sigma = self.handles['sigma'].product()[0] if self.handles['sigma'].isValid() else -999
event.vertices = self.handles['vertices'].product()
event.goodVertices = list(filter(self.testGoodVertex,event.vertices))
self.count.inc('All Events')
event.vertexWeight = 1
if self.cfg_comp.isMC:
event.pileUpInfo = map( PileUpSummaryInfo,
self.mchandles['pusi'].product() )
if self.fixedWeight is None:
event.vertexWeight = self.mchandles['vertexWeight'].product()[0]
else:
event.vertexWeight = self.fixedWeight
event.eventWeight *= event.vertexWeight
self.averages['vertexWeight'].add( event.vertexWeight )
if self.verbose:
print('VertexAnalyzer: #vert = ', len(event.vertices), \
', weight = ', event.vertexWeight)
        # Check if the event needs to be skipped when no good vertex is found (useful for generator-level studies)
keepFailingEvents = False
if hasattr( self.cfg_ana, 'keepFailingEvents'):
keepFailingEvents = self.cfg_ana.keepFailingEvents
if len(event.goodVertices)==0:
event.passedVertexAnalyzer=False
if not keepFailingEvents:
return False
else:
event.passedVertexAnalyzer=True
if self.doHists:
self.pileup.hist.Fill( len(event.goodVertices) )
#A.R. mindist is one of the slowest functions, default commented
# self.pileup.mindist.Fill( self.mindist(event.goodVertices) )
self.count.inc('Events With Good Vertex')
return True
def testGoodVertex(self,vertex):
if vertex.isFake():
return False
if vertex.ndof()<=4:
return False
if abs(vertex.z())>24:
return False
if vertex.position().Rho()>2:
return False
return True
def mindist(self, vertices):
mindist = 999999
for comb in itertools.combinations(vertices, 2):
dist = abs(comb[0].z() - comb[1].z())
if dist<mindist:
mindist = dist
return mindist
def write(self, setup):
super(VertexAnalyzer, self).write(setup)
if self.doHists:
self.pileup.write()
setattr(VertexAnalyzer,"defaultConfig",cfg.Analyzer(
class_object=VertexAnalyzer,
vertexWeight = None,
fixedWeight = 1,
verbose = False
)
)
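# Illustrative sketch (not part of the original analyzer): how the default configuration
# above might be customised for a job. keepFailingEvents and makeHists are the optional
# attributes the analyzer reads via hasattr() in __init__()/process(); the values below
# are example choices only.
if __name__ == "__main__":
    vertexAna = cfg.Analyzer(
        class_object=VertexAnalyzer,
        vertexWeight=None,
        fixedWeight=1,            # weight of 1, i.e. no pile-up reweighting
        keepFailingEvents=True,   # keep events without a good vertex (generator-level studies)
        makeHists=False,          # skip writing the pileup histogram file
        verbose=True,
    )
    print(vertexAna)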
|
PhysicsTools/Heppy/python/analyzers/objects/VertexAnalyzer.py
|
from __future__ import print_function
import itertools
from PhysicsTools.Heppy.analyzers.core.VertexHistograms import VertexHistograms
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.HeppyCore.statistics.average import Average
from PhysicsTools.Heppy.physicsutils.PileUpSummaryInfo import PileUpSummaryInfo
import PhysicsTools.HeppyCore.framework.config as cfg
class VertexAnalyzer( Analyzer ):
"""selects a list of good primary vertices,
    and optionally adds a pile-up weight to MC events.
    The list of good primary vertices is put in event.goodVertices.
    If no good vertex is found, the process function returns False.
    The weight is put in event.vertexWeight, and is multiplied into
    the global event weight, event.eventWeight.
Example:
vertexAna = cfg.Analyzer(
'VertexAnalyzer',
goodVertices = 'goodPVFilter',
vertexWeight = 'vertexWeightFall112011AB',
# uncomment the following line if you want a vertex weight = 1 (no weighting)
# fixedWeight = 1,
verbose = False
)
If fixedWeight is set to None, the vertex weight is read from the EDM collection with module name
'vertexWeightFall112011AB'.
Otherwise, the weight is set to fixedWeight.
The vertex weight collection was at some point produced in the PAT+CMG step,
and could directly be accessed from the PAT or CMG tuple.
In the most recent versions of the PAT+CMG tuple, this collection is not present anymore,
    and an additional full framework process must be run to produce this collection,
so that this analyzer can read it. An example cfg to do that can be found here:
http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/UserCode/CMG/CMGTools/H2TauTau/prod/vertexWeight2011_cfg.py?view=markup
"""
def __init__(self, cfg_ana, cfg_comp, looperName):
super(VertexAnalyzer, self).__init__(cfg_ana, cfg_comp, looperName)
self.doHists=True
if (hasattr(self.cfg_ana,'makeHists')) and (not self.cfg_ana.makeHists):
self.doHists=False
if self.doHists:
self.pileup = VertexHistograms('/'.join([self.dirName,
'pileup.root']))
self.allVertices = self.cfg_ana.allVertices if (hasattr(self.cfg_ana,'allVertices')) else "_AUTO_"
def declareHandles(self):
super(VertexAnalyzer, self).declareHandles()
if self.allVertices == '_AUTO_':
self.handles['vertices'] = AutoHandle( "offlineSlimmedPrimaryVertices", 'std::vector<reco::Vertex>', fallbackLabel="offlinePrimaryVertices" )
else:
self.handles['vertices'] = AutoHandle( self.allVertices, 'std::vector<reco::Vertex>' )
self.fixedWeight = None
if self.cfg_comp.isMC:
if hasattr( self.cfg_ana, 'fixedWeight'):
self.fixedWeight = self.cfg_ana.fixedWeight
else:
self.mchandles['vertexWeight'] = AutoHandle( self.cfg_ana.vertexWeight,
'double' )
self.mchandles['pusi'] = AutoHandle(
'slimmedAddPileupInfo',
'std::vector<PileupSummaryInfo>',
fallbackLabel='addPileupInfo',
)
self.handles['rho'] = AutoHandle(
('fixedGridRhoFastjetAll',''),
'double'
)
self.handles['rhoCN'] = AutoHandle(
('fixedGridRhoFastjetCentralNeutral',''),
'double'
)
self.handles['sigma'] = AutoHandle(
('fixedGridSigmaFastjetAll',''),
'double',
mayFail=True
)
def beginLoop(self, setup):
super(VertexAnalyzer,self).beginLoop(setup)
self.averages.add('vertexWeight', Average('vertexWeight') )
self.counters.addCounter('GoodVertex')
self.count = self.counters.counter('GoodVertex')
self.count.register('All Events')
self.count.register('Events With Good Vertex')
def process(self, event):
self.readCollections(event.input )
event.rho = self.handles['rho'].product()[0]
event.rhoCN = self.handles['rhoCN'].product()[0]
event.sigma = self.handles['sigma'].product()[0] if self.handles['sigma'].isValid() else -999
event.vertices = self.handles['vertices'].product()
event.goodVertices = list(filter(self.testGoodVertex,event.vertices))
self.count.inc('All Events')
event.vertexWeight = 1
if self.cfg_comp.isMC:
event.pileUpInfo = map( PileUpSummaryInfo,
self.mchandles['pusi'].product() )
if self.fixedWeight is None:
event.vertexWeight = self.mchandles['vertexWeight'].product()[0]
else:
event.vertexWeight = self.fixedWeight
event.eventWeight *= event.vertexWeight
self.averages['vertexWeight'].add( event.vertexWeight )
if self.verbose:
print('VertexAnalyzer: #vert = ', len(event.vertices), \
', weight = ', event.vertexWeight)
        # Check if the event needs to be skipped when no good vertex is found (useful for generator-level studies)
keepFailingEvents = False
if hasattr( self.cfg_ana, 'keepFailingEvents'):
keepFailingEvents = self.cfg_ana.keepFailingEvents
if len(event.goodVertices)==0:
event.passedVertexAnalyzer=False
if not keepFailingEvents:
return False
else:
event.passedVertexAnalyzer=True
if self.doHists:
self.pileup.hist.Fill( len(event.goodVertices) )
#A.R. mindist is one of the slowest functions, default commented
# self.pileup.mindist.Fill( self.mindist(event.goodVertices) )
self.count.inc('Events With Good Vertex')
return True
def testGoodVertex(self,vertex):
if vertex.isFake():
return False
if vertex.ndof()<=4:
return False
if abs(vertex.z())>24:
return False
if vertex.position().Rho()>2:
return False
return True
def mindist(self, vertices):
mindist = 999999
for comb in itertools.combinations(vertices, 2):
dist = abs(comb[0].z() - comb[1].z())
if dist<mindist:
mindist = dist
return mindist
def write(self, setup):
super(VertexAnalyzer, self).write(setup)
if self.doHists:
self.pileup.write()
setattr(VertexAnalyzer,"defaultConfig",cfg.Analyzer(
class_object=VertexAnalyzer,
vertexWeight = None,
fixedWeight = 1,
verbose = False
)
)
| 0.658198 | 0.219923 |
# <codecell>
from pandas import read_csv, Series, DataFrame
import numpy as np
from itertools import product
import os, os.path
import csv
import matplotlib.pyplot as plt
import re
import dateutil.parser
os.chdir('/home/will/HIVReportGen/')
# <codecell>
def extract_YOB(inp):
try:
return float(inp.split('-')[0])
except AttributeError:
return float(inp)
except ValueError:
#print('Bad YOB', inp)
return np.nan
def safe_float(m, default = np.nan):
try:
return float(m)
except:
return default
def feet2meters(height):
if (height == 'ND') or (height != height):
return np.nan
try:
        res = re.findall(r'(\d).\s{0,1}(\d{0,2})\D?', height)
except TypeError:
#print(height)
raise TypeError
try:
ft = float(res[0][0])
inc = safe_float(res[0][1], default = 0.0)
except IndexError:
#print(height,res)
raise IndexError
except ValueError:
#print(height, res)
raise ValueError
tot_inches = ft*12+inc
meters = tot_inches*0.0254
if meters > 2:
print(meters, height, res)
return meters
def checkbox_conv(inp):
if inp != inp:
return np.nan
valdict = {
'checked':True,
'test positive':True,
'positive':True,
'yes':True,
'unchecked':False,
'test negative':False,
'negative':False,
        'no':False}
return valdict.get(inp.lower(), np.nan)
def verbose_parser(inp):
try:
return dateutil.parser.parse(inp)
except:
return np.nan
def fix_col_name(name):
if "(choice='" in name:
return name.split("(choice='",1)[1][:-2]
else:
return name
# <codecell>
from datetime import date, datetime
from pandas import merge
from copy import deepcopy
class PatData(object):
def __init__(self, redcap_file, config_file):
if redcap_file is None:
return
self.config_data = read_csv(config_file, sep = '\t')
self._generate_converter_dict()
self._generate_agg_dict()
with open(redcap_file) as handle:
handle.read(1)
self.redcap = read_csv(handle, converters=self.conv_dict)
self.clip_dates()
self.visit_redcap = None
self.pat_redcap = None
def CopyFromOtherData(self, OtherData):
self.config_data = OtherData.config_data.copy()
self.redcap = OtherData.redcap.copy()
self.conv_dict = deepcopy(OtherData.conv_dict)
self.date_clip_cols = deepcopy(OtherData.date_clip_cols)
self.visit_agg = deepcopy(OtherData.visit_agg)
self.pat_agg = deepcopy(OtherData.pat_agg)
if OtherData.pat_redcap is not None:
self.pat_redcap = OtherData.pat_redcap.copy()
if OtherData.visit_redcap is not None:
self.visit_redcap = OtherData.visit_redcap.copy()
if OtherData.all_group is not None:
self.all_group = OtherData.all_group.copy()
def _generate_converter_dict(self):
cdict = {
'DateParser':verbose_parser,
'extract_YOB':extract_YOB,
'checkbox_conv':checkbox_conv,
'safe_float':safe_float
}
conv_dict = {}
date_clip_cols = set()
for colname, convfun in zip(self.config_data['RawName'].values, self.config_data['ConvertFun'].values):
cfun = cdict.get(convfun, None)
if cfun:
conv_dict[colname] = cfun
if convfun == 'DateParser':
date_clip_cols.add(colname)
self.conv_dict = conv_dict
self.date_clip_cols = date_clip_cols
def _generate_agg_dict(self):
self.visit_agg = {}
self.pat_agg = {}
for colname, aggfun in zip(self.config_data['RawName'].values, self.config_data['AggFunc'].values):
if aggfun == aggfun:
self.visit_agg[colname] = aggfun
self.pat_agg[colname] = aggfun
def clip_dates(self):
maxday = datetime.today()
minday = datetime(1900,1,1)
for col in self.date_clip_cols:
self.redcap[col] = self.redcap[col].clip(lower = minday, upper = maxday)
def fix_visits(self):
def fix_v(vis):
if vis == 'first':
return 0.0
try:
return float(vis[1:])
except:
return None
self.redcap['VisitNum'] = self.redcap['Patient visit number'].apply(fix_v)
def CalcAge(self):
visit_years = self.redcap['Date of visit'].dropna().apply(lambda x:x.year)
birth_years = self.redcap['Year of Birth'].dropna()
self.redcap['CalcAge'] = visit_years-birth_years
def CalcGender(self):
self.redcap['GenotypicMale'] = (self.redcap['Gender'] == 'Male') | (self.redcap['Transgender designation'] == 'male to female')
self.redcap['IdentifiesMale'] = (self.redcap['Gender'] == 'Male') & (self.redcap['Transgender designation'] != 'male to female')
self.redcap['GenotypicFemale'] = (self.redcap['Gender'] == 'Female') | (self.redcap['Transgender designation'] == 'female to male')
self.redcap['IdentifiesFemale'] = (self.redcap['Gender'] == 'Female') & (self.redcap['Transgender designation'] != 'female to male')
def CalcBMI(self):
self.redcap['Weight-kg'] = self.redcap['Weight'].apply(safe_float)/2.2
self.redcap['Height-m'] = self.redcap['Height'].apply(feet2meters)
self.redcap['BMI'] = self.redcap['Weight-kg']/(self.redcap['Height-m']*self.redcap['Height-m'])
def CalcYearsSero(self):
visit_years = self.redcap['Date of visit'].dropna().apply(lambda x:x.year)
seropos_years = self.redcap['HIV seropositive date'].dropna().apply(lambda x:x.year)
self.redcap['Calc-Years-Seropositive'] = visit_years - seropos_years
def CalcExposure(self):
merge_cols = {'Exposure-MSM': ("Exposure Category (choice='Men who have sex with men (MSM)')",
"Exposure Category (choice='MSM and IDU')"),
'Exposure-IDU': ("Exposure Category (choice='Injection drug use (IDU)')",
"Exposure Category (choice='MSM and IDU')",
"Exposure Category (choice='Heterosexual and IDU')"),
'Exposure-Heterosexual': ("Exposure Category (choice='Heterosexual and IDU')",
"Exposure Category (choice='Heterosexual')"),
'Exposure-Hemophilia':("Exposure Category (choice='Hemophilia')",),
'Exposure-Transfusion':("Exposure Category (choice='Blood transfusion')",),
'Exposure-Perinatal':("Exposure Category (choice='Perinatal')",)
}
for merged_col, check_cols in merge_cols.items():
self.redcap[merged_col] = False
for col in check_cols:
self.redcap[merged_col] |= self.redcap[col]
def AddGroupNames(self):
self.groupnames = dict(zip([True, False], ['PosGroup', 'NegGroup']))
def CalcAll(self):
self.AddGroupNames()
self.fix_visits()
self.CalcAge()
self.CalcYearsSero()
self.CalcGender()
self.CalcBMI()
self.CalcExposure()
def ProcessVisits(self, visit_recap):
"""A method to subclass. Must return a DataFrame of the wanted visits."""
return visit_recap
def ProcessPatients(self, pat_redcap):
"""A method to subclass. Must return a DataFrame of the wanted patients."""
return pat_redcap
def ProcessRedcap(self):
gkey = ['Patient ID', 'Patient visit number']
visit_redcap = self.redcap.groupby(gkey).agg(self.visit_agg)
self.visit_redcap = self.ProcessVisits(visit_redcap)
gkey = 'Patient ID'
pat_redcap = self.visit_redcap.groupby(level=gkey).agg(self.pat_agg)
self.pat_redcap = self.ProcessPatients(pat_redcap)
def MakePatientGroups(self):
self.SplitGroups(self.pat_redcap, self.pat_agg)
def MakeVisitGroups(self):
aligned_data, _ = self.visit_redcap.align(self.pat_redcap,
level = 'Patient ID',
join = 'inner')
self.SplitGroups(aligned_data, self.visit_agg)
def AssignGroups(self, aligned_data):
raise NotImplementedError
def SplitGroups(self, aligned_data, agg_dict):
cur_levels = aligned_data.index.names
aligned_data = aligned_data.reset_index()
aligned_data['Grouping'] = self.AssignGroups(aligned_data)
self.all_group = aligned_data.groupby(['Grouping'] + cur_levels).agg(agg_dict)
def make_demo_figures(self):
self.AgeHist()
for key, group in self.config_data.groupby('PlotName'):
if ((group['PlotType'] == 'BarChart').all()) & ((group['DemographicFunction'] == 'ChoiceCount').all()):
try:
self.plot_bar_chart(group['RawName'].values, key)
except:
print('bad on ', key)
elif ((group['PlotType'] == 'BoxPlot').all()) & ((group['DemographicFunction'] == 'MeanFunc').all()):
self.make_box_plot(group['RawName'].values, key)
elif ((group['PlotType'] == 'LogBoxPlot').all()) & ((group['DemographicFunction'] == 'MeanFunc').all()):
self.make_log_box_plot(group['RawName'].values, key)
elif ((group['PlotType'] == 'BarChart').all()) & ((group['DemographicFunction'] == 'IntegerCount').all()):
print(key, group['RawName'].values)
self.make_integer_bar(group['RawName'].values[0], key)
def AgeHist(self):
bins = [20,30,40,50,60,70,80]
fig = plt.figure()
g1data = Series(np.histogram(self.all_group.ix[True]['CalcAge'].values, bins = bins)[0], index = bins[:-1])
g2data = Series(np.histogram(self.all_group.ix[False]['CalcAge'].values, bins = bins)[0], index = bins[:-1])
df = DataFrame({self.groupnames[True]:g1data,
self.groupnames[False]:g2data})
df.plot(kind = 'bar', grid = True)
plt.xlabel('Age at Visit')
plt.ylabel('#')
return fig, self.all_group['CalcAge']
def plot_bar_chart(self, items, title):
g1sum = self.all_group.ix[True][items].mean()*100
g2sum = self.all_group.ix[False][items].mean()*100
allsum = self.all_group[items].mean()*100
df = DataFrame({self.groupnames[True]:g1sum,
self.groupnames[False]:g2sum,
'All':allsum})
ncols = dict([(col, fix_col_name(col)) for col in df.index])
df = df.rename(index=ncols)
fig = plt.figure()
df.plot(kind = 'bar', ax = plt.gca(), grid = True)
plt.title(title)
plt.ylabel('%')
return fig, self.all_group[items]
def make_box_plot(self, items, title):
g1items = self.all_group.ix[True][items].reset_index()
g2items = self.all_group.ix[False][items].reset_index()
allitems = self.all_group[items].reset_index()
pltdata = [(allitems, 'All'),
(g1items, self.groupnames[True]),
(g2items, self.groupnames[False])]
odict = {}
for item, (data, name) in product(items, pltdata):
odict[item + '--' + name] = data[item]
fig = plt.figure()
df = DataFrame(odict)
df.boxplot(rot = 90, ax = plt.gca())
plt.title(title)
plt.ylabel('Value')
return fig, self.all_group[items]
def make_log_box_plot(self, items, title):
g1items = self.all_group.ix[True][items].reset_index()
g2items = self.all_group.ix[False][items].reset_index()
allitems = self.all_group[items].reset_index()
pltdata = [(allitems, 'All'),
(g1items, self.groupnames[True]),
(g2items, self.groupnames[False])]
odict = {}
for item, (data, name) in product(items, pltdata):
odict[item + '--' + name] = data[item]
fig = plt.figure()
df = DataFrame(odict)
df.apply(np.log10).boxplot(rot = 90, ax = plt.gca())
plt.title(title)
plt.ylabel('log10(Value)')
return fig, self.all_group[items]
def make_integer_bar(self, col, title):
if len(self.all_group[col].unique()) < 2:
return None, None
bins = np.arange(0, self.all_group[col].max()+1)
g1data = Series(np.histogram(self.all_group.ix[True][col].values, bins = bins)[0], index = bins[:-1])/len(self.all_group.ix[True])
g2data = Series(np.histogram(self.all_group.ix[False][col].values, bins = bins)[0], index = bins[:-1])/len(self.all_group.ix[False])
        alldata = Series(np.histogram(self.all_group[col].values, bins = bins)[0], index = bins[:-1])/len(self.all_group)
ndf = DataFrame({'All': alldata*100,
self.groupnames[True]: g1data*100,
self.groupnames[False]: g2data*100})
fig = plt.figure()
ndf.plot(kind = 'bar', ax = plt.gca(), grid = True)
plt.title(title)
return fig, self.all_group[col]
# <codecell>
class GenderPatData(PatData):
def AddGroupNames(self):
self.groupnames = dict(zip([True, False], ['Male', 'Female']))
def AssignGroups(self, visit_redcap):
return visit_redcap['IdentifiesMale']
# <codecell>
config_file = 'Data/Config/ReportFile.csv'
demo_file = 'Data/RedcapDumps/HIVAIDSGeneticAnalys_DATA_LABELS_2012-12-11_1720.csv'
tmp = GenderPatData(demo_file, config_file)
tmp.CalcAll()
# <codecell>
tmp.ProcessRedcap()
tmp.MakePatientGroups()
# <codecell>
tmp.make_demo_figures()
# <codecell>
class NeuroPatData(PatData):
def AddGroupNames(self):
self.groupnames = dict(zip([True, False], ['No Neuro', 'With Neuro']))
def AssignGroups(self, aligned_data):
return aligned_data["Mental Health Issues (choice='No neurological problems')"]
ntmp = NeuroPatData(None, None)
ntmp.CopyFromOtherData(tmp)
ntmp.AddGroupNames()
ntmp.MakePatientGroups()
ntmp.make_demo_figures()
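# <codecell>
# Illustrative sketch (not in the original notebook): ProcessVisits/ProcessPatients are the
# documented extension points, so a subclass can also restrict which rows enter the group
# comparison. The age cut below is hypothetical and assumes 'CalcAge' survives the
# per-patient aggregation defined in the config file.
class AdultPatData(GenderPatData):
    def ProcessPatients(self, pat_redcap):
        return pat_redcap[pat_redcap['CalcAge'] >= 18]
atmp = AdultPatData(None, None)
atmp.CopyFromOtherData(tmp)
atmp.AddGroupNames()
# atmp.ProcessRedcap()       # re-aggregate so the new ProcessPatients filter is applied
# atmp.MakePatientGroups()
# atmp.make_demo_figures()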
# <codecell>
from itertools import product
items = ['Neurocognitive test',
'MSK Score',
'Psychomotor Speed Score',
'Memory Recall Score',
'Constructional Score',
'Total Modified Hopkins Dementia Score',
]
col = items[-1]
# <codecell>
def safe_float(m):
try:
return float(m)
except:
return None
tmp.redcap['Weight'].apply(safe_float).hist()
# <codecell>
tmp.redcap
# <codecell>
|
DemoGraphs.py
|
# <codecell>
from pandas import read_csv, Series, DataFrame
import numpy as np
from itertools import product
import os, os.path
import csv
import matplotlib.pyplot as plt
import re
import dateutil.parser
os.chdir('/home/will/HIVReportGen/')
# <codecell>
def extract_YOB(inp):
try:
return float(inp.split('-')[0])
except AttributeError:
return float(inp)
except ValueError:
#print('Bad YOB', inp)
return np.nan
def safe_float(m, default = np.nan):
try:
return float(m)
except:
return default
def feet2meters(height):
if (height == 'ND') or (height != height):
return np.nan
try:
        res = re.findall(r'(\d).\s{0,1}(\d{0,2})\D?', height)
except TypeError:
#print(height)
raise TypeError
try:
ft = float(res[0][0])
inc = safe_float(res[0][1], default = 0.0)
except IndexError:
#print(height,res)
raise IndexError
except ValueError:
#print(height, res)
raise ValueError
tot_inches = ft*12+inc
meters = tot_inches*0.0254
if meters > 2:
print(meters, height, res)
return meters
def checkbox_conv(inp):
if inp != inp:
return np.nan
valdict = {
'checked':True,
'test positive':True,
'positive':True,
'yes':True,
'unchecked':False,
'test negative':False,
'negative':False,
        'no':False}
return valdict.get(inp.lower(), np.nan)
def verbose_parser(inp):
try:
return dateutil.parser.parse(inp)
except:
return np.nan
def fix_col_name(name):
if "(choice='" in name:
return name.split("(choice='",1)[1][:-2]
else:
return name
# <codecell>
from datetime import date, datetime
from pandas import merge
from copy import deepcopy
class PatData(object):
def __init__(self, redcap_file, config_file):
if redcap_file is None:
return
self.config_data = read_csv(config_file, sep = '\t')
self._generate_converter_dict()
self._generate_agg_dict()
with open(redcap_file) as handle:
handle.read(1)
self.redcap = read_csv(handle, converters=self.conv_dict)
self.clip_dates()
self.visit_redcap = None
self.pat_redcap = None
def CopyFromOtherData(self, OtherData):
self.config_data = OtherData.config_data.copy()
self.redcap = OtherData.redcap.copy()
self.conv_dict = deepcopy(OtherData.conv_dict)
self.date_clip_cols = deepcopy(OtherData.date_clip_cols)
self.visit_agg = deepcopy(OtherData.visit_agg)
self.pat_agg = deepcopy(OtherData.pat_agg)
if OtherData.pat_redcap is not None:
self.pat_redcap = OtherData.pat_redcap.copy()
if OtherData.visit_redcap is not None:
self.visit_redcap = OtherData.visit_redcap.copy()
if OtherData.all_group is not None:
self.all_group = OtherData.all_group.copy()
def _generate_converter_dict(self):
cdict = {
'DateParser':verbose_parser,
'extract_YOB':extract_YOB,
'checkbox_conv':checkbox_conv,
'safe_float':safe_float
}
conv_dict = {}
date_clip_cols = set()
for colname, convfun in zip(self.config_data['RawName'].values, self.config_data['ConvertFun'].values):
cfun = cdict.get(convfun, None)
if cfun:
conv_dict[colname] = cfun
if convfun == 'DateParser':
date_clip_cols.add(colname)
self.conv_dict = conv_dict
self.date_clip_cols = date_clip_cols
def _generate_agg_dict(self):
self.visit_agg = {}
self.pat_agg = {}
for colname, aggfun in zip(self.config_data['RawName'].values, self.config_data['AggFunc'].values):
if aggfun == aggfun:
self.visit_agg[colname] = aggfun
self.pat_agg[colname] = aggfun
def clip_dates(self):
maxday = datetime.today()
minday = datetime(1900,1,1)
for col in self.date_clip_cols:
self.redcap[col] = self.redcap[col].clip(lower = minday, upper = maxday)
def fix_visits(self):
def fix_v(vis):
if vis == 'first':
return 0.0
try:
return float(vis[1:])
except:
return None
self.redcap['VisitNum'] = self.redcap['Patient visit number'].apply(fix_v)
def CalcAge(self):
visit_years = self.redcap['Date of visit'].dropna().apply(lambda x:x.year)
birth_years = self.redcap['Year of Birth'].dropna()
self.redcap['CalcAge'] = visit_years-birth_years
def CalcGender(self):
self.redcap['GenotypicMale'] = (self.redcap['Gender'] == 'Male') | (self.redcap['Transgender designation'] == 'male to female')
self.redcap['IdentifiesMale'] = (self.redcap['Gender'] == 'Male') & (self.redcap['Transgender designation'] != 'male to female')
self.redcap['GenotypicFemale'] = (self.redcap['Gender'] == 'Female') | (self.redcap['Transgender designation'] == 'female to male')
self.redcap['IdentifiesFemale'] = (self.redcap['Gender'] == 'Female') & (self.redcap['Transgender designation'] != 'female to male')
def CalcBMI(self):
self.redcap['Weight-kg'] = self.redcap['Weight'].apply(safe_float)/2.2
self.redcap['Height-m'] = self.redcap['Height'].apply(feet2meters)
self.redcap['BMI'] = self.redcap['Weight-kg']/(self.redcap['Height-m']*self.redcap['Height-m'])
def CalcYearsSero(self):
visit_years = self.redcap['Date of visit'].dropna().apply(lambda x:x.year)
seropos_years = self.redcap['HIV seropositive date'].dropna().apply(lambda x:x.year)
self.redcap['Calc-Years-Seropositive'] = visit_years - seropos_years
def CalcExposure(self):
merge_cols = {'Exposure-MSM': ("Exposure Category (choice='Men who have sex with men (MSM)')",
"Exposure Category (choice='MSM and IDU')"),
'Exposure-IDU': ("Exposure Category (choice='Injection drug use (IDU)')",
"Exposure Category (choice='MSM and IDU')",
"Exposure Category (choice='Heterosexual and IDU')"),
'Exposure-Heterosexual': ("Exposure Category (choice='Heterosexual and IDU')",
"Exposure Category (choice='Heterosexual')"),
'Exposure-Hemophilia':("Exposure Category (choice='Hemophilia')",),
'Exposure-Transfusion':("Exposure Category (choice='Blood transfusion')",),
'Exposure-Perinatal':("Exposure Category (choice='Perinatal')",)
}
for merged_col, check_cols in merge_cols.items():
self.redcap[merged_col] = False
for col in check_cols:
self.redcap[merged_col] |= self.redcap[col]
def AddGroupNames(self):
self.groupnames = dict(zip([True, False], ['PosGroup', 'NegGroup']))
def CalcAll(self):
self.AddGroupNames()
self.fix_visits()
self.CalcAge()
self.CalcYearsSero()
self.CalcGender()
self.CalcBMI()
self.CalcExposure()
def ProcessVisits(self, visit_recap):
"""A method to subclass. Must return a DataFrame of the wanted visits."""
return visit_recap
def ProcessPatients(self, pat_redcap):
"""A method to subclass. Must return a DataFrame of the wanted patients."""
return pat_redcap
def ProcessRedcap(self):
gkey = ['Patient ID', 'Patient visit number']
visit_redcap = self.redcap.groupby(gkey).agg(self.visit_agg)
self.visit_redcap = self.ProcessVisits(visit_redcap)
gkey = 'Patient ID'
pat_redcap = self.visit_redcap.groupby(level=gkey).agg(self.pat_agg)
self.pat_redcap = self.ProcessPatients(pat_redcap)
def MakePatientGroups(self):
self.SplitGroups(self.pat_redcap, self.pat_agg)
def MakeVisitGroups(self):
aligned_data, _ = self.visit_redcap.align(self.pat_redcap,
level = 'Patient ID',
join = 'inner')
self.SplitGroups(aligned_data, self.visit_agg)
def AssignGroups(self, aligned_data):
raise NotImplementedError
def SplitGroups(self, aligned_data, agg_dict):
cur_levels = aligned_data.index.names
aligned_data = aligned_data.reset_index()
aligned_data['Grouping'] = self.AssignGroups(aligned_data)
self.all_group = aligned_data.groupby(['Grouping'] + cur_levels).agg(agg_dict)
def make_demo_figures(self):
self.AgeHist()
for key, group in self.config_data.groupby('PlotName'):
if ((group['PlotType'] == 'BarChart').all()) & ((group['DemographicFunction'] == 'ChoiceCount').all()):
try:
self.plot_bar_chart(group['RawName'].values, key)
except:
print('bad on ', key)
elif ((group['PlotType'] == 'BoxPlot').all()) & ((group['DemographicFunction'] == 'MeanFunc').all()):
self.make_box_plot(group['RawName'].values, key)
elif ((group['PlotType'] == 'LogBoxPlot').all()) & ((group['DemographicFunction'] == 'MeanFunc').all()):
self.make_log_box_plot(group['RawName'].values, key)
elif ((group['PlotType'] == 'BarChart').all()) & ((group['DemographicFunction'] == 'IntegerCount').all()):
print(key, group['RawName'].values)
self.make_integer_bar(group['RawName'].values[0], key)
def AgeHist(self):
bins = [20,30,40,50,60,70,80]
fig = plt.figure()
g1data = Series(np.histogram(self.all_group.ix[True]['CalcAge'].values, bins = bins)[0], index = bins[:-1])
g2data = Series(np.histogram(self.all_group.ix[False]['CalcAge'].values, bins = bins)[0], index = bins[:-1])
df = DataFrame({self.groupnames[True]:g1data,
self.groupnames[False]:g2data})
df.plot(kind = 'bar', grid = True)
plt.xlabel('Age at Visit')
plt.ylabel('#')
return fig, self.all_group['CalcAge']
def plot_bar_chart(self, items, title):
g1sum = self.all_group.ix[True][items].mean()*100
g2sum = self.all_group.ix[False][items].mean()*100
allsum = self.all_group[items].mean()*100
df = DataFrame({self.groupnames[True]:g1sum,
self.groupnames[False]:g2sum,
'All':allsum})
ncols = dict([(col, fix_col_name(col)) for col in df.index])
df = df.rename(index=ncols)
fig = plt.figure()
df.plot(kind = 'bar', ax = plt.gca(), grid = True)
plt.title(title)
plt.ylabel('%')
return fig, self.all_group[items]
def make_box_plot(self, items, title):
g1items = self.all_group.ix[True][items].reset_index()
g2items = self.all_group.ix[False][items].reset_index()
allitems = self.all_group[items].reset_index()
pltdata = [(allitems, 'All'),
(g1items, self.groupnames[True]),
(g2items, self.groupnames[False])]
odict = {}
for item, (data, name) in product(items, pltdata):
odict[item + '--' + name] = data[item]
fig = plt.figure()
df = DataFrame(odict)
df.boxplot(rot = 90, ax = plt.gca())
plt.title(title)
plt.ylabel('Value')
return fig, self.all_group[items]
def make_log_box_plot(self, items, title):
g1items = self.all_group.ix[True][items].reset_index()
g2items = self.all_group.ix[False][items].reset_index()
allitems = self.all_group[items].reset_index()
pltdata = [(allitems, 'All'),
(g1items, self.groupnames[True]),
(g2items, self.groupnames[False])]
odict = {}
for item, (data, name) in product(items, pltdata):
odict[item + '--' + name] = data[item]
fig = plt.figure()
df = DataFrame(odict)
df.apply(np.log10).boxplot(rot = 90, ax = plt.gca())
plt.title(title)
plt.ylabel('log10(Value)')
return fig, self.all_group[items]
def make_integer_bar(self, col, title):
if len(self.all_group[col].unique()) < 2:
return None, None
bins = np.arange(0, self.all_group[col].max()+1)
g1data = Series(np.histogram(self.all_group.ix[True][col].values, bins = bins)[0], index = bins[:-1])/len(self.all_group.ix[True])
g2data = Series(np.histogram(self.all_group.ix[False][col].values, bins = bins)[0], index = bins[:-1])/len(self.all_group.ix[False])
        alldata = Series(np.histogram(self.all_group[col].values, bins = bins)[0], index = bins[:-1])/len(self.all_group)
ndf = DataFrame({'All': alldata*100,
self.groupnames[True]: g1data*100,
self.groupnames[False]: g2data*100})
fig = plt.figure()
ndf.plot(kind = 'bar', ax = plt.gca(), grid = True)
plt.title(title)
return fig, self.all_group[col]
# <codecell>
class GenderPatData(PatData):
def AddGroupNames(self):
self.groupnames = dict(zip([True, False], ['Male', 'Female']))
def AssignGroups(self, visit_redcap):
return visit_redcap['IdentifiesMale']
# <codecell>
config_file = 'Data/Config/ReportFile.csv'
demo_file = 'Data/RedcapDumps/HIVAIDSGeneticAnalys_DATA_LABELS_2012-12-11_1720.csv'
tmp = GenderPatData(demo_file, config_file)
tmp.CalcAll()
# <codecell>
tmp.ProcessRedcap()
tmp.MakePatientGroups()
# <codecell>
tmp.make_demo_figures()
# <codecell>
class NeuroPatData(PatData):
def AddGroupNames(self):
self.groupnames = dict(zip([True, False], ['No Neuro', 'With Neuro']))
def AssignGroups(self, aligned_data):
return aligned_data["Mental Health Issues (choice='No neurological problems')"]
ntmp = NeuroPatData(None, None)
ntmp.CopyFromOtherData(tmp)
ntmp.AddGroupNames()
ntmp.MakePatientGroups()
ntmp.make_demo_figures()
# <codecell>
from itertools import product
items = ['Neurocognitive test',
'MSK Score',
'Psychomotor Speed Score',
'Memory Recall Score',
'Constructional Score',
'Total Modified Hopkins Dementia Score',
]
col = items[-1]
# <codecell>
def safe_float(m):
try:
return float(m)
except:
return None
tmp.redcap['Weight'].apply(safe_float).hist()
# <codecell>
tmp.redcap
# <codecell>
| 0.248717 | 0.211702 |
import atexit
import builtins
import os
import tempfile
import webbrowser
from types import TracebackType
from typing import Any, Dict, Iterator, Optional, Union
from urllib import parse
import certifi
import lomond
import requests
import simplejson
import determined_common.requests
from determined_common.api import authentication, errors
# The path to a file containing an SSL certificate to trust specifically for the master, if any, or
# False to disable cert verification entirely. If set to a path, it should always be a temporary
# file that we own and can delete.
_master_cert_bundle = None # type: Optional[Union[str, bool]]
# The name we use to verify the master.
_master_cert_name = None
def set_master_cert_bundle(path: Optional[Union[str, bool]]) -> None:
global _master_cert_bundle
if path == "":
path = None
if path is None or isinstance(path, bool):
_master_cert_bundle = path
return
# Don't use NamedTemporaryFile, since it would make the file inaccessible by path on Windows
# after this (see https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile).
fd, combined_path = tempfile.mkstemp(prefix="det-master-cert-")
atexit.register(os.unlink, combined_path)
with builtins.open(fd, "wb") as out:
with builtins.open(certifi.where(), "rb") as base_certs:
out.write(base_certs.read())
out.write(b"\n")
with builtins.open(path, "rb") as custom_certs:
out.write(custom_certs.read())
_master_cert_bundle = combined_path
def set_master_cert_name(name: Optional[str]) -> None:
if name == "":
name = None
global _master_cert_name
_master_cert_name = name
# Set the bundle if one is specified by the environment. This is done on import since we can't
# always count on having an entry point we control (e.g., if someone is importing this code in a
# notebook).
f = os.environ.get("DET_MASTER_CERT_FILE")
if f and f.lower() == "noverify":
set_master_cert_bundle(False)
else:
set_master_cert_bundle(f)
del f
# Set the master servername from the environment.
set_master_cert_name(os.environ.get("DET_MASTER_CERT_NAME"))
def get_master_cert_bundle() -> Optional[Union[str, bool]]:
return _master_cert_bundle
def get_master_cert_name() -> Optional[str]:
return _master_cert_name
def parse_master_address(master_address: str) -> parse.ParseResult:
if master_address.startswith("https://"):
default_port = 443
elif master_address.startswith("http://"):
default_port = 80
else:
default_port = 8080
master_address = "http://{}".format(master_address)
parsed = parse.urlparse(master_address)
if not parsed.port:
parsed = parsed._replace(netloc="{}:{}".format(parsed.netloc, default_port))
return parsed
def make_url(master_address: str, suffix: str) -> str:
parsed = parse_master_address(master_address)
return parse.urljoin(parsed.geturl(), suffix)
def maybe_upgrade_ws_scheme(master_address: str) -> str:
parsed = parse.urlparse(master_address)
if parsed.scheme == "https":
return parsed._replace(scheme="wss").geturl()
elif parsed.scheme == "http":
return parsed._replace(scheme="ws").geturl()
else:
return master_address
def add_token_to_headers(headers: Dict[str, str]) -> Dict[str, str]:
task_token = authentication.Authentication.instance().get_task_token()
if task_token:
return {**headers, "Grpc-Metadata-x-task-token": "Bearer {}".format(task_token)}
token = authentication.Authentication.instance().get_session_token()
return {**headers, "Authorization": "Bearer {}".format(token)}
def do_request(
method: str,
host: str,
path: str,
params: Optional[Dict[str, Any]] = None,
body: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
stream: bool = False,
) -> requests.Response:
if headers is None:
h = {} # type: Dict[str, str]
else:
h = headers
if params is None:
params = {}
if authenticated:
h = add_token_to_headers(h)
try:
r = determined_common.requests.request(
method,
make_url(host, path),
params=params,
json=body,
headers=h,
verify=_master_cert_bundle,
stream=stream,
server_hostname=_master_cert_name,
)
except requests.exceptions.SSLError:
raise
except requests.exceptions.ConnectionError as e:
raise errors.MasterNotFoundException(str(e))
except requests.exceptions.RequestException as e:
raise errors.BadRequestException(str(e))
if r.status_code == 403:
username = authentication.Authentication.instance().get_session_user()
raise errors.UnauthenticatedException(username=username)
if r.status_code >= 300:
raise errors.APIException(r)
return r
def get(
host: str,
path: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
stream: bool = False,
) -> requests.Response:
"""
Send a GET request to the remote API.
"""
return do_request(
"GET",
host,
path,
params=params,
headers=headers,
authenticated=authenticated,
stream=stream,
)
def delete(
host: str,
path: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
) -> requests.Response:
"""
Send a DELETE request to the remote API.
"""
return do_request(
"DELETE", host, path, params=params, headers=headers, authenticated=authenticated
)
def post(
host: str,
path: str,
body: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
) -> requests.Response:
"""
Send a POST request to the remote API.
"""
return do_request("POST", host, path, body=body, headers=headers, authenticated=authenticated)
def patch(
host: str,
path: str,
body: Dict[str, Any],
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
) -> requests.Response:
"""
Send a PATCH request to the remote API.
"""
return do_request("PATCH", host, path, body=body, headers=headers, authenticated=authenticated)
def put(
host: str,
path: str,
body: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
authenticated: bool = True,
) -> requests.Response:
"""
Send a PUT request to the remote API.
"""
return do_request("PUT", host, path, body=body, headers=headers, authenticated=authenticated)
def open(host: str, path: str) -> str:
url = make_url(host, path)
webbrowser.open(url)
return url
class WebSocket:
def __init__(self, socket: lomond.WebSocket) -> None:
self.socket = socket
def __enter__(self) -> "WebSocket":
return self
def __iter__(self) -> Iterator[Any]:
for event in self.socket.connect(ping_rate=0):
if isinstance(event, lomond.events.Connected):
# Ignore the initial connection event.
pass
elif isinstance(event, (lomond.events.Closing, lomond.events.Disconnected)):
# The socket was successfully closed so we just return.
return
elif isinstance(
event,
(lomond.events.ConnectFail, lomond.events.Rejected, lomond.events.ProtocolError),
):
# Any unexpected failures raise the standard API exception.
raise errors.BadRequestException(message="WebSocket failure: {}".format(event))
elif isinstance(event, lomond.events.Text):
# All web socket connections are expected to be in a JSON
# format.
yield simplejson.loads(event.text)
def __exit__(
self,
exc_type: Optional[type],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
if not self.socket.is_closed:
self.socket.close()
def ws(host: str, path: str) -> WebSocket:
"""
Connect to a web socket at the remote API.
"""
websocket = lomond.WebSocket(maybe_upgrade_ws_scheme(make_url(host, path)))
token = authentication.Authentication.instance().get_session_token()
websocket.add_header("Authorization".encode(), "Bearer {}".format(token).encode())
return WebSocket(websocket)
|
common/determined_common/api/request.py
|
| 0.68342 | 0.24979 |
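A minimal usage sketch for the request helpers in common/determined_common/api/request.py above. The master address and the endpoint paths are illustrative placeholders (not documented routes), and an already-authenticated session via determined_common.api.authentication is assumed.
from determined_common.api import request

master = "https://determined.example.com:8443"  # placeholder address

# Plain GET: do_request() adds the Authorization header and verifies TLS
# against the configured master cert bundle and servername.
resp = request.get(master, "api/v1/experiments", params={"limit": 10})
print(resp.status_code)

# Stream JSON messages over a WebSocket until the server closes it.
with request.ws(master, "some/streaming/endpoint") as socket:
    for message in socket:
        print(message)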
import importlib
import os
from pathlib import Path
from typing import Any
import pytest
from pydolphinscheduler.core import configuration
from pydolphinscheduler.core.configuration import (
BUILD_IN_CONFIG_PATH,
config_path,
get_single_config,
set_single_config,
)
from pydolphinscheduler.exceptions import PyDSConfException
from pydolphinscheduler.utils.yaml_parser import YamlParser
from tests.testing.constants import DEV_MODE, ENV_PYDS_HOME
from tests.testing.file import get_file_content
@pytest.fixture
def teardown_file_env():
"""Fixture that deletes the temporary configuration file and pops the env var after the test finishes."""
yield
config_file_path = config_path()
if config_file_path.exists():
config_file_path.unlink()
os.environ.pop(ENV_PYDS_HOME, None)
@pytest.mark.parametrize(
"val, expect",
[
("1", 1),
("123", 123),
("4567", 4567),
(b"1234", 1234),
],
)
def test_get_int(val: Any, expect: int):
"""Test function :func:`configuration.get_int`."""
assert configuration.get_int(val) == expect
@pytest.mark.parametrize(
"val",
[
"a",
"1a",
"1d2",
"1723-",
],
)
def test_get_int_error(val: Any):
"""Test function :func:`configuration.get_int`."""
with pytest.raises(ValueError):
configuration.get_int(val)
@pytest.mark.parametrize(
"val, expect",
[
("t", True),
("true", True),
(1, True),
(True, True),
("f", False),
("false", False),
(0, False),
(123, False),
("abc", False),
("abc1", False),
(False, False),
],
)
def test_get_bool(val: Any, expect: bool):
"""Test function :func:`configuration.get_bool`."""
assert configuration.get_bool(val) == expect
@pytest.mark.parametrize(
"home, expect",
[
(None, "~/pydolphinscheduler/config.yaml"),
("/tmp/pydolphinscheduler", "/tmp/pydolphinscheduler/config.yaml"),
("/tmp/test_abc", "/tmp/test_abc/config.yaml"),
],
)
def test_config_path(home: Any, expect: str):
"""Test function :func:`config_path`."""
if home:
os.environ[ENV_PYDS_HOME] = home
assert Path(expect).expanduser() == configuration.config_path()
@pytest.mark.parametrize(
"home",
[
None,
"/tmp/pydolphinscheduler",
"/tmp/test_abc",
],
)
def test_init_config_file(teardown_file_env, home: Any):
"""Test init config file."""
if home:
os.environ[ENV_PYDS_HOME] = home
elif DEV_MODE:
pytest.skip(
"Avoid deleting ~/pydolphinscheduler/config.yaml by accident when testing locally."
)
assert not config_path().exists()
configuration.init_config_file()
assert config_path().exists()
assert get_file_content(config_path()) == get_file_content(BUILD_IN_CONFIG_PATH)
@pytest.mark.parametrize(
"home",
[
None,
"/tmp/pydolphinscheduler",
"/tmp/test_abc",
],
)
def test_init_config_file_duplicate(teardown_file_env, home: Any):
"""Test that init_config_file raises an error when the config file already exists."""
if home:
os.environ[ENV_PYDS_HOME] = home
elif DEV_MODE:
pytest.skip(
"Avoid deleting ~/pydolphinscheduler/config.yaml by accident when testing locally."
)
assert not config_path().exists()
configuration.init_config_file()
assert config_path().exists()
with pytest.raises(PyDSConfException, match=".*file already exists.*"):
configuration.init_config_file()
def test_get_configs_build_in():
"""Test function :func:`get_configs` with the built-in config file."""
content = get_file_content(BUILD_IN_CONFIG_PATH)
assert YamlParser(content).src_parser == configuration.get_configs().src_parser
assert YamlParser(content).dict_parser == configuration.get_configs().dict_parser
@pytest.mark.parametrize(
"key, val, new_val",
[
("java_gateway.address", "127.0.0.1", "127.1.1.1"),
("java_gateway.port", 25333, 25555),
("java_gateway.auto_convert", True, False),
("default.user.name", "userPythonGateway", "editUserPythonGateway"),
("default.user.password", "<PASSWORD>", "editUserPythonGateway"),
(
"default.user.email",
"<EMAIL>",
"<EMAIL>",
),
("default.user.phone", 11111111111, 22222222222),
("default.user.state", 1, 0),
("default.workflow.project", "project-pydolphin", "eidt-project-pydolphin"),
("default.workflow.tenant", "tenant_pydolphin", "edit_tenant_pydolphin"),
("default.workflow.user", "userPythonGateway", "editUserPythonGateway"),
("default.workflow.queue", "queuePythonGateway", "editQueuePythonGateway"),
("default.workflow.worker_group", "default", "specific"),
("default.workflow.time_zone", "Asia/Shanghai", "Asia/Beijing"),
("default.workflow.warning_type", "NONE", "ALL"),
],
)
def test_single_config_get_set(teardown_file_env, key: str, val: Any, new_val: Any):
"""Test function :func:`get_single_config` and :func:`set_single_config`."""
assert val == get_single_config(key)
set_single_config(key, new_val)
assert new_val == get_single_config(key)
def test_single_config_get_set_not_exists_key():
"""Test that :func:`get_single_config` and :func:`set_single_config` raise an error when the key does not exist."""
not_exists_key = "i_am_not_exists_key"
with pytest.raises(PyDSConfException, match=".*do not exists.*"):
get_single_config(not_exists_key)
with pytest.raises(PyDSConfException, match=".*do not exists.*"):
set_single_config(not_exists_key, not_exists_key)
@pytest.mark.parametrize(
"config_name, expect",
[
("JAVA_GATEWAY_ADDRESS", "127.0.0.1"),
("JAVA_GATEWAY_PORT", 25333),
("JAVA_GATEWAY_AUTO_CONVERT", True),
("USER_NAME", "userPythonGateway"),
("USER_PASSWORD", "<PASSWORD>"),
("USER_EMAIL", "<EMAIL>"),
("USER_PHONE", "11111111111"),
("USER_STATE", 1),
("WORKFLOW_PROJECT", "project-pydolphin"),
("WORKFLOW_TENANT", "tenant_pydolphin"),
("WORKFLOW_USER", "userPythonGateway"),
("WORKFLOW_QUEUE", "queuePythonGateway"),
("WORKFLOW_WORKER_GROUP", "default"),
("WORKFLOW_TIME_ZONE", "Asia/Shanghai"),
("WORKFLOW_WARNING_TYPE", "NONE"),
],
)
def test_get_configuration(config_name: str, expect: Any):
"""Test getting an existing attribute of :mod:`configuration`."""
assert expect == getattr(configuration, config_name)
@pytest.mark.parametrize(
"config_name, src, dest",
[
("JAVA_GATEWAY_ADDRESS", "127.0.0.1", "192.168.1.1"),
("JAVA_GATEWAY_PORT", 25333, 25334),
("JAVA_GATEWAY_AUTO_CONVERT", True, False),
("USER_NAME", "userPythonGateway", "envUserPythonGateway"),
("USER_PASSWORD", "<PASSWORD>", "envUserPythonGateway"),
(
"USER_EMAIL",
"<EMAIL>",
"<EMAIL>",
),
("USER_PHONE", "11111111111", "22222222222"),
("USER_STATE", 1, 0),
("WORKFLOW_PROJECT", "project-pydolphin", "env-project-pydolphin"),
("WORKFLOW_TENANT", "tenant_pydolphin", "env-tenant_pydolphin"),
("WORKFLOW_USER", "userPythonGateway", "envUserPythonGateway"),
("WORKFLOW_QUEUE", "queuePythonGateway", "envQueuePythonGateway"),
("WORKFLOW_WORKER_GROUP", "default", "custom"),
("WORKFLOW_TIME_ZONE", "Asia/Shanghai", "America/Los_Angeles"),
("WORKFLOW_WARNING_TYPE", "NONE", "ALL"),
],
)
def test_get_configuration_env(config_name: str, src: Any, dest: Any):
"""Test that an existing attribute of :mod:`configuration` is overridden via an environment variable and restored afterwards."""
assert getattr(configuration, config_name) == src
env_name = f"PYDS_{config_name}"
os.environ[env_name] = str(dest)
# reload module configuration to re-get config from environment.
importlib.reload(configuration)
assert getattr(configuration, config_name) == dest
# pop the env var and reload configuration to check that the value falls back to `src`
os.environ.pop(env_name, None)
importlib.reload(configuration)
assert getattr(configuration, config_name) == src
assert env_name not in os.environ
|
dolphinscheduler-python/pydolphinscheduler/tests/core/test_configuration.py
|
| 0.615435 | 0.322993 |
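A condensed sketch of the override behaviour exercised by test_get_configuration_env above: module-level settings of pydolphinscheduler.core.configuration are re-read from PYDS_*-prefixed environment variables when the module is reloaded.
import importlib
import os

from pydolphinscheduler.core import configuration

# Override one setting through the environment and reload the module.
os.environ["PYDS_JAVA_GATEWAY_ADDRESS"] = "192.168.1.1"
importlib.reload(configuration)
assert configuration.JAVA_GATEWAY_ADDRESS == "192.168.1.1"

# Drop the override; after another reload the configured default
# (127.0.0.1 in the built-in config the tests rely on) is back.
os.environ.pop("PYDS_JAVA_GATEWAY_ADDRESS", None)
importlib.reload(configuration)
assert configuration.JAVA_GATEWAY_ADDRESS == "127.0.0.1"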
from __future__ import with_statement
from pybench import Test
class WithFinally(Test):
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
pass
def test(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
def calibrate(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
pass
class TryFinally(Test):
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self):
# "Context manager" objects used just for their cleanup
# actions in finally blocks usually don't have parameters.
pass
def test(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
def calibrate(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
pass
class WithRaiseExcept(Test):
version = 2.0
operations = 2 + 3 + 3
rounds = 100000
class BlockExceptions(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
return True
def test(self):
error = ValueError
be = self.BlockExceptions()
for i in xrange(self.rounds):
with be: raise error
with be: raise error
with be: raise error,"something"
with be: raise error,"something"
with be: raise error,"something"
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
def calibrate(self):
error = ValueError
be = self.BlockExceptions()
for i in xrange(self.rounds):
pass
|
tests/benchmarks/pybench/With.py
|
| 0.493409 | 0.127979 |
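For reference, the two patterns that WithFinally and TryFinally above time against each other, written out once in modern Python (the benchmark itself targets Python 2, hence xrange and the raise error, "something" syntax).
class ContextManager:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False  # do not suppress exceptions

cm = ContextManager()

# What WithFinally measures:
with cm:
    pass

# What TryFinally measures: the same protocol spelled out by hand. (The
# benchmark's own ContextManager takes no arguments in __exit__, a
# simplification its comment points out.)
cm.__enter__()
try:
    pass
finally:
    cm.__exit__(None, None, None)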
import os
import sys
import json
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class WindowClassificationTrainUpdateLossParam(QtWidgets.QWidget):
forward_train = QtCore.pyqtSignal();
backward_scheduler_param = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.cfg_setup()
self.title = 'Experiment {} - Update Loss Params'.format(self.system["experiment"])
self.left = 10
self.top = 10
self.width = 900
self.height = 600
self.loss_ui_mxnet = [];
self.loss_ui_keras = [];
self.loss_ui_pytorch = [];
self.current_loss_ = {};
self.current_loss_["name"] = "";
self.current_loss_["params"] = {};
self.initUI()
def cfg_setup(self):
with open('base_classification.json') as json_file:
self.system = json.load(json_file)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back', self)
self.b1.move(600,550)
self.b1.clicked.connect(self.backward)
# Forward
self.b2 = QPushButton('Next', self)
self.b2.move(700,550)
self.b2.clicked.connect(self.forward)
# Quit
self.b3 = QPushButton('Quit', self)
self.b3.move(800,550)
self.b3.clicked.connect(self.close)
self.cb1 = QComboBox(self);
self.cb1.move(20, 20);
self.cb1.activated.connect(self.select_loss);
self.cb2 = QComboBox(self);
self.cb2.move(20, 20);
self.cb2.activated.connect(self.select_loss);
self.cb3 = QComboBox(self);
self.cb3.move(20, 20);
self.cb3.activated.connect(self.select_loss);
self.mxnet_losses_list = ["select", "loss_l1", "loss_l2", "loss_softmax_crossentropy", "loss_crossentropy",
"loss_sigmoid_binary_crossentropy", "loss_binary_crossentropy",
"loss_kldiv", "loss_poisson_nll", "loss_huber", "loss_hinge",
"loss_squared_hinge"];
self.keras_losses_list = ["select", "loss_l1", "loss_l2", "loss_crossentropy", "loss_binary_crossentropy",
"loss_kldiv", "loss_hinge", "loss_squared_hinge"];
self.pytorch_losses_list = ["select", "loss_l1", "loss_l2", "loss_softmax_crossentropy", "loss_crossentropy",
"loss_sigmoid_binary_crossentropy", "loss_binary_crossentropy",
"loss_kldiv", "loss_poisson_nll", "loss_huber", "loss_hinge",
"loss_squared_hinge", "loss_multimargin", "loss_squared_multimargin",
"loss_multilabel_margin", "loss_multilabel_softmargin"];
if(self.system["backend"] == "Mxnet-1.5.1"):
self.cb1.addItems(self.mxnet_losses_list);
self.cb1.show();
self.cb2.hide();
self.cb3.hide();
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
self.cb2.addItems(self.keras_losses_list);
self.cb2.show();
self.cb1.hide();
self.cb3.hide();
elif(self.system["backend"] == "Pytorch-1.3.1"):
self.cb3.addItems(self.pytorch_losses_list);
self.cb3.show();
self.cb1.hide();
self.cb2.hide();
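# The blocks below build one group of parameter widgets per loss function.
# Each group is appended to loss_ui_mxnet / loss_ui_keras / loss_ui_pytorch
# in the same order as the corresponding *_losses_list (minus the leading
# "select" entry), so select_loss() can show or hide a group by list index.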
tmp = [];
self.mx_lo1_l1 = QLabel(self);
self.mx_lo1_l1.setText("1. Scalar Weight: ");
self.mx_lo1_l1.move(20, 100);
tmp.append(self.mx_lo1_l1);
self.mx_lo1_e1 = QLineEdit(self)
self.mx_lo1_e1.move(150, 100);
self.mx_lo1_e1.setText("1.0");
tmp.append(self.mx_lo1_e1);
self.mx_lo1_l2 = QLabel(self);
self.mx_lo1_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo1_l2.move(20, 150);
tmp.append(self.mx_lo1_l2);
self.mx_lo1_e2 = QLineEdit(self)
self.mx_lo1_e2.move(290, 150);
self.mx_lo1_e2.setText("0");
tmp.append(self.mx_lo1_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo2_l1 = QLabel(self);
self.mx_lo2_l1.setText("1. Scalar Weight: ");
self.mx_lo2_l1.move(20, 100);
tmp.append(self.mx_lo2_l1);
self.mx_lo2_e1 = QLineEdit(self)
self.mx_lo2_e1.move(150, 100);
self.mx_lo2_e1.setText("1.0");
tmp.append(self.mx_lo2_e1);
self.mx_lo2_l2 = QLabel(self);
self.mx_lo2_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo2_l2.move(20, 150);
tmp.append(self.mx_lo2_l2);
self.mx_lo2_e2 = QLineEdit(self)
self.mx_lo2_e2.move(290, 150);
self.mx_lo2_e2.setText("0");
tmp.append(self.mx_lo2_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo3_l1 = QLabel(self);
self.mx_lo3_l1.setText("1. Scalar Weight: ");
self.mx_lo3_l1.move(20, 100);
tmp.append(self.mx_lo3_l1);
self.mx_lo3_e1 = QLineEdit(self)
self.mx_lo3_e1.move(150, 100);
self.mx_lo3_e1.setText("1.0");
tmp.append(self.mx_lo3_e1);
self.mx_lo3_l2 = QLabel(self);
self.mx_lo3_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo3_l2.move(20, 150);
tmp.append(self.mx_lo3_l2);
self.mx_lo3_e2 = QLineEdit(self)
self.mx_lo3_e2.move(290, 150);
self.mx_lo3_e2.setText("0");
tmp.append(self.mx_lo3_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo4_l1 = QLabel(self);
self.mx_lo4_l1.setText("1. Scalar Weight: ");
self.mx_lo4_l1.move(20, 100);
tmp.append(self.mx_lo4_l1);
self.mx_lo4_e1 = QLineEdit(self)
self.mx_lo4_e1.move(150, 100);
self.mx_lo4_e1.setText("1.0");
tmp.append(self.mx_lo4_e1);
self.mx_lo4_l2 = QLabel(self);
self.mx_lo4_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo4_l2.move(20, 150);
tmp.append(self.mx_lo4_l2);
self.mx_lo4_e2 = QLineEdit(self)
self.mx_lo4_e2.move(290, 150);
self.mx_lo4_e2.setText("0");
tmp.append(self.mx_lo4_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo5_l1 = QLabel(self);
self.mx_lo5_l1.setText("1. Scalar Weight: ");
self.mx_lo5_l1.move(20, 100);
tmp.append(self.mx_lo5_l1);
self.mx_lo5_e1 = QLineEdit(self)
self.mx_lo5_e1.move(150, 100);
self.mx_lo5_e1.setText("1.0");
tmp.append(self.mx_lo5_e1);
self.mx_lo5_l2 = QLabel(self);
self.mx_lo5_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo5_l2.move(20, 150);
tmp.append(self.mx_lo5_l2);
self.mx_lo5_e2 = QLineEdit(self)
self.mx_lo5_e2.move(290, 150);
self.mx_lo5_e2.setText("0");
tmp.append(self.mx_lo5_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo6_l1 = QLabel(self);
self.mx_lo6_l1.setText("1. Scalar Weight: ");
self.mx_lo6_l1.move(20, 100);
tmp.append(self.mx_lo6_l1);
self.mx_lo6_e1 = QLineEdit(self)
self.mx_lo6_e1.move(150, 100);
self.mx_lo6_e1.setText("1.0");
tmp.append(self.mx_lo6_e1);
self.mx_lo6_l2 = QLabel(self);
self.mx_lo6_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo6_l2.move(20, 150);
tmp.append(self.mx_lo6_l2);
self.mx_lo6_e2 = QLineEdit(self)
self.mx_lo6_e2.move(290, 150);
self.mx_lo6_e2.setText("0");
tmp.append(self.mx_lo6_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo7_l1 = QLabel(self);
self.mx_lo7_l1.setText("1. Scalar Weight: ");
self.mx_lo7_l1.move(20, 100);
tmp.append(self.mx_lo7_l1);
self.mx_lo7_e1 = QLineEdit(self)
self.mx_lo7_e1.move(150, 100);
self.mx_lo7_e1.setText("1.0");
tmp.append(self.mx_lo7_e1);
self.mx_lo7_l2 = QLabel(self);
self.mx_lo7_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo7_l2.move(20, 150);
tmp.append(self.mx_lo7_l2);
self.mx_lo7_e2 = QLineEdit(self)
self.mx_lo7_e2.move(290, 150);
self.mx_lo7_e2.setText("0");
tmp.append(self.mx_lo7_e2);
self.mx_lo7_l3 = QLabel(self);
self.mx_lo7_l3.setText("3. Input has log pre-applied: ");
self.mx_lo7_l3.move(20, 200);
tmp.append(self.mx_lo7_l3);
self.mx_lo7_cb3 = QComboBox(self);
self.mx_lo7_cb3.move(290, 200);
self.mx_lo7_cb3.addItems(["No", "Yes"]);
tmp.append(self.mx_lo7_cb3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo8_l1 = QLabel(self);
self.mx_lo8_l1.setText("1. Scalar Weight: ");
self.mx_lo8_l1.move(20, 100);
tmp.append(self.mx_lo8_l1);
self.mx_lo8_e1 = QLineEdit(self)
self.mx_lo8_e1.move(150, 100);
self.mx_lo8_e1.setText("1.0");
tmp.append(self.mx_lo8_e1);
self.mx_lo8_l2 = QLabel(self);
self.mx_lo8_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo8_l2.move(20, 150);
tmp.append(self.mx_lo8_l2);
self.mx_lo8_e2 = QLineEdit(self)
self.mx_lo8_e2.move(290, 150);
self.mx_lo8_e2.setText("0");
tmp.append(self.mx_lo8_e2);
self.mx_lo8_l3 = QLabel(self);
self.mx_lo8_l3.setText("3. Input has log pre-applied: ");
self.mx_lo8_l3.move(20, 200);
tmp.append(self.mx_lo8_l3);
self.mx_lo8_cb3 = QComboBox(self);
self.mx_lo8_cb3.move(290, 200);
self.mx_lo8_cb3.addItems(["No", "Yes"]);
tmp.append(self.mx_lo8_cb3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo9_l1 = QLabel(self);
self.mx_lo9_l1.setText("1. Scalar Weight: ");
self.mx_lo9_l1.move(20, 100);
tmp.append(self.mx_lo9_l1);
self.mx_lo9_e1 = QLineEdit(self)
self.mx_lo9_e1.move(150, 100);
self.mx_lo9_e1.setText("1.0");
tmp.append(self.mx_lo9_e1);
self.mx_lo9_l2 = QLabel(self);
self.mx_lo9_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo9_l2.move(20, 150);
tmp.append(self.mx_lo9_l2);
self.mx_lo9_e2 = QLineEdit(self)
self.mx_lo9_e2.move(290, 150);
self.mx_lo9_e2.setText("0");
tmp.append(self.mx_lo9_e2);
self.mx_lo9_l3 = QLabel(self);
self.mx_lo9_l3.setText("3. Threshold for mean estimator: ");
self.mx_lo9_l3.move(20, 200);
tmp.append(self.mx_lo9_l3);
self.mx_lo9_e3 = QLineEdit(self)
self.mx_lo9_e3.move(290, 200);
self.mx_lo9_e3.setText("1.0");
tmp.append(self.mx_lo9_e3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo10_l1 = QLabel(self);
self.mx_lo10_l1.setText("1. Scalar Weight: ");
self.mx_lo10_l1.move(20, 100);
tmp.append(self.mx_lo10_l1);
self.mx_lo10_e1 = QLineEdit(self)
self.mx_lo10_e1.move(150, 100);
self.mx_lo10_e1.setText("1.0");
tmp.append(self.mx_lo10_e1);
self.mx_lo10_l2 = QLabel(self);
self.mx_lo10_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo10_l2.move(20, 150);
tmp.append(self.mx_lo10_l2);
self.mx_lo10_e2 = QLineEdit(self)
self.mx_lo10_e2.move(290, 150);
self.mx_lo10_e2.setText("0");
tmp.append(self.mx_lo10_e2);
self.mx_lo10_l3 = QLabel(self);
self.mx_lo10_l3.setText("3. Margin: ");
self.mx_lo10_l3.move(20, 200);
tmp.append(self.mx_lo10_l3);
self.mx_lo10_e3 = QLineEdit(self)
self.mx_lo10_e3.move(150, 200);
self.mx_lo10_e3.setText("1.0");
tmp.append(self.mx_lo10_e3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo11_l1 = QLabel(self);
self.mx_lo11_l1.setText("1. Scalar Weight: ");
self.mx_lo11_l1.move(20, 100);
tmp.append(self.mx_lo11_l1);
self.mx_lo11_e1 = QLineEdit(self)
self.mx_lo11_e1.move(150, 100);
self.mx_lo11_e1.setText("1.0");
tmp.append(self.mx_lo11_e1);
self.mx_lo11_l2 = QLabel(self);
self.mx_lo11_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo11_l2.move(20, 150);
tmp.append(self.mx_lo11_l2);
self.mx_lo11_e2 = QLineEdit(self)
self.mx_lo11_e2.move(290, 150);
self.mx_lo11_e2.setText("0");
tmp.append(self.mx_lo11_e2);
self.mx_lo11_l3 = QLabel(self);
self.mx_lo11_l3.setText("3. Margin: ");
self.mx_lo11_l3.move(20, 200);
tmp.append(self.mx_lo11_l3);
self.mx_lo11_e3 = QLineEdit(self)
self.mx_lo11_e3.move(150, 200);
self.mx_lo11_e3.setText("1.0");
tmp.append(self.mx_lo11_e3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.ke_lo1_l1 = QLabel(self);
self.ke_lo1_l1.setText("1. Scalar Weight: ");
self.ke_lo1_l1.move(20, 100);
tmp.append(self.ke_lo1_l1);
self.ke_lo1_e1 = QLineEdit(self)
self.ke_lo1_e1.move(150, 100);
self.ke_lo1_e1.setText("1.0");
tmp.append(self.ke_lo1_e1);
self.ke_lo1_l2 = QLabel(self);
self.ke_lo1_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo1_l2.move(20, 150);
tmp.append(self.ke_lo1_l2);
self.ke_lo1_e2 = QLineEdit(self)
self.ke_lo1_e2.move(290, 150);
self.ke_lo1_e2.setText("0");
tmp.append(self.ke_lo1_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo2_l1 = QLabel(self);
self.ke_lo2_l1.setText("1. Scalar Weight: ");
self.ke_lo2_l1.move(20, 100);
tmp.append(self.ke_lo2_l1);
self.ke_lo2_e1 = QLineEdit(self)
self.ke_lo2_e1.move(150, 100);
self.ke_lo2_e1.setText("1.0");
tmp.append(self.ke_lo2_e1);
self.ke_lo2_l2 = QLabel(self);
self.ke_lo2_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo2_l2.move(20, 150);
tmp.append(self.ke_lo2_l2);
self.ke_lo2_e2 = QLineEdit(self)
self.ke_lo2_e2.move(290, 150);
self.ke_lo2_e2.setText("0");
tmp.append(self.ke_lo2_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo3_l1 = QLabel(self);
self.ke_lo3_l1.setText("1. Scalar Weight: ");
self.ke_lo3_l1.move(20, 100);
tmp.append(self.ke_lo3_l1);
self.ke_lo3_e1 = QLineEdit(self)
self.ke_lo3_e1.move(150, 100);
self.ke_lo3_e1.setText("1.0");
tmp.append(self.ke_lo3_e1);
self.ke_lo3_l2 = QLabel(self);
self.ke_lo3_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo3_l2.move(20, 150);
tmp.append(self.ke_lo3_l2);
self.ke_lo3_e2 = QLineEdit(self)
self.ke_lo3_e2.move(290, 150);
self.ke_lo3_e2.setText("0");
tmp.append(self.ke_lo3_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo4_l1 = QLabel(self);
self.ke_lo4_l1.setText("1. Scalar Weight: ");
self.ke_lo4_l1.move(20, 100);
tmp.append(self.ke_lo4_l1);
self.ke_lo4_e1 = QLineEdit(self)
self.ke_lo4_e1.move(150, 100);
self.ke_lo4_e1.setText("1.0");
tmp.append(self.ke_lo4_e1);
self.ke_lo4_l2 = QLabel(self);
self.ke_lo4_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo4_l2.move(20, 150);
tmp.append(self.ke_lo4_l2);
self.ke_lo4_e2 = QLineEdit(self)
self.ke_lo4_e2.move(290, 150);
self.ke_lo4_e2.setText("0");
tmp.append(self.ke_lo4_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo5_l1 = QLabel(self);
self.ke_lo5_l1.setText("1. Scalar Weight: ");
self.ke_lo5_l1.move(20, 100);
tmp.append(self.ke_lo5_l1);
self.ke_lo5_e1 = QLineEdit(self)
self.ke_lo5_e1.move(150, 100);
self.ke_lo5_e1.setText("1.0");
tmp.append(self.ke_lo5_e1);
self.ke_lo5_l2 = QLabel(self);
self.ke_lo5_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo5_l2.move(20, 150);
tmp.append(self.ke_lo5_l2);
self.ke_lo5_e2 = QLineEdit(self)
self.ke_lo5_e2.move(290, 150);
self.ke_lo5_e2.setText("0");
tmp.append(self.ke_lo5_e2);
self.ke_lo5_l3 = QLabel(self);
self.ke_lo5_l3.setText("3. Input has log pre-applied: ");
self.ke_lo5_l3.move(20, 200);
tmp.append(self.ke_lo5_l3);
self.ke_lo5_cb3 = QComboBox(self);
self.ke_lo5_cb3.move(290, 200);
self.ke_lo5_cb3.addItems(["No", "Yes"]);
tmp.append(self.ke_lo5_cb3);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo6_l1 = QLabel(self);
self.ke_lo6_l1.setText("1. Scalar Weight: ");
self.ke_lo6_l1.move(20, 100);
tmp.append(self.ke_lo6_l1);
self.ke_lo6_e1 = QLineEdit(self)
self.ke_lo6_e1.move(150, 100);
self.ke_lo6_e1.setText("1.0");
tmp.append(self.ke_lo6_e1);
self.ke_lo6_l2 = QLabel(self);
self.ke_lo6_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo6_l2.move(20, 150);
tmp.append(self.ke_lo6_l2);
self.ke_lo6_e2 = QLineEdit(self)
self.ke_lo6_e2.move(290, 150);
self.ke_lo6_e2.setText("0");
tmp.append(self.ke_lo6_e2);
self.ke_lo6_l3 = QLabel(self);
self.ke_lo6_l3.setText("3. Margin: ");
self.ke_lo6_l3.move(20, 200);
tmp.append(self.ke_lo6_l3);
self.ke_lo6_e3 = QLineEdit(self)
self.ke_lo6_e3.move(150, 200);
self.ke_lo6_e3.setText("1.0");
tmp.append(self.ke_lo6_e3);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo7_l1 = QLabel(self);
self.ke_lo7_l1.setText("1. Scalar Weight: ");
self.ke_lo7_l1.move(20, 100);
tmp.append(self.ke_lo7_l1);
self.ke_lo7_e1 = QLineEdit(self)
self.ke_lo7_e1.move(150, 100);
self.ke_lo7_e1.setText("1.0");
tmp.append(self.ke_lo7_e1);
self.ke_lo7_l2 = QLabel(self);
self.ke_lo7_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo7_l2.move(20, 150);
tmp.append(self.ke_lo7_l2);
self.ke_lo7_e2 = QLineEdit(self)
self.ke_lo7_e2.move(290, 150);
self.ke_lo7_e2.setText("0");
tmp.append(self.ke_lo7_e2);
self.ke_lo7_l3 = QLabel(self);
self.ke_lo7_l3.setText("3. Margin: ");
self.ke_lo7_l3.move(20, 200);
tmp.append(self.ke_lo7_l3);
self.ke_lo7_e3 = QLineEdit(self)
self.ke_lo7_e3.move(150, 200);
self.ke_lo7_e3.setText("1.0");
tmp.append(self.ke_lo7_e3);
self.loss_ui_keras.append(tmp)
tmp = [];
self.py_lo1_l1 = QLabel(self);
self.py_lo1_l1.setText("1. Scalar Weight: ");
self.py_lo1_l1.move(20, 100);
tmp.append(self.py_lo1_l1);
self.py_lo1_e1 = QLineEdit(self)
self.py_lo1_e1.move(150, 100);
self.py_lo1_e1.setText("1.0");
tmp.append(self.py_lo1_e1);
self.py_lo1_l2 = QLabel(self);
self.py_lo1_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo1_l2.move(20, 150);
tmp.append(self.py_lo1_l2);
self.py_lo1_e2 = QLineEdit(self)
self.py_lo1_e2.move(290, 150);
self.py_lo1_e2.setText("0");
tmp.append(self.py_lo1_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo2_l1 = QLabel(self);
self.py_lo2_l1.setText("1. Scalar Weight: ");
self.py_lo2_l1.move(20, 100);
tmp.append(self.py_lo2_l1);
self.py_lo2_e1 = QLineEdit(self)
self.py_lo2_e1.move(150, 100);
self.py_lo2_e1.setText("1.0");
tmp.append(self.py_lo2_e1);
self.py_lo2_l2 = QLabel(self);
self.py_lo2_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo2_l2.move(20, 150);
tmp.append(self.py_lo2_l2);
self.py_lo2_e2 = QLineEdit(self)
self.py_lo2_e2.move(290, 150);
self.py_lo2_e2.setText("0");
tmp.append(self.py_lo2_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo3_l1 = QLabel(self);
self.py_lo3_l1.setText("1. Scalar Weight: ");
self.py_lo3_l1.move(20, 100);
tmp.append(self.py_lo3_l1);
self.py_lo3_e1 = QLineEdit(self)
self.py_lo3_e1.move(150, 100);
self.py_lo3_e1.setText("1.0");
tmp.append(self.py_lo3_e1);
self.py_lo3_l2 = QLabel(self);
self.py_lo3_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo3_l2.move(20, 150);
tmp.append(self.py_lo3_l2);
self.py_lo3_e2 = QLineEdit(self)
self.py_lo3_e2.move(290, 150);
self.py_lo3_e2.setText("0");
tmp.append(self.py_lo3_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo4_l1 = QLabel(self);
self.py_lo4_l1.setText("1. Scalar Weight: ");
self.py_lo4_l1.move(20, 100);
tmp.append(self.py_lo4_l1);
self.py_lo4_e1 = QLineEdit(self)
self.py_lo4_e1.move(150, 100);
self.py_lo4_e1.setText("1.0");
tmp.append(self.py_lo4_e1);
self.py_lo4_l2 = QLabel(self);
self.py_lo4_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo4_l2.move(20, 150);
tmp.append(self.py_lo4_l2);
self.py_lo4_e2 = QLineEdit(self)
self.py_lo4_e2.move(290, 150);
self.py_lo4_e2.setText("0");
tmp.append(self.py_lo4_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo5_l1 = QLabel(self);
self.py_lo5_l1.setText("1. Scalar Weight: ");
self.py_lo5_l1.move(20, 100);
tmp.append(self.py_lo5_l1);
self.py_lo5_e1 = QLineEdit(self)
self.py_lo5_e1.move(150, 100);
self.py_lo5_e1.setText("1.0");
tmp.append(self.py_lo5_e1);
self.py_lo5_l2 = QLabel(self);
self.py_lo5_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo5_l2.move(20, 150);
tmp.append(self.py_lo5_l2);
self.py_lo5_e2 = QLineEdit(self)
self.py_lo5_e2.move(290, 150);
self.py_lo5_e2.setText("0");
tmp.append(self.py_lo5_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo6_l1 = QLabel(self);
self.py_lo6_l1.setText("1. Scalar Weight: ");
self.py_lo6_l1.move(20, 100);
tmp.append(self.py_lo6_l1);
self.py_lo6_e1 = QLineEdit(self)
self.py_lo6_e1.move(150, 100);
self.py_lo6_e1.setText("1.0");
tmp.append(self.py_lo6_e1);
self.py_lo6_l2 = QLabel(self);
self.py_lo6_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo6_l2.move(20, 150);
tmp.append(self.py_lo6_l2);
self.py_lo6_e2 = QLineEdit(self)
self.py_lo6_e2.move(290, 150);
self.py_lo6_e2.setText("0");
tmp.append(self.py_lo6_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo7_l1 = QLabel(self);
self.py_lo7_l1.setText("1. Scalar Weight: ");
self.py_lo7_l1.move(20, 100);
tmp.append(self.py_lo7_l1);
self.py_lo7_e1 = QLineEdit(self)
self.py_lo7_e1.move(150, 100);
self.py_lo7_e1.setText("1.0");
tmp.append(self.py_lo7_e1);
self.py_lo7_l2 = QLabel(self);
self.py_lo7_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo7_l2.move(20, 150);
tmp.append(self.py_lo7_l2);
self.py_lo7_e2 = QLineEdit(self)
self.py_lo7_e2.move(290, 150);
self.py_lo7_e2.setText("0");
tmp.append(self.py_lo7_e2);
self.py_lo7_l3 = QLabel(self);
self.py_lo7_l3.setText("3. Input has log pre-applied: ");
self.py_lo7_l3.move(20, 200);
tmp.append(self.py_lo7_l3);
self.py_lo7_cb3 = QComboBox(self);
self.py_lo7_cb3.move(290, 200);
self.py_lo7_cb3.addItems(["No", "Yes"]);
tmp.append(self.py_lo7_cb3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo8_l1 = QLabel(self);
self.py_lo8_l1.setText("1. Scalar Weight: ");
self.py_lo8_l1.move(20, 100);
tmp.append(self.py_lo8_l1);
self.py_lo8_e1 = QLineEdit(self)
self.py_lo8_e1.move(150, 100);
self.py_lo8_e1.setText("1.0");
tmp.append(self.py_lo8_e1);
self.py_lo8_l2 = QLabel(self);
self.py_lo8_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo8_l2.move(20, 150);
tmp.append(self.py_lo8_l2);
self.py_lo8_e2 = QLineEdit(self)
self.py_lo8_e2.move(290, 150);
self.py_lo8_e2.setText("0");
tmp.append(self.py_lo8_e2);
self.py_lo8_l3 = QLabel(self);
self.py_lo8_l3.setText("3. Input has log pre-applied: ");
self.py_lo8_l3.move(20, 200);
tmp.append(self.py_lo8_l3);
self.py_lo8_cb3 = QComboBox(self);
self.py_lo8_cb3.move(290, 200);
self.py_lo8_cb3.addItems(["No", "Yes"]);
tmp.append(self.py_lo8_cb3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo9_l1 = QLabel(self);
self.py_lo9_l1.setText("1. Scalar Weight: ");
self.py_lo9_l1.move(20, 100);
tmp.append(self.py_lo9_l1);
self.py_lo9_e1 = QLineEdit(self)
self.py_lo9_e1.move(150, 100);
self.py_lo9_e1.setText("1.0");
tmp.append(self.py_lo9_e1);
self.py_lo9_l2 = QLabel(self);
self.py_lo9_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo9_l2.move(20, 150);
tmp.append(self.py_lo9_l2);
self.py_lo9_e2 = QLineEdit(self)
self.py_lo9_e2.move(290, 150);
self.py_lo9_e2.setText("0");
tmp.append(self.py_lo9_e2);
self.py_lo9_l3 = QLabel(self);
self.py_lo9_l3.setText("3. Threshold for mean estimator: ");
self.py_lo9_l3.move(20, 200);
tmp.append(self.py_lo9_l3);
self.py_lo9_e3 = QLineEdit(self)
self.py_lo9_e3.move(290, 200);
self.py_lo9_e3.setText("1.0");
tmp.append(self.py_lo9_e3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo10_l1 = QLabel(self);
self.py_lo10_l1.setText("1. Scalar Weight: ");
self.py_lo10_l1.move(20, 100);
tmp.append(self.py_lo10_l1);
self.py_lo10_e1 = QLineEdit(self)
self.py_lo10_e1.move(150, 100);
self.py_lo10_e1.setText("1.0");
tmp.append(self.py_lo10_e1);
self.py_lo10_l2 = QLabel(self);
self.py_lo10_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo10_l2.move(20, 150);
tmp.append(self.py_lo10_l2);
self.py_lo10_e2 = QLineEdit(self)
self.py_lo10_e2.move(290, 150);
self.py_lo10_e2.setText("0");
tmp.append(self.py_lo10_e2);
self.py_lo10_l3 = QLabel(self);
self.py_lo10_l3.setText("3. Margin: ");
self.py_lo10_l3.move(20, 200);
tmp.append(self.py_lo10_l3);
self.py_lo10_e3 = QLineEdit(self)
self.py_lo10_e3.move(150, 200);
self.py_lo10_e3.setText("1.0");
tmp.append(self.py_lo10_e3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo11_l1 = QLabel(self);
self.py_lo11_l1.setText("1. Scalar Weight: ");
self.py_lo11_l1.move(20, 100);
tmp.append(self.py_lo11_l1);
self.py_lo11_e1 = QLineEdit(self)
self.py_lo11_e1.move(150, 100);
self.py_lo11_e1.setText("1.0");
tmp.append(self.py_lo11_e1);
self.py_lo11_l2 = QLabel(self);
self.py_lo11_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo11_l2.move(20, 150);
tmp.append(self.py_lo11_l2);
self.py_lo11_e2 = QLineEdit(self)
self.py_lo11_e2.move(290, 150);
self.py_lo11_e2.setText("0");
tmp.append(self.py_lo11_e2);
self.py_lo11_l3 = QLabel(self);
self.py_lo11_l3.setText("3. Margin: ");
self.py_lo11_l3.move(20, 200);
tmp.append(self.py_lo11_l3);
self.py_lo11_e3 = QLineEdit(self)
self.py_lo11_e3.move(150, 200);
self.py_lo11_e3.setText("1.0");
tmp.append(self.py_lo11_e3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo12_l1 = QLabel(self);
self.py_lo12_l1.setText("1. Scalar Weight: ");
self.py_lo12_l1.move(20, 100);
tmp.append(self.py_lo12_l1);
self.py_lo12_e1 = QLineEdit(self)
self.py_lo12_e1.move(150, 100);
self.py_lo12_e1.setText("1.0");
tmp.append(self.py_lo12_e1);
self.py_lo12_l2 = QLabel(self);
self.py_lo12_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo12_l2.move(20, 150);
tmp.append(self.py_lo12_l2);
self.py_lo12_e2 = QLineEdit(self)
self.py_lo12_e2.move(290, 150);
self.py_lo12_e2.setText("0");
tmp.append(self.py_lo12_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo13_l1 = QLabel(self);
self.py_lo13_l1.setText("1. Scalar Weight: ");
self.py_lo13_l1.move(20, 100);
tmp.append(self.py_lo13_l1);
self.py_lo13_e1 = QLineEdit(self)
self.py_lo13_e1.move(150, 100);
self.py_lo13_e1.setText("1.0");
tmp.append(self.py_lo13_e1);
self.py_lo13_l2 = QLabel(self);
self.py_lo13_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo13_l2.move(20, 150);
tmp.append(self.py_lo13_l2);
self.py_lo13_e2 = QLineEdit(self)
self.py_lo13_e2.move(290, 150);
self.py_lo13_e2.setText("0");
tmp.append(self.py_lo13_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo14_l1 = QLabel(self);
self.py_lo14_l1.setText("1. Scalar Weight: ");
self.py_lo14_l1.move(20, 100);
tmp.append(self.py_lo14_l1);
self.py_lo14_e1 = QLineEdit(self)
self.py_lo14_e1.move(150, 100);
self.py_lo14_e1.setText("1.0");
tmp.append(self.py_lo14_e1);
self.py_lo14_l2 = QLabel(self);
self.py_lo14_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo14_l2.move(20, 150);
tmp.append(self.py_lo14_l2);
self.py_lo14_e2 = QLineEdit(self)
self.py_lo14_e2.move(290, 150);
self.py_lo14_e2.setText("0");
tmp.append(self.py_lo14_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo15_l1 = QLabel(self);
self.py_lo15_l1.setText("1. Scalar Weight: ");
self.py_lo15_l1.move(20, 100);
tmp.append(self.py_lo15_l1);
self.py_lo15_e1 = QLineEdit(self)
self.py_lo15_e1.move(150, 100);
self.py_lo15_e1.setText("1.0");
tmp.append(self.py_lo15_e1);
self.py_lo15_l2 = QLabel(self);
self.py_lo15_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo15_l2.move(20, 150);
tmp.append(self.py_lo15_l2);
self.py_lo15_e2 = QLineEdit(self)
self.py_lo15_e2.move(290, 150);
self.py_lo15_e2.setText("0");
tmp.append(self.py_lo15_e2);
self.loss_ui_pytorch.append(tmp)
self.select_loss();
self.tb1 = QTextEdit(self)
self.tb1.move(550, 20)
self.tb1.resize(300, 500)
if(self.system["update"]["losses"]["active"]):
wr = "";
wr = json.dumps(self.system["update"]["losses"]["value"], indent=4)
self.tb1.setText(wr);
else:
self.tb1.setText("Using Default loss.")
self.b4 = QPushButton('Select loss', self)
self.b4.move(400,400)
self.b4.clicked.connect(self.add_loss)
self.b6 = QPushButton('Clear ', self)
self.b6.move(400,500)
self.b6.clicked.connect(self.clear_loss)
def select_loss(self):
self.current_loss = {};
self.current_loss["name"] = "";
self.current_loss["params"] = {};
if(self.system["backend"] == "Mxnet-1.5.1"):
self.current_loss["name"] = self.cb1.currentText();
index = self.mxnet_losses_list.index(self.cb1.currentText());
for i in range(len(self.loss_ui_mxnet)):
for j in range(len(self.loss_ui_mxnet[i])):
if((index-1)==i):
self.loss_ui_mxnet[i][j].show();
else:
self.loss_ui_mxnet[i][j].hide();
for i in range(len(self.loss_ui_keras)):
for j in range(len(self.loss_ui_keras[i])):
self.loss_ui_keras[i][j].hide();
for i in range(len(self.loss_ui_pytorch)):
for j in range(len(self.loss_ui_pytorch[i])):
self.loss_ui_pytorch[i][j].hide();
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
self.current_loss["name"] = self.cb2.currentText();
index = self.keras_losses_list.index(self.cb2.currentText());
for i in range(len(self.loss_ui_keras)):
for j in range(len(self.loss_ui_keras[i])):
if((index-1)==i):
self.loss_ui_keras[i][j].show();
else:
self.loss_ui_keras[i][j].hide();
for i in range(len(self.loss_ui_mxnet)):
for j in range(len(self.loss_ui_mxnet[i])):
self.loss_ui_mxnet[i][j].hide();
for i in range(len(self.loss_ui_pytorch)):
for j in range(len(self.loss_ui_pytorch[i])):
self.loss_ui_pytorch[i][j].hide();
elif(self.system["backend"] == "Pytorch-1.3.1"):
self.current_loss["name"] = self.cb3.currentText();
index = self.pytorch_losses_list.index(self.cb3.currentText());
for i in range(len(self.loss_ui_pytorch)):
for j in range(len(self.loss_ui_pytorch[i])):
if((index-1)==i):
self.loss_ui_pytorch[i][j].show();
else:
self.loss_ui_pytorch[i][j].hide();
for i in range(len(self.loss_ui_keras)):
for j in range(len(self.loss_ui_keras[i])):
self.loss_ui_keras[i][j].hide();
for i in range(len(self.loss_ui_mxnet)):
for j in range(len(self.loss_ui_mxnet[i])):
self.loss_ui_mxnet[i][j].hide();
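    # Read the parameter fields of the currently selected loss, store them in the
    # experiment config, and echo the resulting JSON in the preview box.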
def add_loss(self):
self.system["update"]["losses"]["active"] = True;
if(self.system["backend"] == "Mxnet-1.5.1"):
if(self.current_loss["name"] == self.mxnet_losses_list[1]):
self.current_loss["params"]["weight"] = self.mx_lo1_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo1_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[2]):
self.current_loss["params"]["weight"] = self.mx_lo2_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo2_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[3]):
self.current_loss["params"]["weight"] = self.mx_lo3_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo3_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[4]):
self.current_loss["params"]["weight"] = self.mx_lo4_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo4_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[5]):
self.current_loss["params"]["weight"] = self.mx_lo5_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo5_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[6]):
self.current_loss["params"]["weight"] = self.mx_lo6_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo6_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[7]):
self.current_loss["params"]["weight"] = self.mx_lo7_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo7_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.mx_lo7_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[8]):
self.current_loss["params"]["weight"] = self.mx_lo8_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo8_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.mx_lo8_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[9]):
self.current_loss["params"]["weight"] = self.mx_lo9_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo9_e2.text();
self.current_loss["params"]["threshold_for_mean_estimator"] = self.mx_lo9_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[10]):
self.current_loss["params"]["weight"] = self.mx_lo10_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo10_e2.text();
self.current_loss["params"]["margin"] = self.mx_lo10_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[11]):
self.current_loss["params"]["weight"] = self.mx_lo11_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo11_e2.text();
self.current_loss["params"]["margin"] = self.mx_lo11_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
if(self.current_loss["name"] == self.keras_losses_list[1]):
self.current_loss["params"]["weight"] = self.ke_lo1_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo1_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[2]):
self.current_loss["params"]["weight"] = self.ke_lo2_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo2_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[3]):
self.current_loss["params"]["weight"] = self.ke_lo3_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo3_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[4]):
self.current_loss["params"]["weight"] = self.ke_lo4_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo4_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[5]):
self.current_loss["params"]["weight"] = self.ke_lo5_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo5_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.ke_lo5_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[6]):
self.current_loss["params"]["weight"] = self.ke_lo6_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo6_e2.text();
self.current_loss["params"]["margin"] = self.ke_lo6_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[7]):
self.current_loss["params"]["weight"] = self.ke_lo7_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo7_e2.text();
self.current_loss["params"]["margin"] = self.ke_lo7_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.system["backend"] == "Pytorch-1.3.1"):
if(self.current_loss["name"] == self.pytorch_losses_list[1]):
self.current_loss["params"]["weight"] = self.py_lo1_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo1_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[2]):
self.current_loss["params"]["weight"] = self.py_lo2_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo2_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[3]):
self.current_loss["params"]["weight"] = self.py_lo3_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo3_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[4]):
self.current_loss["params"]["weight"] = self.py_lo4_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo4_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[5]):
self.current_loss["params"]["weight"] = self.py_lo5_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo5_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[6]):
self.current_loss["params"]["weight"] = self.py_lo6_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo6_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[7]):
self.current_loss["params"]["weight"] = self.py_lo7_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo7_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.py_lo7_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[8]):
self.current_loss["params"]["weight"] = self.py_lo8_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo8_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.py_lo8_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[9]):
self.current_loss["params"]["weight"] = self.py_lo9_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo9_e2.text();
self.current_loss["params"]["threshold_for_mean_estimator"] = self.py_lo9_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[10]):
self.current_loss["params"]["weight"] = self.py_lo10_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo10_e2.text();
self.current_loss["params"]["margin"] = self.py_lo10_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[11]):
self.current_loss["params"]["weight"] = self.py_lo11_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo11_e2.text();
self.current_loss["params"]["margin"] = self.py_lo11_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[12]):
self.current_loss["params"]["weight"] = self.py_lo12_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo12_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[13]):
self.current_loss["params"]["weight"] = self.py_lo13_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo13_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[14]):
self.current_loss["params"]["weight"] = self.py_lo14_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo14_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[15]):
self.current_loss["params"]["weight"] = self.py_lo15_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo15_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
wr = "";
wr = json.dumps(self.system["update"]["losses"]["value"], indent=4)
self.tb1.setText(wr);
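    # Revert to the backend's default loss and empty the preview box.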
def clear_loss(self):
self.system["update"]["losses"]["value"] = "";
self.system["update"]["losses"]["active"] = False;
wr = "";
self.tb1.setText(wr);
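    # forward/backward persist the config to disk and emit the matching navigation signal.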
def forward(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.forward_train.emit();
def backward(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_scheduler_param.emit();
'''
app = QApplication(sys.argv)
screen = WindowClassificationTrainUpdateLossParam()
screen.show()
sys.exit(app.exec_())
'''
|
classification/training/update/WindowClassificationTrainUpdateLossParam.py
|
| 0.267217 | 0.122944 |
import math
from collections import defaultdict
import bisect
import numpy as np
from unittest.mock import MagicMock
from mmdet.datasets import (ClassBalancedDataset, ConcatDataset, CustomDataset,
MultiImageMixDataset, RepeatDataset)
def test_dataset_wrapper():
CustomDataset.load_annotations = MagicMock()
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 10
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
dataset_b = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_b = 20
cat_ids_list_b = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_b)
]
dataset_b.data_infos = MagicMock()
dataset_b.data_infos.__len__.return_value = len_b
dataset_b.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_b[idx])
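    # ConcatDataset: indices past len(dataset_a) must map into dataset_b, and
    # get_cat_ids must be forwarded to the correct sub-dataset.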
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset[5] == 5
assert concat_dataset[25] == 15
assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15]
assert len(concat_dataset) == len(dataset_a) + len(dataset_b)
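    # RepeatDataset: indices wrap around the wrapped dataset (idx % len(dataset_a)).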
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset[5] == 5
assert repeat_dataset[15] == 5
assert repeat_dataset[27] == 7
assert repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7]
assert len(repeat_dataset) == 10 * len(dataset_a)
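    # Recompute the repeat factors ClassBalancedDataset is expected to use:
    # repeat_factor = max(1, sqrt(repeat_thr / category_frequency)), maximised
    # over the categories present in each image.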
category_freq = defaultdict(int)
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / len(cat_ids_list_a)
mean_freq = np.mean(list(category_freq.values()))
repeat_thr = mean_freq
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
repeat_factors = []
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})
repeat_factors.append(math.ceil(repeat_factor))
repeat_factors_cumsum = np.cumsum(repeat_factors)
repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1]
for idx in np.random.randint(0, len(repeat_factor_dataset), 3):
assert repeat_factor_dataset[idx] == bisect.bisect_right(
repeat_factors_cumsum, idx)
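    # MultiImageMixDataset: run a Mosaic/MixUp pipeline on mocked images and check
    # that the output shape follows dynamic_scale (or img_scale when steps are skipped).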
img_scale = (60, 60)
dynamic_scale = (80, 80)
pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
]
CustomDataset.load_annotations = MagicMock()
results = []
for _ in range(2):
        height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
results.append(dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: results[idx])
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 2
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline,
dynamic_scale)
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (dynamic_scale[0], dynamic_scale[1], 3)
# test skip_type_keys
multi_image_mix_dataset = MultiImageMixDataset(
dataset_a,
pipeline,
dynamic_scale,
skip_type_keys=('MixUp', 'RandomFlip', 'Resize', 'Pad'))
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
|
tests/test_data/test_datasets/test_dataset_wrapper.py
|
| 0.612657 | 0.519034 |
import logging
import os
import random
import hydra
from hydra.utils import instantiate
import numpy as np
import torch
import wandb
from allennlp.nn.util import get_text_field_mask
from torch.distributions import Categorical
from omegaconf import OmegaConf
from viraal.config import (flatten_dict, get_key, pass_conf,
register_interpolations, save_config, set_seeds)
from viraal.train.text import batch_to_device, get_checkpoint
from viraal.train.tag import TrainJoint
from viraal.core.utils import destroy_trainer, apply
from tqdm import tqdm  # missing in the original imports; used for the optional progress bar in rerank()
logger = logging.getLogger(__name__)
@hydra.main(config_path="../config/rerank.yaml", strict=False)
def train_text(cfg):
register_interpolations()
pretrain_cfg = OmegaConf.load(os.path.join(cfg.rerank.pretrain, 'config.yaml'))
cfg = OmegaConf.merge(pretrain_cfg, cfg)
cfg_yaml = cfg.pretty(resolve=True)
logger.info("====CONFIG====\n%s", cfg_yaml)
save_config(cfg_yaml)
set_seeds(cfg.misc.seed)
if cfg.misc.wandb:
pass_conf(wandb.init, cfg, 'wandb')(config=cfg.to_container(resolve=True))
tr = TrainJoint(cfg)
tr.restore(get_checkpoint(os.path.join(cfg.rerank.pretrain, 'model'),cfg.rerank.iteration))
logger.info("Reranking training instances")
rerank(tr, cfg)
logger.info("Resetting model weights")
tr.instantiate_model()
logger.info("Training model")
tr.train_loop()
destroy_trainer(tr)
def normalize(criter):
c = criter.detach().cpu().numpy()
return c/np.quantile(c, 0.99)
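# rerank() scores every unlabeled instance with the configured criteria
# ("ce" = mean per-token entropy, "vat" = virtual adversarial loss, "random"),
# each scaled by its 99th percentile via normalize(), sums them, and marks the
# top cfg.rerank.part fraction (relative to the training set size) as labeled.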
def rerank(trainer, cfg):
trainer.model.train()
unlabeled = trainer.get_unlabeled_instances()
nb_instances = len(unlabeled)
to_select = int(cfg.rerank.part*len(trainer.train_instances))
iterator = trainer.iterator(unlabeled, num_epochs=1)
if cfg.misc.tqdm:
iterator = tqdm(iterator)
criter_epoch = []
for batch in iterator:
batch_to_device(batch, cfg.misc.device)
embeddings = trainer.word_embeddings(batch["sentence"])
mask = get_text_field_mask(batch["sentence"])
sentence_lengths = mask.sum(dim=-1)
logits_tag = trainer.model(embeddings=embeddings, mask=mask)
labeled = np.array(batch["labeled"], dtype=bool)
dist_tag = Categorical(logits=logits_tag.view(-1, logits_tag.size(-1)))
criter = np.zeros(logits_tag.size(0))
if "ce" in cfg.rerank.criteria:
tag_entropies = dist_tag.entropy().view(logits_tag.size(0), logits_tag.size(1))*mask.float()
criter += normalize(tag_entropies.sum(dim=-1)/sentence_lengths.float())
if "vat" in cfg.rerank.criteria:
model_forward = lambda embeddings: trainer.model(
embeddings=embeddings, mask=mask
)
vat_criter_tag = trainer.losses["vat"](logits_tag, model_forward, embeddings, mask)
criter += normalize(vat_criter_tag.mean(dim=-1))
if "random" in cfg.rerank.criteria:
criter += np.random.rand(logits_tag.size(0))
criter_epoch.append(criter)
criter_epoch = np.concatenate(criter_epoch)
selected = np.argsort(criter_epoch)[-to_select:]
for idx in selected:
unlabeled[idx]['labeled'].metadata = True
if __name__ == "__main__":
try:
train_text()
except Exception:
logger.exception("Fatal error")
|
viraal/rerank/tag.py
|
import logging
import os
import random
import hydra
from hydra.utils import instantiate
import numpy as np
import torch
import wandb
from allennlp.nn.util import get_text_field_mask
from torch.distributions import Categorical
from omegaconf import OmegaConf
from viraal.config import (flatten_dict, get_key, pass_conf,
register_interpolations, save_config, set_seeds)
from viraal.train.text import batch_to_device, get_checkpoint
from viraal.train.tag import TrainJoint
from viraal.core.utils import destroy_trainer, apply
from tqdm import tqdm  # missing in the original imports; used for the optional progress bar in rerank()
logger = logging.getLogger(__name__)
@hydra.main(config_path="../config/rerank.yaml", strict=False)
def train_text(cfg):
register_interpolations()
pretrain_cfg = OmegaConf.load(os.path.join(cfg.rerank.pretrain, 'config.yaml'))
cfg = OmegaConf.merge(pretrain_cfg, cfg)
cfg_yaml = cfg.pretty(resolve=True)
logger.info("====CONFIG====\n%s", cfg_yaml)
save_config(cfg_yaml)
set_seeds(cfg.misc.seed)
if cfg.misc.wandb:
pass_conf(wandb.init, cfg, 'wandb')(config=cfg.to_container(resolve=True))
tr = TrainJoint(cfg)
tr.restore(get_checkpoint(os.path.join(cfg.rerank.pretrain, 'model'),cfg.rerank.iteration))
logger.info("Reranking training instances")
rerank(tr, cfg)
logger.info("Resetting model weights")
tr.instantiate_model()
logger.info("Training model")
tr.train_loop()
destroy_trainer(tr)
def normalize(criter):
c = criter.detach().cpu().numpy()
return c/np.quantile(c, 0.99)
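# rerank() scores every unlabeled instance with the configured criteria
# ("ce" = mean per-token entropy, "vat" = virtual adversarial loss, "random"),
# each scaled by its 99th percentile via normalize(), sums them, and marks the
# top cfg.rerank.part fraction (relative to the training set size) as labeled.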
def rerank(trainer, cfg):
trainer.model.train()
unlabeled = trainer.get_unlabeled_instances()
nb_instances = len(unlabeled)
to_select = int(cfg.rerank.part*len(trainer.train_instances))
iterator = trainer.iterator(unlabeled, num_epochs=1)
if cfg.misc.tqdm:
iterator = tqdm(iterator)
criter_epoch = []
for batch in iterator:
batch_to_device(batch, cfg.misc.device)
embeddings = trainer.word_embeddings(batch["sentence"])
mask = get_text_field_mask(batch["sentence"])
sentence_lengths = mask.sum(dim=-1)
logits_tag = trainer.model(embeddings=embeddings, mask=mask)
labeled = np.array(batch["labeled"], dtype=bool)
dist_tag = Categorical(logits=logits_tag.view(-1, logits_tag.size(-1)))
criter = np.zeros(logits_tag.size(0))
if "ce" in cfg.rerank.criteria:
tag_entropies = dist_tag.entropy().view(logits_tag.size(0), logits_tag.size(1))*mask.float()
criter += normalize(tag_entropies.sum(dim=-1)/sentence_lengths.float())
if "vat" in cfg.rerank.criteria:
model_forward = lambda embeddings: trainer.model(
embeddings=embeddings, mask=mask
)
vat_criter_tag = trainer.losses["vat"](logits_tag, model_forward, embeddings, mask)
criter += normalize(vat_criter_tag.mean(dim=-1))
if "random" in cfg.rerank.criteria:
criter += np.random.rand(logits_tag.size(0))
criter_epoch.append(criter)
criter_epoch = np.concatenate(criter_epoch)
selected = np.argsort(criter_epoch)[-to_select:]
for idx in selected:
unlabeled[idx]['labeled'].metadata = True
if __name__ == "__main__":
try:
train_text()
except Exception:
logger.exception("Fatal error")
| 0.535098 | 0.112893 |
import logging
from typing import Dict, Optional, Sequence, Type, Union
import tensorflow as tf
from tensorflow.keras.layers import Layer
from ..block.mlp import MLPBlock
from ..ranking_metric import AvgPrecisionAt, NDCGAt, RecallAt
from .base import PredictionTask
def name_fn(name, inp):
return "/".join([name, inp]) if name else None
MetricOrMetricClass = Union[tf.keras.metrics.Metric, Type[tf.keras.metrics.Metric]]
LOG = logging.getLogger("transformers4rec")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class BinaryClassificationTask(PredictionTask):
DEFAULT_LOSS = tf.keras.losses.BinaryCrossentropy()
DEFAULT_METRICS = (
tf.keras.metrics.Precision,
tf.keras.metrics.Recall,
tf.keras.metrics.BinaryAccuracy,
tf.keras.metrics.AUC,
)
def __init__(
self,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
loss=DEFAULT_LOSS,
metrics: Sequence[MetricOrMetricClass] = DEFAULT_METRICS,
summary_type="first",
**kwargs,
):
super().__init__(
loss=loss,
metrics=list(metrics),
target_name=target_name,
task_name=task_name,
summary_type=summary_type,
task_block=task_block,
**kwargs,
)
self.pre = tf.keras.layers.Dense(1, activation="sigmoid", name=self.child_name("logit"))
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class RegressionTask(PredictionTask):
DEFAULT_LOSS = tf.keras.losses.MeanSquaredError()
DEFAULT_METRICS = (tf.keras.metrics.RootMeanSquaredError,)
def __init__(
self,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
loss=DEFAULT_LOSS,
metrics=DEFAULT_METRICS,
summary_type="first",
**kwargs,
):
super().__init__(
loss=loss,
metrics=metrics,
target_name=target_name,
task_name=task_name,
summary_type=summary_type,
task_block=task_block,
**kwargs,
)
self.pre = tf.keras.layers.Dense(1, name=self.child_name("logit"))
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class NextItemPredictionTask(PredictionTask):
"""Next-item prediction task.
Parameters
----------
loss:
Loss function. SparseCategoricalCrossentropy()
metrics:
List of RankingMetrics to be evaluated.
prediction_metrics:
List of Keras metrics used to summarize the predictions.
label_metrics:
List of Keras metrics used to summarize the labels.
loss_metrics:
List of Keras metrics used to summarize the loss.
name:
Optional task name.
target_dim: int
Dimension of the target.
weight_tying: bool
The item id embedding table weights are shared with the prediction network layer.
item_embedding_table: tf.Variable
Variable of embedding table for the item.
softmax_temperature: float
Softmax temperature, used to reduce model overconfidence, so that softmax(logits / T).
Value 1.0 reduces to regular softmax.
"""
DEFAULT_LOSS = tf.keras.losses.SparseCategoricalCrossentropy()
DEFAULT_METRICS = (
# default metrics suppose labels are int encoded
NDCGAt(top_ks=[10, 20], labels_onehot=True),
AvgPrecisionAt(top_ks=[10, 20], labels_onehot=True),
RecallAt(top_ks=[10, 20], labels_onehot=True),
)
def __init__(
self,
loss=DEFAULT_LOSS,
metrics=DEFAULT_METRICS,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
weight_tying: bool = False,
target_dim: int = None,
softmax_temperature: float = 1,
**kwargs,
):
super().__init__(
loss=loss,
metrics=metrics,
target_name=target_name,
task_name=task_name,
task_block=task_block,
**kwargs,
)
self.weight_tying = weight_tying
self.target_dim = target_dim
self.softmax_temperature = softmax_temperature
# The table is only assigned in build() when weight tying is enabled, but it is
# always forwarded to _NextItemPredictionTask, so default it here to avoid an
# AttributeError when weight_tying=False.
self.item_embedding_table = None
def build(self, input_shape, body, inputs=None):
# Retrieve the embedding module to get the name of itemid col and its related table
if not len(input_shape) == 3 or isinstance(input_shape, dict):
raise ValueError(
"NextItemPredictionTask needs a 3-dim vector as input, found:" f"{input_shape}"
)
if not inputs:
inputs = body.inputs
if not getattr(inputs, "item_id", None):
raise ValueError(
"For Item Prediction task a categorical_module "
"including an item_id column is required."
)
self.embeddings = inputs.categorical_layer
if not self.target_dim:
self.target_dim = self.embeddings.item_embedding_table.shape[0]
if self.weight_tying:
self.item_embedding_table = self.embeddings.item_embedding_table
item_dim = self.item_embedding_table.shape[1]
if input_shape[-1] != item_dim and not self.task_block:
LOG.warning(
f"Projecting inputs of NextItemPredictionTask to'{item_dim}' "
f"As weight tying requires the input dimension '{input_shape[-1]}' "
f"to be equal to the item-id embedding dimension '{item_dim}'"
)
# project input tensors to same dimension as item-id embeddings
self.task_block = MLPBlock([item_dim])
# Retrieve the masking if used in the model block
self.masking = inputs.masking
if self.masking:
self.padding_idx = self.masking.padding_idx
self.pre = _NextItemPredictionTask(
target_dim=self.target_dim,
weight_tying=self.weight_tying,
item_embedding_table=self.item_embedding_table,
softmax_temperature=self.softmax_temperature,
)
return super().build(input_shape)
def call(self, inputs, **kwargs):
if isinstance(inputs, (tuple, list)):
inputs = inputs[0]
x = inputs
if self.task_block:
x = self.task_block(x)
# retrieve labels from masking
if self.masking:
labels = self.masking.masked_targets
else:
labels = self.embeddings.item_seq
# remove vectors of padded items
trg_flat = tf.reshape(labels, -1)
non_pad_mask = trg_flat != self.padding_idx
x = self.remove_pad_3d(x, non_pad_mask)
# compute predictions probs
x = self.pre(x)
return x
def remove_pad_3d(self, inp_tensor, non_pad_mask):
# inp_tensor: (n_batch x seqlen x emb_dim)
inp_tensor = tf.reshape(inp_tensor, (-1, inp_tensor.shape[-1]))
inp_tensor_fl = tf.boolean_mask(
inp_tensor, tf.broadcast_to(tf.expand_dims(non_pad_mask, 1), inp_tensor.shape)
)
out_tensor = tf.reshape(inp_tensor_fl, (-1, inp_tensor.shape[1]))
return out_tensor
def compute_loss( # type: ignore
self,
inputs,
targets=None,
compute_metrics=True,
sample_weight: Optional[tf.Tensor] = None,
**kwargs,
) -> tf.Tensor:
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
predictions = self(inputs)
# retrieve labels from masking
if self.masking:
targets = self.masking.masked_targets
# flatten labels and remove padding index
targets = tf.reshape(targets, -1)
non_pad_mask = targets != self.padding_idx
targets = tf.boolean_mask(targets, non_pad_mask)
loss = self.loss(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
if compute_metrics:
update_ops = self.calculate_metrics(predictions, targets, forward=False, loss=loss)
update_ops = [x for x in update_ops if x is not None]
with tf.control_dependencies(update_ops):
return tf.identity(loss)
return loss
def calculate_metrics(
self, predictions, targets=None, sample_weight=None, forward=True, loss=None
):
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
if forward:
predictions = self(predictions)
# retrieve labels from masking
if self.masking:
targets = self.masking.masked_targets
# flatten labels and remove padding index
targets = tf.reshape(targets, -1)
non_pad_mask = targets != self.padding_idx
targets = tf.boolean_mask(targets, non_pad_mask)
update_ops = []
for metric in self.eval_metrics:
update_ops.append(
metric.update_state(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
)
for metric in self.prediction_metrics:
update_ops.append(metric.update_state(predictions, sample_weight=sample_weight))
for metric in self.label_metrics:
update_ops.append(metric.update_state(targets, sample_weight=sample_weight))
for metric in self.loss_metrics:
if not loss:
loss = self.loss(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
update_ops.append(metric.update_state(loss, sample_weight=sample_weight))
return update_ops
def metric_results(self, mode: str = None) -> Dict[str, tf.Tensor]:
metrics = {metric.name: metric.result() for metric in self.eval_metrics}
topks = {metric.name: metric.top_ks for metric in self.eval_metrics}
# explode metrics for each cut-off in top_ks
results = {}
for name, metric in metrics.items():
for measure, k in zip(metric, topks[name]):
results[f"{name}_{k}"] = measure
return results
class _NextItemPredictionTask(tf.keras.layers.Layer):
"""Predict the interacted item-id probabilities.
- During inference, the task consists of predicting the next item.
- During training, the class supports the following Language modeling tasks:
Causal LM and Masked LM.
P.S.: we are planning to support Permutation LM and Replacement Token Detection
in a future release.
Parameters
----------
target_dim: int
Dimension of the target.
weight_tying: bool
The item id embedding table weights are shared with the prediction network layer.
item_embedding_table: tf.Variable
Variable of embedding table for the item.
softmax_temperature: float
Softmax temperature, used to reduce model overconfidence, so that softmax(logits / T).
Value 1.0 reduces to regular softmax.
"""
def __init__(
self,
target_dim: int,
weight_tying: bool = False,
item_embedding_table: Optional[tf.Variable] = None,
softmax_temperature: float = 0,
):
super().__init__()
self.target_dim = target_dim
self.weight_tying = weight_tying
self.item_embedding_table = item_embedding_table
self.softmax_temperature = softmax_temperature
if self.weight_tying:
if item_embedding_table is None:
raise ValueError(
"For Item Prediction task with weight tying "
"the embedding table of item_id is required ."
)
self.output_layer_bias = self.add_weight(
name="output_layer_bias",
shape=(self.target_dim,),
initializer=tf.keras.initializers.Zeros(),
)
else:
self.output_layer = tf.keras.layers.Dense(
units=self.target_dim,
kernel_initializer="random_normal",
bias_initializer="zeros",
name="logits",
)
def call(self, inputs: tf.Tensor, **kwargs):
if self.weight_tying:
logits = tf.matmul(inputs, tf.transpose(self.item_embedding_table))
logits = tf.nn.bias_add(logits, self.output_layer_bias)
else:
logits = self.output_layer(inputs)
if self.softmax_temperature:
# Softmax temperature to reduce model overconfidence
# and better calibrate probs and accuracy
logits = logits / self.softmax_temperature
predictions = tf.nn.log_softmax(logits, axis=-1)
return predictions
def _get_name(self) -> str:
return "NextItemPredictionTask"
|
transformers4rec/tf/model/prediction_task.py
|
import logging
from typing import Dict, Optional, Sequence, Type, Union
import tensorflow as tf
from tensorflow.keras.layers import Layer
from ..block.mlp import MLPBlock
from ..ranking_metric import AvgPrecisionAt, NDCGAt, RecallAt
from .base import PredictionTask
def name_fn(name, inp):
return "/".join([name, inp]) if name else None
MetricOrMetricClass = Union[tf.keras.metrics.Metric, Type[tf.keras.metrics.Metric]]
LOG = logging.getLogger("transformers4rec")
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class BinaryClassificationTask(PredictionTask):
DEFAULT_LOSS = tf.keras.losses.BinaryCrossentropy()
DEFAULT_METRICS = (
tf.keras.metrics.Precision,
tf.keras.metrics.Recall,
tf.keras.metrics.BinaryAccuracy,
tf.keras.metrics.AUC,
)
def __init__(
self,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
loss=DEFAULT_LOSS,
metrics: Sequence[MetricOrMetricClass] = DEFAULT_METRICS,
summary_type="first",
**kwargs,
):
super().__init__(
loss=loss,
metrics=list(metrics),
target_name=target_name,
task_name=task_name,
summary_type=summary_type,
task_block=task_block,
**kwargs,
)
self.pre = tf.keras.layers.Dense(1, activation="sigmoid", name=self.child_name("logit"))
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class RegressionTask(PredictionTask):
DEFAULT_LOSS = tf.keras.losses.MeanSquaredError()
DEFAULT_METRICS = (tf.keras.metrics.RootMeanSquaredError,)
def __init__(
self,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
loss=DEFAULT_LOSS,
metrics=DEFAULT_METRICS,
summary_type="first",
**kwargs,
):
super().__init__(
loss=loss,
metrics=metrics,
target_name=target_name,
task_name=task_name,
summary_type=summary_type,
task_block=task_block,
**kwargs,
)
self.pre = tf.keras.layers.Dense(1, name=self.child_name("logit"))
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class NextItemPredictionTask(PredictionTask):
"""Next-item prediction task.
Parameters
----------
loss:
Loss function. SparseCategoricalCrossentropy()
metrics:
List of RankingMetrics to be evaluated.
prediction_metrics:
List of Keras metrics used to summarize the predictions.
label_metrics:
List of Keras metrics used to summarize the labels.
loss_metrics:
List of Keras metrics used to summarize the loss.
name:
Optional task name.
target_dim: int
Dimension of the target.
weight_tying: bool
The item id embedding table weights are shared with the prediction network layer.
item_embedding_table: tf.Variable
Variable of embedding table for the item.
softmax_temperature: float
Softmax temperature, used to reduce model overconfidence, so that softmax(logits / T).
Value 1.0 reduces to regular softmax.
"""
DEFAULT_LOSS = tf.keras.losses.SparseCategoricalCrossentropy()
DEFAULT_METRICS = (
# default metrics suppose labels are int encoded
NDCGAt(top_ks=[10, 20], labels_onehot=True),
AvgPrecisionAt(top_ks=[10, 20], labels_onehot=True),
RecallAt(top_ks=[10, 20], labels_onehot=True),
)
def __init__(
self,
loss=DEFAULT_LOSS,
metrics=DEFAULT_METRICS,
target_name: Optional[str] = None,
task_name: Optional[str] = None,
task_block: Optional[Layer] = None,
weight_tying: bool = False,
target_dim: int = None,
softmax_temperature: float = 1,
**kwargs,
):
super().__init__(
loss=loss,
metrics=metrics,
target_name=target_name,
task_name=task_name,
task_block=task_block,
**kwargs,
)
self.weight_tying = weight_tying
self.target_dim = target_dim
self.softmax_temperature = softmax_temperature
# The table is only assigned in build() when weight tying is enabled, but it is
# always forwarded to _NextItemPredictionTask, so default it here to avoid an
# AttributeError when weight_tying=False.
self.item_embedding_table = None
def build(self, input_shape, body, inputs=None):
# Retrieve the embedding module to get the name of itemid col and its related table
if not len(input_shape) == 3 or isinstance(input_shape, dict):
raise ValueError(
"NextItemPredictionTask needs a 3-dim vector as input, found:" f"{input_shape}"
)
if not inputs:
inputs = body.inputs
if not getattr(inputs, "item_id", None):
raise ValueError(
"For Item Prediction task a categorical_module "
"including an item_id column is required."
)
self.embeddings = inputs.categorical_layer
if not self.target_dim:
self.target_dim = self.embeddings.item_embedding_table.shape[0]
if self.weight_tying:
self.item_embedding_table = self.embeddings.item_embedding_table
item_dim = self.item_embedding_table.shape[1]
if input_shape[-1] != item_dim and not self.task_block:
LOG.warning(
f"Projecting inputs of NextItemPredictionTask to'{item_dim}' "
f"As weight tying requires the input dimension '{input_shape[-1]}' "
f"to be equal to the item-id embedding dimension '{item_dim}'"
)
# project input tensors to same dimension as item-id embeddings
self.task_block = MLPBlock([item_dim])
# Retrieve the masking if used in the model block
self.masking = inputs.masking
if self.masking:
self.padding_idx = self.masking.padding_idx
self.pre = _NextItemPredictionTask(
target_dim=self.target_dim,
weight_tying=self.weight_tying,
item_embedding_table=self.item_embedding_table,
softmax_temperature=self.softmax_temperature,
)
return super().build(input_shape)
def call(self, inputs, **kwargs):
if isinstance(inputs, (tuple, list)):
inputs = inputs[0]
x = inputs
if self.task_block:
x = self.task_block(x)
# retrieve labels from masking
if self.masking:
labels = self.masking.masked_targets
else:
labels = self.embeddings.item_seq
# remove vectors of padded items
trg_flat = tf.reshape(labels, -1)
non_pad_mask = trg_flat != self.padding_idx
x = self.remove_pad_3d(x, non_pad_mask)
# compute predictions probs
x = self.pre(x)
return x
def remove_pad_3d(self, inp_tensor, non_pad_mask):
# inp_tensor: (n_batch x seqlen x emb_dim)
inp_tensor = tf.reshape(inp_tensor, (-1, inp_tensor.shape[-1]))
inp_tensor_fl = tf.boolean_mask(
inp_tensor, tf.broadcast_to(tf.expand_dims(non_pad_mask, 1), inp_tensor.shape)
)
out_tensor = tf.reshape(inp_tensor_fl, (-1, inp_tensor.shape[1]))
return out_tensor
def compute_loss( # type: ignore
self,
inputs,
targets=None,
compute_metrics=True,
sample_weight: Optional[tf.Tensor] = None,
**kwargs,
) -> tf.Tensor:
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
predictions = self(inputs)
# retrieve labels from masking
if self.masking:
targets = self.masking.masked_targets
# flatten labels and remove padding index
targets = tf.reshape(targets, -1)
non_pad_mask = targets != self.padding_idx
targets = tf.boolean_mask(targets, non_pad_mask)
loss = self.loss(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
if compute_metrics:
update_ops = self.calculate_metrics(predictions, targets, forward=False, loss=loss)
update_ops = [x for x in update_ops if x is not None]
with tf.control_dependencies(update_ops):
return tf.identity(loss)
return loss
def calculate_metrics(
self, predictions, targets=None, sample_weight=None, forward=True, loss=None
):
if isinstance(targets, dict) and self.target_name:
targets = targets[self.target_name]
if forward:
predictions = self(predictions)
# retrieve labels from masking
if self.masking:
targets = self.masking.masked_targets
# flatten labels and remove padding index
targets = tf.reshape(targets, -1)
non_pad_mask = targets != self.padding_idx
targets = tf.boolean_mask(targets, non_pad_mask)
update_ops = []
for metric in self.eval_metrics:
update_ops.append(
metric.update_state(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
)
for metric in self.prediction_metrics:
update_ops.append(metric.update_state(predictions, sample_weight=sample_weight))
for metric in self.label_metrics:
update_ops.append(metric.update_state(targets, sample_weight=sample_weight))
for metric in self.loss_metrics:
if not loss:
loss = self.loss(y_true=targets, y_pred=predictions, sample_weight=sample_weight)
update_ops.append(metric.update_state(loss, sample_weight=sample_weight))
return update_ops
def metric_results(self, mode: str = None) -> Dict[str, tf.Tensor]:
metrics = {metric.name: metric.result() for metric in self.eval_metrics}
topks = {metric.name: metric.top_ks for metric in self.eval_metrics}
# explode metrics for each cut-off in top_ks
results = {}
for name, metric in metrics.items():
for measure, k in zip(metric, topks[name]):
results[f"{name}_{k}"] = measure
return results
class _NextItemPredictionTask(tf.keras.layers.Layer):
"""Predict the interacted item-id probabilities.
- During inference, the task consists of predicting the next item.
- During training, the class supports the following Language modeling tasks:
Causal LM and Masked LM.
P.S.: we are planning to support Permutation LM and Replacement Token Detection
in a future release.
Parameters
----------
target_dim: int
Dimension of the target.
weight_tying: bool
The item id embedding table weights are shared with the prediction network layer.
item_embedding_table: tf.Variable
Variable of embedding table for the item.
softmax_temperature: float
Softmax temperature, used to reduce model overconfidence, so that softmax(logits / T).
Value 1.0 reduces to regular softmax.
"""
def __init__(
self,
target_dim: int,
weight_tying: bool = False,
item_embedding_table: Optional[tf.Variable] = None,
softmax_temperature: float = 0,
):
super().__init__()
self.target_dim = target_dim
self.weight_tying = weight_tying
self.item_embedding_table = item_embedding_table
self.softmax_temperature = softmax_temperature
if self.weight_tying:
if item_embedding_table is None:
raise ValueError(
"For Item Prediction task with weight tying "
"the embedding table of item_id is required ."
)
self.output_layer_bias = self.add_weight(
name="output_layer_bias",
shape=(self.target_dim,),
initializer=tf.keras.initializers.Zeros(),
)
else:
self.output_layer = tf.keras.layers.Dense(
units=self.target_dim,
kernel_initializer="random_normal",
bias_initializer="zeros",
name="logits",
)
def call(self, inputs: tf.Tensor, **kwargs):
if self.weight_tying:
logits = tf.matmul(inputs, tf.transpose(self.item_embedding_table))
logits = tf.nn.bias_add(logits, self.output_layer_bias)
else:
logits = self.output_layer(inputs)
if self.softmax_temperature:
# Softmax temperature to reduce model overconfidence
# and better calibrate probs and accuracy
logits = logits / self.softmax_temperature
predictions = tf.nn.log_softmax(logits, axis=-1)
return predictions
def _get_name(self) -> str:
return "NextItemPredictionTask"
| 0.949412 | 0.297467 |
import os
import sys
import inspect
import meshlabxml as mlx
import platform
import glob
import shutil
import time
from threading import Thread
'''
Description: Using the plain script "simplifybulk.py", it takes 17 minutes to decimate a 3M-face mesh into 7 resolutions
[100K, 200K, 300K, 400K, 500K, 600K, 750K]. With this version of the program, I have included threading,
so the time it takes now is only around 5 minutes (a considerable improvement).
Usage: python3 simplifybulkthreaded.py
You can of course include functionality to take arguments from command line/terminal like I did in simplify.py
DO NOT FORGET to change: Decimations_List, originalMesh (name of original mesh), SimplifiedMesh & Textures to what suits you.
Enjoy!
'''
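# A hedged sketch of the command-line parameterization mentioned above (the flag
# names are illustrative assumptions, not taken from simplify.py):
#
#   import argparse
#   parser = argparse.ArgumentParser(description="Bulk threaded mesh decimation")
#   parser.add_argument("--input", default="Lantern.1.obj")
#   parser.add_argument("--output", default="Lantern_Simplified.obj")
#   parser.add_argument("--faces", type=int, nargs="+", default=[100000, 200000, 300000])
#   parser.add_argument("--textures", action="store_true")
#   args = parser.parse_args()
#   # then: Decimations_List, originalMesh, SimplifiedMesh, Textures = args.faces, args.input, args.output, args.textures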
def simplify(originalMeshName, SimplifiedMeshName, NumberOfFaces, WithTexture):
# File names
FilterScript = 'SimplificationFilter.mlx' # script file
original_mesh = originalMeshName # input file
simplified_mesh = SimplifiedMeshName # output file
Num_Of_Faces = int(NumberOfFaces) # Final Number of Faces
#Check the input mesh number of faces (so that we do not decimate to a higher number of faces than original mesh)
MetricsMeshDictionary = {}
MetricsMeshDictionary = mlx.files.measure_topology(original_mesh)
#print (MetricsMeshDictionary)
print('\n Number of faces of original mesh is: ' + str(MetricsMeshDictionary['face_num'] ))
if(MetricsMeshDictionary['face_num'] <= Num_Of_Faces):
#exit the script and print a message about it
print("\n SORRY your decimated mesh can not have higher number of faces that the input mesh.....")
print("\n ......................................................................................")
sys.exit()
#Creating a folder named as the Number of faces: named '150000'
print('\n Creating a folder to store the decimated model ...........')
if not os.path.exists(str(Num_Of_Faces)):
os.makedirs(str(Num_Of_Faces))
simplified_meshScript = mlx.FilterScript(file_in=original_mesh, file_out=str(Num_Of_Faces) + '/' + simplified_mesh,
ml_version='2016.12') # Create FilterScript object
mlx.remesh.simplify(simplified_meshScript, texture=WithTexture, faces=Num_Of_Faces,
target_perc=0.0, quality_thr=1.0, preserve_boundary=True,
boundary_weight=1.0, preserve_normal=True,
optimal_placement=True, planar_quadric=True,
selected=False, extra_tex_coord_weight=1.0)
print('\n Beginning the process of Decimation ...........')
simplified_meshScript.run_script() # Run the script
os.chdir(str(Num_Of_Faces))
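# NOTE: os.chdir changes the working directory of the whole process, so this chdir
# and the chdir('..') below can race when several simplify() threads run concurrently;
# building paths with os.path.join would be thread-safe.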
print('\n Process of Decimation Finished ...')
print('\n Copying textures (PNG and JPEG) into the folder of decimated model....')
#go back to parent directory so we can copy the textures to the 3D Model folder
os.chdir('..')
#Now checking for textures in the folder of the input mesh.... (plz change if needed)
allfilelist= os.listdir('.')
for Afile in allfilelist[:]:
if not(Afile.endswith(".png") or Afile.endswith(".PNG") or Afile.endswith(".jpg") or Afile.endswith(".JPG")):
allfilelist.remove(Afile)
print('\n Found the LIST of images in PNG and JPEG (textures): ')
print(allfilelist)
for file in allfilelist:
shutil.copy(file, str(Num_Of_Faces))
print('\n sleeping for 3 seconds.... ')
time.sleep(3)
Decimations_List = [100000, 200000, 300000, 400000, 500000, 600000, 750000]
originalMesh = 'Lantern.1.obj'
SimplifiedMesh = 'Lantern_Simplified.obj'
Textures = True
print("...........Starting timer for the decimation process............")
initial_time = time.time()
threads_list = []
for decimationResolution in Decimations_List:
threads_list.append(Thread(target=simplify, args=(originalMesh, SimplifiedMesh, decimationResolution, Textures)))
for thread in threads_list:
try:
thread.start()
except KeyboardInterrupt: # std Python exception
continue # moves to next thread iterable
#Waiting for ALL threads to finish: join every thread, not just the last one started
for thread in threads_list:
thread.join()
print("I Finished Decimating in tandem.....")
finish_time = time.time()
print("---Decimation Process took: %s seconds ---" % (finish_time - initial_time))
print("\n Done.... have a good day!")
|
simplifybulkthreaded.py
|
import os
import sys
import inspect
import meshlabxml as mlx
import platform
import glob
import shutil
import time
from threading import Thread
'''
Description: Using the plain script "simplifybulk.py", it takes 17 minutes to decimate a 3M-face mesh into 7 resolutions
[100K, 200K, 300K, 400K, 500K, 600K, 750K]. With this version of the program, I have included threading,
so the time it takes now is only around 5 minutes (a considerable improvement).
Usage: python3 simplifybulkthreaded.py
You can of course include functionality to take arguments from command line/terminal like I did in simplify.py
DO NOT FORGET to change: Decimations_List, originalMesh (name of original mesh), SimplifiedMesh & Textures to what suits you.
Enjoy!
'''
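# A hedged sketch of the command-line parameterization mentioned above (the flag
# names are illustrative assumptions, not taken from simplify.py):
#
#   import argparse
#   parser = argparse.ArgumentParser(description="Bulk threaded mesh decimation")
#   parser.add_argument("--input", default="Lantern.1.obj")
#   parser.add_argument("--output", default="Lantern_Simplified.obj")
#   parser.add_argument("--faces", type=int, nargs="+", default=[100000, 200000, 300000])
#   parser.add_argument("--textures", action="store_true")
#   args = parser.parse_args()
#   # then: Decimations_List, originalMesh, SimplifiedMesh, Textures = args.faces, args.input, args.output, args.textures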
def simplify(originalMeshName, SimplifiedMeshName, NumberOfFaces, WithTexture):
# File names
FilterScript = 'SimplificationFilter.mlx' # script file
original_mesh = originalMeshName # input file
simplified_mesh = SimplifiedMeshName # output file
Num_Of_Faces = int(NumberOfFaces) # Final Number of Faces
#Check the input mesh number of faces (so that we do not decimate to a higher number of faces than original mesh)
MetricsMeshDictionary = {}
MetricsMeshDictionary = mlx.files.measure_topology(original_mesh)
#print (MetricsMeshDictionary)
print('\n Number of faces of original mesh is: ' + str(MetricsMeshDictionary['face_num'] ))
if(MetricsMeshDictionary['face_num'] <= Num_Of_Faces):
#exit the script and print a message about it
print("\n SORRY your decimated mesh can not have higher number of faces that the input mesh.....")
print("\n ......................................................................................")
sys.exit()
#Creating a folder named as the Number of faces: named '150000'
print('\n Creating a folder to store the decimated model ...........')
if not os.path.exists(str(Num_Of_Faces)):
os.makedirs(str(Num_Of_Faces))
simplified_meshScript = mlx.FilterScript(file_in=original_mesh, file_out=str(Num_Of_Faces) + '/' + simplified_mesh,
ml_version='2016.12') # Create FilterScript object
mlx.remesh.simplify(simplified_meshScript, texture=WithTexture, faces=Num_Of_Faces,
target_perc=0.0, quality_thr=1.0, preserve_boundary=True,
boundary_weight=1.0, preserve_normal=True,
optimal_placement=True, planar_quadric=True,
selected=False, extra_tex_coord_weight=1.0)
print('\n Beginning the process of Decimation ...........')
simplified_meshScript.run_script() # Run the script
os.chdir(str(Num_Of_Faces))
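# NOTE: os.chdir changes the working directory of the whole process, so this chdir
# and the chdir('..') below can race when several simplify() threads run concurrently;
# building paths with os.path.join would be thread-safe.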
print('\n Process of Decimation Finished ...')
print('\n Copying textures (PNG and JPEG) into the folder of decimated model....')
#go back to parent directory so we can copy the textures to the 3D Model folder
os.chdir('..')
#Now checking for textures in the folder of the input mesh.... (plz change if needed)
allfilelist= os.listdir('.')
for Afile in allfilelist[:]:
if not(Afile.endswith(".png") or Afile.endswith(".PNG") or Afile.endswith(".jpg") or Afile.endswith(".JPG")):
allfilelist.remove(Afile)
print('\n Found the LIST of images in PNG and JPEG (textures): ')
print(allfilelist)
for file in allfilelist:
shutil.copy(file, str(Num_Of_Faces))
print('\n sleeping for 3 seconds.... ')
time.sleep(3)
Decimations_List = [100000, 200000, 300000, 400000, 500000, 600000, 750000]
originalMesh = 'Lantern.1.obj'
SimplifiedMesh = 'Lantern_Simplified.obj'
Textures = True
print("...........Starting timer for the decimation process............")
initial_time = time.time()
threads_list = []
for decimationResolution in Decimations_List:
threads_list.append(Thread(target=simplify, args=(originalMesh, SimplifiedMesh, decimationResolution, Textures)))
for thread in threads_list:
try:
thread.start()
except KeyboardInterrupt: # std Python exception
continue # moves to next thread iterable
#Waiting for ALL threads to finish: join every thread, not just the last one started
for thread in threads_list:
thread.join()
print("I Finished Decimating in tandem.....")
finish_time = time.time()
print("---Decimation Process took: %s seconds ---" % (finish_time - initial_time))
print("\n Done.... have a good day!")
| 0.108012 | 0.174164 |
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dxm.lib.masking_api.api_client import ApiClient
class ReidentificationJobApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
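# A hedged usage sketch (host/credential configuration of ApiClient is assumed to
# be handled elsewhere and is not shown by this generated client):
#
#   api = ReidentificationJobApi()  # or ReidentificationJobApi(configured_api_client)
#   jobs = api.get_all_reidentification_jobs(environment_id=1, page_size=100)
#   job = api.get_reidentification_job_by_id(123)
#   api.delete_reidentification_job(123)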
def create_reidentification_job(self, body, **kwargs): # noqa: E501
"""Create re-identification job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_reidentification_job(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReidentificationJob body: The re-identification job to create (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_reidentification_job_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_reidentification_job_with_http_info(body, **kwargs) # noqa: E501
return data
def create_reidentification_job_with_http_info(self, body, **kwargs): # noqa: E501
"""Create re-identification job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_reidentification_job_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReidentificationJob body: The re-identification job to create (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_reidentification_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `create_reidentification_job`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_reidentification_job(self, reidentification_job_id, **kwargs): # noqa: E501
"""Delete re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_reidentification_job(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_reidentification_job_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
else:
(data) = self.delete_reidentification_job_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
return data
def delete_reidentification_job_with_http_info(self, reidentification_job_id, **kwargs): # noqa: E501
"""Delete re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_reidentification_job_with_http_info(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_reidentification_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `reidentification_job_id` when calling `delete_reidentification_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'reidentification_job_id' in params:
path_params['reidentificationJobId'] = params['reidentification_job_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs/{reidentificationJobId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_reidentification_jobs(self, **kwargs): # noqa: E501
"""Get all re-identification jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_reidentification_jobs(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get re-identification jobs. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:param int environment_id: The ID of the environment to get all re-identification jobs from
:return: ReidentificationJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_reidentification_jobs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_reidentification_jobs_with_http_info(**kwargs) # noqa: E501
return data
def get_all_reidentification_jobs_with_http_info(self, **kwargs): # noqa: E501
"""Get all re-identification jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_reidentification_jobs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get re-identification jobs. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:param int environment_id: The ID of the environment to get all re-identification jobs from
:return: ReidentificationJobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_number', 'page_size', 'environment_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_reidentification_jobs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page_number' in params:
query_params.append(('page_number', params['page_number'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'environment_id' in params:
query_params.append(('environment_id', params['environment_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJobList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_reidentification_job_by_id(self, reidentification_job_id, **kwargs): # noqa: E501
"""Get re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_reidentification_job_by_id(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to get (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_reidentification_job_by_id_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
else:
(data) = self.get_reidentification_job_by_id_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
return data
def get_reidentification_job_by_id_with_http_info(self, reidentification_job_id, **kwargs): # noqa: E501
"""Get re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_reidentification_job_by_id_with_http_info(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to get (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_reidentification_job_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `reidentification_job_id` when calling `get_reidentification_job_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'reidentification_job_id' in params:
path_params['reidentificationJobId'] = params['reidentification_job_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs/{reidentificationJobId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_reidentification_job(self, reidentification_job_id, body, **kwargs): # noqa: E501
"""Update re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_reidentification_job(reidentification_job_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to update (required)
:param ReidentificationJob body: The updated re-identification job (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_reidentification_job_with_http_info(reidentification_job_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_reidentification_job_with_http_info(reidentification_job_id, body, **kwargs) # noqa: E501
return data
def update_reidentification_job_with_http_info(self, reidentification_job_id, body, **kwargs): # noqa: E501
"""Update re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_reidentification_job_with_http_info(reidentification_job_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to update (required)
:param ReidentificationJob body: The updated re-identification job (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_reidentification_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `reidentification_job_id` when calling `update_reidentification_job`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `update_reidentification_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'reidentification_job_id' in params:
path_params['reidentificationJobId'] = params['reidentification_job_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs/{reidentificationJobId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
dxm/lib/masking_api/api/reidentification_job_api.py
|
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dxm.lib.masking_api.api_client import ApiClient
class ReidentificationJobApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
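# A hedged usage sketch (host/credential configuration of ApiClient is assumed to
# be handled elsewhere and is not shown by this generated client):
#
#   api = ReidentificationJobApi()  # or ReidentificationJobApi(configured_api_client)
#   jobs = api.get_all_reidentification_jobs(environment_id=1, page_size=100)
#   job = api.get_reidentification_job_by_id(123)
#   api.delete_reidentification_job(123)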
def create_reidentification_job(self, body, **kwargs): # noqa: E501
"""Create re-identification job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_reidentification_job(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReidentificationJob body: The re-identification job to create (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_reidentification_job_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_reidentification_job_with_http_info(body, **kwargs) # noqa: E501
return data
def create_reidentification_job_with_http_info(self, body, **kwargs): # noqa: E501
"""Create re-identification job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_reidentification_job_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReidentificationJob body: The re-identification job to create (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_reidentification_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `create_reidentification_job`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_reidentification_job(self, reidentification_job_id, **kwargs): # noqa: E501
"""Delete re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_reidentification_job(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_reidentification_job_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
else:
(data) = self.delete_reidentification_job_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
return data
def delete_reidentification_job_with_http_info(self, reidentification_job_id, **kwargs): # noqa: E501
"""Delete re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_reidentification_job_with_http_info(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_reidentification_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `reidentification_job_id` when calling `delete_reidentification_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'reidentification_job_id' in params:
path_params['reidentificationJobId'] = params['reidentification_job_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs/{reidentificationJobId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_reidentification_jobs(self, **kwargs): # noqa: E501
"""Get all re-identification jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_reidentification_jobs(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get re-identification jobs. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:param int environment_id: The ID of the environment to get all re-identification jobs from
:return: ReidentificationJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_reidentification_jobs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_reidentification_jobs_with_http_info(**kwargs) # noqa: E501
return data
def get_all_reidentification_jobs_with_http_info(self, **kwargs): # noqa: E501
"""Get all re-identification jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_reidentification_jobs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get re-identification jobs. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:param int environment_id: The ID of the environment to get all re-identification jobs from
:return: ReidentificationJobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_number', 'page_size', 'environment_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_reidentification_jobs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page_number' in params:
query_params.append(('page_number', params['page_number'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
if 'environment_id' in params:
query_params.append(('environment_id', params['environment_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJobList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_reidentification_job_by_id(self, reidentification_job_id, **kwargs): # noqa: E501
"""Get re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_reidentification_job_by_id(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to get (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_reidentification_job_by_id_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
else:
(data) = self.get_reidentification_job_by_id_with_http_info(reidentification_job_id, **kwargs) # noqa: E501
return data
def get_reidentification_job_by_id_with_http_info(self, reidentification_job_id, **kwargs): # noqa: E501
"""Get re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_reidentification_job_by_id_with_http_info(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to get (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_reidentification_job_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `reidentification_job_id` when calling `get_reidentification_job_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'reidentification_job_id' in params:
path_params['reidentificationJobId'] = params['reidentification_job_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs/{reidentificationJobId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_reidentification_job(self, reidentification_job_id, body, **kwargs): # noqa: E501
"""Update re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_reidentification_job(reidentification_job_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to update (required)
:param ReidentificationJob body: The updated re-identification job (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_reidentification_job_with_http_info(reidentification_job_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_reidentification_job_with_http_info(reidentification_job_id, body, **kwargs) # noqa: E501
return data
def update_reidentification_job_with_http_info(self, reidentification_job_id, body, **kwargs): # noqa: E501
"""Update re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_reidentification_job_with_http_info(reidentification_job_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to update (required)
:param ReidentificationJob body: The updated re-identification job (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_reidentification_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `reidentification_job_id` when calling `update_reidentification_job`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `update_reidentification_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'reidentification_job_id' in params:
path_params['reidentificationJobId'] = params['reidentification_job_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/reidentification-jobs/{reidentificationJobId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReidentificationJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
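# --- Illustrative sketch (not part of the generated client) ---------------------
# Every public method above follows the same dispatch pattern: it forwards to its
# *_with_http_info twin, and with async_req=True it returns a handle whose .get()
# blocks for the result. The stand-in below is hypothetical (no real ApiClient or
# HTTP call); it only demonstrates that synchronous/asynchronous calling convention.
from multiprocessing.pool import ThreadPool

_example_pool = ThreadPool(1)

def _fake_call_api(body):
    # stand-in for ApiClient.call_api
    return {"created": body}

def example_create_job(body, **kwargs):
    if kwargs.get("async_req"):
        # asynchronous: hand the work to a worker thread and return the AsyncResult
        return _example_pool.apply_async(_fake_call_api, (body,))
    # synchronous: call directly and return the data
    return _fake_call_api(body)

print(example_create_job({"name": "job-1"}))                     # synchronous usage
thread = example_create_job({"name": "job-2"}, async_req=True)   # asynchronous usage
print(thread.get())                                              # block for the result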
| 0.712932 | 0.055926 |
from __future__ import division, unicode_literals, print_function, absolute_import
import numpy as np
import traitlets as tl
from podpac.core.node import NodeException
from podpac.core.units import UnitsDataArray
from podpac.core.utils import common_doc
from podpac.core.compositor.compositor import COMMON_COMPOSITOR_DOC, BaseCompositor
@common_doc(COMMON_COMPOSITOR_DOC)
class OrderedCompositor(BaseCompositor):
"""Compositor that combines sources based on their order in self.sources.
The sources should generally be interpolated before being composited (i.e. not raw datasources).
Attributes
----------
sources : list
Source nodes, in order of preference. Later sources are only used where earlier sources do not provide data.
source_coordinates : :class:`podpac.Coordinates`
Coordinates that make each source unique. Must be the same size as ``sources`` and single-dimensional. Optional.
multithreading : bool, optional
Default is False. If False, the sources are always evaluated in serial, ignoring any MULTITHREADING settings.
"""
multithreading = tl.Bool(False)
@common_doc(COMMON_COMPOSITOR_DOC)
def composite(self, coordinates, data_arrays, result=None):
"""Composites data_arrays in order that they appear. Once a request contains no nans, the result is returned.
Parameters
----------
coordinates : :class:`podpac.Coordinates`
{requested_coordinates}
data_arrays : generator
Evaluated source data, in the same order as the sources.
result : podpac.UnitsDataArray, optional
{eval_output}
Returns
-------
{eval_return} This composites the sources together until there are no nans or no more sources.
"""
if result is None:
result = self.create_output_array(coordinates)
else:
result[:] = np.nan
mask = UnitsDataArray.create(coordinates, outputs=self.outputs, data=0, dtype=bool)
for data in data_arrays:
if self.outputs is None:
try:
data = data.transpose(*result.dims)
except ValueError:
raise NodeException(
"Cannot evaluate compositor with requested dims %s. "
"The compositor source dims are %s. "
"Specify the compositor 'dims' attribute to ignore extra requested dims."
% (coordinates.dims, data.dims)
)
self._composite(result, data, mask)
else:
for name in data["output"]:
self._composite(result.sel(output=name), data.sel(output=name), mask.sel(output=name))
# stop if the results are full
if np.all(mask):
break
return result
@staticmethod
def _composite(result, data, mask):
source_mask = np.isfinite(data.data)
b = ~mask & source_mask
result.data[b.data] = data.data[b.data]
mask |= source_mask
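# --- Illustrative sketch --------------------------------------------------------
# A minimal numpy illustration of the ordered-compositing rule implemented by
# _composite above: each source only fills cells that are still missing, and the
# loop stops once the result is full. Plain arrays stand in for UnitsDataArray.
import numpy as np

def _ordered_composite_demo(layers):
    result = np.full(layers[0].shape, np.nan)
    mask = np.zeros(result.shape, dtype=bool)   # True where result is already filled
    for data in layers:
        source_mask = np.isfinite(data)         # cells this source can provide
        b = ~mask & source_mask                 # still-empty cells this source fills
        result[b] = data[b]
        mask |= source_mask
        if np.all(mask):                        # result is full; skip remaining sources
            break
    return result

print(_ordered_composite_demo([
    np.array([1.0, np.nan, np.nan]),
    np.array([9.0, 2.0, np.nan]),
    np.array([9.0, 9.0, 3.0]),
]))  # -> [1. 2. 3.]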
|
podpac/core/compositor/ordered_compositor.py
|
| 0.82425 | 0.461623 |
import os
import numpy as np
import pandas as pd
import time as tm
import rpy2.robjects as robjects
import tensorflow as tf
import math
import scipy.io as sio
import optunity as opt
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
import SparseMatrix as sm
def run_LAmbDA(DataPath, LabelsPath, CV_RDataPath, OutputDir, GeneOrderPath = "", NumGenes = 0):
'''
Run the LAmbDA classifier.
Wrapper script to run LAmbDA on a benchmark dataset with 5-fold cross-validation;
it outputs the lists of true and predicted cell labels as csv files, as well as the computation time.
Parameters
----------
DataPath : Data file path (.csv), cells-genes matrix with cell unique barcodes
as row names and gene names as column names.
LabelsPath : Cell population annotations file path (.csv).
CV_RDataPath : Cross validation RData file path (.RData), obtained from Cross_Validation.R function.
OutputDir : Output directory defining the path of the exported file.
GeneOrderPath : Gene order file path (.csv) obtained from feature selection,
defining the genes order for each cross validation fold, default is NULL.
NumGenes : Number of genes used in case of feature selection (integer), default is 0.
'''
# read the Rdata file
robjects.r['load'](CV_RDataPath)
nfolds = np.array(robjects.r['n_folds'], dtype = 'int')
tokeep = np.array(robjects.r['Cells_to_Keep'], dtype = 'bool')
col = np.array(robjects.r['col_Index'], dtype = 'int')
col = col - 1
test_ind = np.array(robjects.r['Test_Idx'])
train_ind = np.array(robjects.r['Train_Idx'])
# read the data
data = sm.importMM(DataPath)
labels = pd.read_csv(LabelsPath, header=0,index_col=None, sep=',', usecols = col)
labels = labels.iloc[tokeep]
data = data.iloc[tokeep]
data = data.fillna("0").astype(int)
# read the feature file
if (NumGenes > 0):
features = pd.read_csv(GeneOrderPath,header=0,index_col=None, sep=',')
# folder with results
os.chdir(OutputDir)
tr_time=[]
ts_time=[]
truelab = np.zeros([len(labels),1],dtype = int)
predlab = np.zeros([len(labels),1],dtype = int)
for i in range(np.squeeze(nfolds)):
global X, Y, Gnp, Dnp, train, test, prt, cv
test_ind_i = np.array(test_ind[i], dtype = 'int') - 1
train_ind_i = np.array(train_ind[i], dtype = 'int') - 1
X = np.array(data)
if (NumGenes > 0):
X = np.log2(X/10+1)
feat_to_use = features.iloc[0:NumGenes,i]
X = X[:,feat_to_use]
else:
X = np.log2(np.transpose(select_feats(np.transpose(X),0.5,80))/10+1)
uniq = np.unique(labels)
Y = np.zeros([len(labels),len(uniq)],int)
for j in range(len(uniq)):
Y[np.where(labels == uniq[j])[0],j] = 1
Y = np.array(Y)
Gnp = np.zeros([len(uniq),len(uniq)],int)
np.fill_diagonal(Gnp,1)
Gnp = np.array(Gnp)
Dnp = np.ones([len(uniq),1],int)
Dnp = np.array(Dnp)
train_samp = int(np.floor(0.75*len(train_ind_i)))
test_samp = len(train_ind_i) - train_samp
perm = np.random.permutation(len(train_ind_i))
train = perm[0:train_samp]
test = perm[train_samp:test_samp+1]
while(np.sum(np.sum(Y[train,:],0)<5)>0):
perm = np.random.permutation(X.shape[0])
train = perm[0:train_samp+1]
test = perm[train_samp+1:train_samp+test_samp+1]
cv = i
optunity_it = 0
prt = False
opt_params = None
start=tm.time()
opt_params, _, _ = opt.minimize(run_LAmbDA2,solver_name='sobol', gamma=[0.8,1.2], delta=[0.05,0.95], tau=[10.0,11.0], prc_cut=[20,50], bs_prc=[0.2,0.6], num_trees=[10,200], max_nodes=[100,1000], num_evals=50)
tr_time.append(tm.time()-start)
print("Finished training!")
prt = True
train = train_ind_i
test = test_ind_i
start=tm.time()
err = run_LAmbDA2(opt_params['gamma'], opt_params['delta'], opt_params['tau'], opt_params['prc_cut'], opt_params['bs_prc'], opt_params['num_trees'], opt_params['max_nodes'])
ts_time.append(tm.time()-start)
tf.reset_default_graph();
predfile = 'preds_cv' + str(cv) + '.mat'
truefile = 'truth_cv' + str(cv) + '.mat'
pred = sio.loadmat(predfile)
truth = sio.loadmat(truefile)
pred = pred['preds']
truth = truth['labels']
pred_ind = np.argmax(pred,axis=1)
truth_ind = np.argmax(truth,axis=1)
predlab[test_ind_i,0] = pred_ind
truelab[test_ind_i,0] = truth_ind
truelab = pd.DataFrame(truelab)
predlab = pd.DataFrame(predlab)
tr_time = pd.DataFrame(tr_time)
ts_time = pd.DataFrame(ts_time)
if (NumGenes == 0):
truelab.to_csv("LAmbDA_True_Labels.csv", index = False)
predlab.to_csv("LAmbDA_Pred_Labels.csv", index = False)
tr_time.to_csv("LAmbDA_Training_Time.csv", index = False)
ts_time.to_csv("LAmbDA_Testing_Time.csv", index = False)
else:
truelab.to_csv("LAmbDA_" + str(NumGenes) + "_True_Labels.csv", index = False)
predlab.to_csv("LAmbDA_" + str(NumGenes) + "_Pred_Labels.csv", index = False)
tr_time.to_csv("LAmbDA_" + str(NumGenes) + "_Training_Time.csv", index = False)
ts_time.to_csv("LAmbDA_" + str(NumGenes) + "_Testing_Time.csv", index = False)
##### Functions copied from LAmbDA's Github
def wt_cutoff(colnum,cutoff,Gtmp,gamma):
rowsums = np.sum(Gtmp,axis=1);
return(math.ceil(cutoff*(math.log((max(rowsums)/rowsums[colnum])+1,2)**gamma)))
def resample(prc_cut,Y,Gtmp,train,gamma):
add = list()
rem = list()
colsums = np.sum(Y[train,:],axis=0);
cutoff = math.ceil(np.percentile(colsums,prc_cut));
for i in range(len(colsums)):
if colsums[i] == 0:
pass
elif colsums[i] < wt_cutoff(i,cutoff,Gtmp,gamma):
idx = np.squeeze(np.array(np.where(Y[train,i]>=1)));
choice = np.random.choice(train[idx],int(wt_cutoff(i,cutoff,Gtmp,gamma)-colsums[i]))
add = add + choice.tolist();
elif colsums[i] == wt_cutoff(i,cutoff,Gtmp,gamma):
pass
else:
idx = np.squeeze(np.array(np.where(Y[train,i]>=1)));
choice = np.random.choice(train[idx],int(colsums[i]-wt_cutoff(i,cutoff,Gtmp,gamma)),replace=False)
rem = rem + choice.tolist()
return np.concatenate((list([val for val in train if val not in rem]),add));
def select_feats(Xtmp,num_zero_prc_cut,var_prc_cut):
#*********************************************************************
# remove features with many zeros
num_feat_zeros = np.sum(Xtmp==0,axis=1);
Xtmp = Xtmp[num_feat_zeros<num_zero_prc_cut*Xtmp.shape[1],:]
#*********************************************************************
# remove features with low variance
feat_vars = np.var(Xtmp,axis=1)
Xtmp = Xtmp[feat_vars>np.percentile(feat_vars,var_prc_cut),:]
return(Xtmp)
def get_yn(predict,ys,delta,tau,output_feats):
D = tf.cast(Dnp, tf.float32);
G = tf.cast(Gnp, tf.float32);
ys = tf.cast(ys, tf.float32);
#print("start")
Cm = tf.matmul(tf.transpose(tf.matmul(ys,D)),predict+0.1)/tf.reshape(tf.reduce_sum(tf.transpose(tf.matmul(ys,D)),1),(-1,1));
#print("1")
mCm = tf.reshape(tf.reduce_mean(tf.cast(tf.matmul(tf.transpose(D),G)>0,tf.float32)*Cm,1),(-1,1));
#print("2")
yw = tf.multiply(predict+0.1,tf.matmul(tf.matmul(ys,D),tf.pow(mCm/Cm,tau)));
#print("3")
ye = tf.multiply(tf.matmul(ys,G),yw);
#print("4")
yt = tf.matmul(ys,tf.matmul(tf.transpose(ys),ye));
#print("5")
ya = (delta*yt)+((1-delta)*ye)
#print("6")
yn = tf.cast(tf.one_hot(tf.argmax(ya,axis=1),output_feats), dtype=tf.float32)
#print("7")
return(yn)
def get_yi(rowsums,G2,ys):
G2 = tf.cast(G2, tf.float32);
ys = tf.cast(ys, tf.float32);
yi = tf.cast(tf.matmul(ys,G2), dtype=tf.float32);
return(yi)
def run_LAmbDA2(gamma, delta, tau, prc_cut, bs_prc, num_trees, max_nodes):
global X, Y, Gnp, Dnp, train, test, prt, cv
D = tf.cast(Dnp, tf.float32);
G = tf.cast(Gnp, tf.float32);
#optunity_it = optunity_it+1;
num_trees = int(num_trees);
max_nodes = int(max_nodes);
prc_cut = int(np.ceil(prc_cut));
print("gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%i, bs_prc=%.4f, num_trees=%i, max_nodes=%i" % (gamma, delta, tau, prc_cut, bs_prc, num_trees, max_nodes))
input_feats = X.shape[1];
num_labls = G.shape.as_list();
output_feats = num_labls[1];
#print(output_feats)
num_labls = num_labls[0];
rowsums = np.sum(Gnp,axis=1);
train2 = resample(prc_cut, Y, Gnp, train, gamma); # Bug??
bs = int(np.ceil(bs_prc*train2.size))
xs = tf.placeholder(tf.float32, [None,input_feats])
#ys = tf.placeholder(tf.float32, [None,num_labls])
yin = tf.placeholder(tf.int32, [None])
print("Vars loaded xs and ys created")
hparams = tensor_forest.ForestHParams(num_classes=output_feats,
num_features=input_feats,
num_trees=num_trees,
max_nodes=max_nodes).fill()
print("Tensor forest hparams created")
forest_graph = tensor_forest.RandomForestGraphs(hparams)
print("Tensor forest graph created")
train_op = forest_graph.training_graph(xs, yin)
loss_op = forest_graph.training_loss(xs, yin)
print("Loss and train ops created")
predict, _, _ = forest_graph.inference_graph(xs)
print("Tensor forest variables created through predict")
accuracy_op = tf.reduce_mean(tf.reduce_sum(tf.square(tf.one_hot(yin,output_feats)-predict),reduction_indices=[1]))
print(tf.reduce_sum(tf.square(tf.one_hot(yin,output_feats)-predict),reduction_indices=[1]))
#predict = tf.one_hot(pred);
print("Lambda specific variables created")
# Creating training and testing steps
G2 = np.copy(Gnp);
G2[rowsums>1,:] = 0;
YI = np.matmul(Y,G2);
YIrs = np.sum(YI,axis=1);
trainI = train2[np.in1d(train2,np.where(YIrs==1))];
print("data type trainI,",trainI.dtype)
testI = test[np.in1d(test,np.where(YIrs==1))];
print("trainI testI created")
#init_vars=tf.global_variables_initializer()
init_vars = tf.group(tf.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
sess = tf.Session()
sess.run(init_vars)
print("Session started")
#beep = sess.run(predict,feed_dict={xs:X[1:100,:]});
#beep = sess.run(predict,feed_dict={xs:X[train2[0:bs],:]});
tensor_trainI = {xs: X[trainI, :], yin: sess.run(tf.argmax(get_yi(rowsums,G2,Y[trainI, :]),axis=1))}
print("tensor_trainI made")
tensor_testI = {xs: X[testI, :], yin: sess.run(tf.argmax(get_yi(rowsums,G2,Y[testI, :]),axis=1))}
print("tensor_testI made")
tensor_train = {xs: X[train2[0:bs], :], yin: sess.run(tf.argmax(get_yn(sess.run(predict,feed_dict={xs:X[train2[0:bs],:]}),Y[train2[0:bs], :],delta,tau,output_feats),axis=1))}
print("tensor_train made")
tensor_test = {xs: X[test, :], yin: sess.run(tf.argmax(get_yn(sess.run(predict,feed_dict={xs:X[test,:]}),Y[test, :],delta,tau,output_feats),axis=1))}
print("tensor_test made")
#**********************************
#print("Loss and training steps created with sample tensors")
# Setting params and initializing
print("Beginning iterations")
# Starting training iterations
print(X.shape)
for i in range(1,101):
if i < 50:
sess.run(train_op, feed_dict=tensor_trainI)
#print("ran train op")
if i % 10 == 0:
print(str(sess.run(accuracy_op, feed_dict=tensor_trainI)) + ' ' + str(sess.run(accuracy_op, feed_dict=tensor_testI)))
else:
sess.run(train_op, feed_dict=tensor_train)
if i % 10 == 0:
print(str(sess.run(accuracy_op, feed_dict=tensor_train)) + ' ' + str(sess.run(accuracy_op, feed_dict=tensor_test)))
elif i % 10 == 0:
np.random.shuffle(train2);
tensor_train = {xs: X[train2[0:bs], :], yin: sess.run(get_yn(sess.run(predict,feed_dict={xs:X[train2[0:bs],:]}),Y[train2[0:bs], :],delta,tau,output_feats))}
if prt:
blah = sess.run(predict, feed_dict=tensor_test);
sio.savemat('preds_cv' + str(cv) + '.mat', {'preds': blah});
sio.savemat('truth_cv' + str(cv) + '.mat', {'labels': Y[test, :]});
acc = sess.run(accuracy_op, feed_dict=tensor_test)
print("loss1=%.4f, gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%i, bs_prc=%.4f, num_trees=%i, max_nodes=%i" % (acc, gamma, delta, tau, prc_cut, bs_prc, num_trees, max_nodes))
tf.reset_default_graph();
return(acc)
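# --- Illustrative sketch --------------------------------------------------------
# The class balancing behind wt_cutoff/resample above: every class is resampled
# toward a target count derived from a percentile cutoff and weighted by the row
# sums of G. In this wrapper Gnp is the identity matrix, so all row sums equal 1
# and the target reduces to the cutoff itself, i.e. each class is pushed toward
# the prc_cut percentile of the per-class counts. The numbers below are
# hypothetical toy values.
import math
import numpy as np

counts_demo = np.array([5, 50, 500])                      # samples per class in `train`
cutoff_demo = math.ceil(np.percentile(counts_demo, 50))   # prc_cut = 50
rowsums_demo = np.array([1, 1, 1])                        # identity G -> all row sums are 1
gamma_demo = 1.0
targets_demo = [math.ceil(cutoff_demo * (math.log(max(rowsums_demo) / r + 1, 2) ** gamma_demo))
                for r in rowsums_demo]
print(cutoff_demo, targets_demo)  # small classes are oversampled up to the target, large ones downsampled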
|
Scripts/run_LAmbDA.py
|
| 0.287368 | 0.379091 |
from django.test import TransactionTestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo
import simplejson as json
import random
from django.test.utils import override_settings
from mock import patch
from django.core import cache
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok')
@override_settings(SPOTSEEKER_SPOT_FORM='spotseeker_server.org_forms.uw_spot.UWSpotForm')
@override_settings(SPOTSEEKER_SPOTEXTENDEDINFO_FORM='spotseeker_server.org_forms.uw_spot.UWSpotExtendedInfoForm')
@override_settings(SPOTSEEKER_AUTH_ADMINS=('demo_user',))
class UWSpotPUTTest(TransactionTestCase):
""" Tests updating Spot information via PUT.
"""
def setUp(self):
spot = Spot.objects.create(name="This is for testing PUT")
SpotExtendedInfo.objects.create(spot=spot, key="aw_yisss", value="breadcrumbs")
spot.save()
self.spot = spot
url = '/api/v1/spot/{0}'.format(self.spot.pk)
self.url = url
def test_bad_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
response = c.put(self.url, 'this is just text', content_type="application/json", If_Match=self.spot.etag)
self.assertEquals(response.status_code, 400, "Rejects non-json")
def test_invalid_url(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
response = c.put("/api/v1/spot/aa", '{}', content_type="application/json")
self.assertEquals(response.status_code, 404, "Rejects a non-numeric url")
def test_invalid_id_too_high(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
test_id = self.spot.pk + 10000
test_url = '/api/v1/spot/{0}'.format(test_id)
response = c.put(test_url, '{}', content_type="application/json")
self.assertEquals(response.status_code, 404, "Rejects an id that doesn't exist yet (no PUT to create)")
def test_empty_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
response = c.put(self.url, '{}', content_type="application/json", If_Match=self.spot.etag)
self.assertEquals(response.status_code, 400, "Rejects an empty body")
def test_valid_json_no_etag(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
new_name = "testing PUT name: {0}".format(random.random())
new_capacity = 10
response = c.put(self.url, '{{"name":"{0}","capacity":"{1}"}}'.format(new_name, new_capacity), content_type="application/json")
self.assertEquals(response.status_code, 400, "Bad request w/o an etag")
updated_spot = Spot.objects.get(pk=self.spot.pk)
self.assertEquals(updated_spot.name, self.spot.name, "No etag - same name")
self.assertEquals(updated_spot.capacity, self.spot.capacity, "no etag - same capacity")
def test_valid_json_valid_etag(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
new_name = "testing PUT name: {0}".format(random.random())
new_capacity = 20
response = c.get(self.url)
etag = response["ETag"]
json_string = '{"name":"%s","capacity":"%s","location": {"latitude": 55, "longitude": -30},"extended_info":{"has_whiteboards":"true","has_outlets":"true","manager":"Sam","organization":"UW"}}' % (new_name, new_capacity)
response = c.put(self.url, json_string, content_type="application/json", If_Match=etag)
self.assertEquals(response.status_code, 200, "Accepts a valid json string")
updated_spot = Spot.objects.get(pk=self.spot.pk)
self.assertEquals(updated_spot.name, new_name, "a valid PUT changes the name")
self.assertEquals(updated_spot.capacity, new_capacity, "a valid PUT changes the capacity")
def test_valid_json_outdated_etag(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
new_name = "testing PUT name: {0}".format(random.random())
new_capacity = 30
response = c.get(self.url)
etag = response["ETag"]
intermediate_spot = Spot.objects.get(pk=self.spot.pk)
intermediate_spot.name = "This interferes w/ the PUT"
intermediate_spot.save()
response = c.put(self.url, '{{"name":"{0}","capacity":"{1}"}}'.format(new_name, new_capacity), content_type="application/json", If_Match=etag)
self.assertEquals(response.status_code, 409, "An outdated etag leads to a conflict")
updated_spot = Spot.objects.get(pk=self.spot.pk)
self.assertEquals(updated_spot.name, intermediate_spot.name, "keeps the intermediate name w/ an outdated etag")
def test_valid_json_but_invalid_extended_info(self):
with self.settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.org_forms.uw_spot.UWSpotForm'):
c = Client()
new_name = "testing PUT name: {0}".format(random.random())
new_capacity = 20
response = c.get(self.url)
etag = response["ETag"]
json_string = '{"name":"%s","capacity":"%s","location": {"latitude": 55, "longitude": -30},"extended_info":{"has_whiteboards":"true","has_outlets":"true","has_computers":"true","num_computers":"10","manager":"Sam","organization":"UW"}}' % (new_name, new_capacity)
response = c.put(self.url, json_string, content_type="application/json", If_Match=etag)
self.assertEquals(response.status_code, 200, "Accepts a valid json string")
# test: invalid extended info value
response = c.get(self.url)
etag = response["ETag"]
updated_json_string = '{"name":"%s","capacity":"%s","location": {"latitude": 55, "longitude": -30},"extended_info":{"has_whiteboards":"true","has_outlets":"wub wub wub wu wu wuhhhh WUB WUB WUBBBBUB", "has_computers":"true", "num_computers":"10","manager":"Sam","organization":"UW"}}' % (new_name, new_capacity)
response = c.put(self.url, updated_json_string, content_type="application/json", If_Match=etag)
self.assertEquals(response.status_code, 400, "Doesn't update spot info with invalid extended info")
response = c.get(self.url)
self.assertEquals(json.loads(json_string)['extended_info'], json.loads(response.content)['extended_info'], "Doesn't update spot info with invalid extended info")
# test: invalid int value
invalid_int = "invalid_int"
invalid_int_json_string = '{"name":"%s","capacity":"%s","location": {"latitude": 55, "longitude": -30},"extended_info":{"has_whiteboards":"true","has_outlets":"true", "has_computers":"true", "num_computers":"%s","manager":"Sam","organization":"UW"}}' % (new_name, new_capacity, invalid_int)
response = c.put(self.url, invalid_int_json_string, content_type="application/json", If_Match=etag)
self.assertEquals(response.status_code, 400, "Doesn't update spot info with invalid int value")
response = c.get(self.url)
self.assertEquals(json.loads(json_string)['extended_info'], json.loads(response.content)['extended_info'], "Doesn't update spot info with invalid int value")
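# --- Illustrative sketch --------------------------------------------------------
# The PUT tests above exercise an ETag-based optimistic-concurrency rule: no
# If-Match header -> 400, an outdated etag -> 409 (the stored spot is kept), and a
# matching etag -> 200 with the update applied. A simplified stand-in for that
# rule (not the spotseeker_server implementation):
def _put_with_etag_demo(stored_etag, request_etag, apply_update):
    if request_etag is None:
        return 400          # missing If-Match header
    if request_etag != stored_etag:
        return 409          # conflict: another update happened first, keep stored data
    apply_update()
    return 200

print(_put_with_etag_demo("abc", None, lambda: None))     # 400
print(_put_with_etag_demo("abc", "stale", lambda: None))  # 409
print(_put_with_etag_demo("abc", "abc", lambda: None))    # 200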
|
test/uw_spot/spot_put.py
|
| 0.549399 | 0.089694 |
u"""
Created at 2020.01.02
A Python wrapper for running the NetProphet pipeline, replacing the original Snakemake workflow
"""
import os
import sys
import json
import logging
from argparse import ArgumentParser, ArgumentError
from multiprocessing import Pool
from shutil import rmtree
from subprocess import check_call, CalledProcessError
from tqdm import tqdm
from CODE import prepare_resources
from CODE import weighted_avg_similar_dbds
from CODE import build_motif_network
from CODE import combine_networks
from CODE import convert_fire2meme
from CODE import parse_network_scores
from CODE import parse_motif_summary
from CODE import parse_quantized_bins
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
def call(cmd):
u"""
Run a shell command with stdout and stderr suppressed.
:param cmd: command string passed to the shell
:return: None; raises CalledProcessError on a non-zero exit status
"""
with open(os.devnull, "w+") as w:
check_call(cmd, shell=True, stdout=w, stderr=w)
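# --- Illustrative sketch --------------------------------------------------------
# The SnakeMakePipe class below records completed step numbers in a JSON file so
# that a re-run can skip them (see check_progress/log_progress). A standalone
# sketch of that pattern; the file name "progress_demo.json" is hypothetical and
# the helpers rely on the os/json imports at the top of this module.
def _read_progress_demo(path="progress_demo.json"):
    if os.path.exists(path):
        with open(path) as r:
            return json.load(r)
    return []

def _log_step_demo(step, path="progress_demo.json"):
    progress = _read_progress_demo(path)
    if step not in progress:
        progress.append(step)
    with open(path, "w+") as w:
        json.dump(progress, w, indent=4)

# _log_step_demo(1)            -> progress_demo.json now holds [1]
# 1 in _read_progress_demo()   -> True, so step 1 is skipped on a re-run
# 2 in _read_progress_demo()   -> False, so step 2 still needs to run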
class SnakeMakePipe(object):
u"""
"""
def __init__(self, path: str, processes: int=1):
u"""
:param path: path to the JSON config file
:param processes: number of processes to use
"""
self.processes = processes
# self.__root__ = os.path.abspath(os.path.dirname(__file__))
self.__root__ = "/opt/NetProphet_2.0"
if not os.path.exists(path) or not os.path.isfile(path):
raise FileNotFoundError("Cannot find config file at %s" % path)
with open(path) as r:
self.config = json.load(r)
self.progress = os.path.join(self.config["NETPROPHET2_DIR"], "progress.json")
if self.check_progress(11):
logging.info("Please remove {} before re-run this pipeline".format(self.progress))
def check_progress(self, step):
u"""
:param step: pipeline step number
:return: True if the step is recorded as completed in the progress file
"""
progress = []
if os.path.exists(self.progress):
with open(self.progress) as r:
progress = json.load(r)
return step in progress
def log_progress(self, step):
progress = []
if os.path.exists(self.progress):
with open(self.progress) as r:
progress = json.load(r)
if step not in progress:
progress.append(step)
with open(self.progress, "w+") as w:
json.dump(progress, w, indent=4)
def step1(self):
u"""
STEP 1 to create output dir or files
:return:
"""
logging.info("STEP1: make_directories")
if self.check_progress(1):
logging.info("STEP1: skipped")
return
paths = [
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp"
),
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/"
),
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/"
),
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/network_scores/"
),
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/network_bins/"
),
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/motifs_pfm/"
),
os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/motifs_score/"
)
]
for i in paths:
if not os.path.exists(i):
logging.info("Create %s" % i)
os.makedirs(i)
else:
logging.info("%s exists" % i)
self.log_progress(1)
def step2(self):
u"""
:return:
"""
logging.info("STEP2: prepare_resources")
if not self.check_progress(1):
raise FileNotFoundError("Please run STEP1 before run STEP2")
else:
if not self.check_progress(2):
prepare_resources.main([
"-g", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_GENES"]
),
"-r", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"-e", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_EXPRESSION_DATA"]
),
"-c", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_SAMPLE_CONDITIONS"]
),
"-or", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/rdata.expr"
),
"-of", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/data.fc.tsv"
),
"-oa", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/allowed.adj"
),
"-op1", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/data.pert.adj"
),
"-op2", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/data.pert.tsv"
),
])
self.log_progress(2)
def step3(self):
u"""
:return:
"""
logging.info("STEP3: map_np_network")
if not self.check_progress(2):
raise FileNotFoundError("Please run STEP2 before run STEP3")
else:
if not self.check_progress(3):
check_call(
"bash {program} -m -u {input_u} -t {input_t} -r {input_r} -a {input_a} -p {input_p} -d {input_d} -g {input_g} -f {input_f} -o {input_o} -n {output_n}".format(**{
"program": os.path.join(self.__root__, "SRC/NetProphet1/netprophet"),
"input_u": os.path.join(
self.config["NETPROPHET2_DIR"], "SRC/NetProphet1/"),
"input_t": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_EXPRESSION_DATA"]),
"input_r": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/rdata.expr"
),
"input_a": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/allowed.adj"
),
"input_p": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/data.pert.adj"
),
"input_d": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_DE_ADJMTR"]
),
"input_g": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_GENES"]
),
"input_f": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"input_o": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/"
),
"output_n": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/np.adjmtr"
)
}),
shell=True
)
self.log_progress(3)
def step4(self):
u"""
data_fc_expr=${1}
pert_matrix=${2}
tf_names=${3}
output_adjmtr=${4}
use_serial=${5}
:return:
"""
logging.info("STEP4: map_bart_network")
if not self.check_progress(3):
raise FileNotFoundError("Please run STEP3 before run STEP4")
else:
if not self.check_progress(4):
check_call("Rscript --vanilla {program} fcFile={data_fc_expr} isPerturbedFile={pert_matrix} tfNameFile={tf_names} saveTo={output_adjmtr}.tsv mpiBlockSize={processes}".format(**{
"program": os.path.join(self.__root__, "CODE/build_bart_network.r"),
"data_fc_expr": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/data.fc.tsv"
),
"pert_matrix": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
"tmp/data.pert.adj"
),
"tf_names": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"output_adjmtr": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/bn.adjmtr"
),
"processes": self.processes
}), shell=True)
                # Presumably this step just strips the row and column names from the BART output
o = os.path.join(self.config["NETPROPHET2_DIR"], self.config["OUTPUT_DIR"], "networks/bn.adjmtr")
with open(o, "w+") as w:
with open(o + ".tsv") as r:
for idx, line in enumerate(r):
if idx > 0:
lines = line.split()
if len(lines) > 0:
w.write("\t".join(lines[1:]) + "\n")
self.log_progress(4)
def step5(self):
u"""
:return:
"""
logging.info("STEP5: weighted_average_np_network")
if not self.check_progress(4):
raise FileNotFoundError("Please run STEP4 before run STEP5")
else:
if not self.check_progress(5):
weighted_avg_similar_dbds.main([
"-n", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/np.adjmtr"
),
"-r", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"-a", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["DBD_PID_DIR"]
),
"-d", "50", "-t", "single_dbds",
"-o", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/npwa.adjmtr"
)
])
self.log_progress(5)
def step6(self):
u"""
:return:
"""
logging.info("STEP6: weighted_average_bart_network")
if not self.check_progress(5):
raise FileNotFoundError("Please run STEP5 before run STEP6")
else:
if not self.check_progress(6):
weighted_avg_similar_dbds.main([
"-n", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/bn.adjmtr"
),
"-r", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"-a", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["DBD_PID_DIR"]
),
"-d", "50", "-t", "single_dbds",
"-o", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/bnwa.adjmtr"
)
])
self.log_progress(6)
def step7(self):
u"""
:return:
"""
logging.info("STEP7: combine_npwa_bnwa")
if not self.check_progress(6):
raise FileNotFoundError("Please run STEP6 before run STEP7")
else:
if not self.check_progress(7):
check_call(
"Rscript {program} {input_n} {input_b} {output_o}".format(**{
"program": os.path.join(self.__root__, "CODE/quantile_combine_networks.r"),
"input_n": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/npwa.adjmtr"
),
"input_b": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/bnwa.adjmtr"
),
"output_o": os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/npwa_bnwa.adjmtr"
)
}),
shell=True
)
self.log_progress(7)
def step8(self):
u"""
## Check if all motifs are ready
bash CODE/check_inference_status.sh ${OUTPUT_DIR}/motif_inference/motif_inference.log $REGULATORS $FLAG
:return:
"""
logging.info("STEP8: infer_motifs")
if not self.check_progress(7):
raise FileNotFoundError("Please run STEP7 before run STEP8")
else:
if not self.check_progress(8):
logging.info("Binning promoters based on network scores ... ")
parse_network_scores.main([
"-a", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/npwa_bnwa.adjmtr"
),
"-r", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"-t", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_GENES"]
),
"-o", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/network_scores"
)
])
parse_quantized_bins.main([
"-n", "20",
"-i", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/network_scores"
),
"-o", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/network_bins"
),
])
logging.info("Done")
promoter = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_PROMOTERS"]
)
out_dir = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"]
)
tasks = []
with open(os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"])
) as r:
for regulator in r:
regulator = regulator.strip()
tasks.append(
"perl {FIREDIR}/fire.pl --expfiles={OUTDIR} --exptype=discrete --fastafile_dna={PROMOTER} --k=7 --jn=20 --jn_t=16 --nodups=1 --dorna=0 --dodnarna=0".format(**{
"FIREDIR": os.getenv("FIREDIR"),
"OUTDIR": os.path.join(out_dir, "motif_inference/network_bins", regulator),
"PROMOTER": promoter
})
)
try:
with Pool(self.processes) as p:
list(tqdm(p.imap(call, tasks), total=len(tasks)))
except CalledProcessError as err:
logging.error(err)
exit(1)
self.log_progress(8)
def step9(self):
u"""
:return:
"""
logging.info("STEP9: score_motifs")
if not self.check_progress(8):
raise FileNotFoundError("Please run STEP8 before run STEP9")
else:
if not self.check_progress(9):
OUTPUT_DIR = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"]
)
MOTIFS_DIR = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/network_bins/"
)
REGULATORS = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
)
PROMOTERS = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_PROMOTERS"]
)
MOTIFS_LIST = os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"], "motif_inference/motifs.txt"
)
logging.info("Parsing motif inference results ... ")
parse_motif_summary.main([
"-a", "True",
"-i", MOTIFS_DIR,
"-o", MOTIFS_LIST
])
convert_fire2meme.main([
"-i", MOTIFS_LIST,
"-o", os.path.join(OUTPUT_DIR, "motif_inference/motifs_pfm/")
])
logging.info("Done")
# score_motifs $ $PROMOTERS $ ${OUTPUT_DIR}/motif_inference/motif_scoring.log
FN_TF_PWM = os.path.join(OUTPUT_DIR, "motif_inference/motifs_pfm/") # directory of tf pwm
FN_PROMOTERS = PROMOTERS # promoter sequence file
OUT_FIMO = os.path.join(OUTPUT_DIR, "motif_inference/motifs_score") # directory of fimo alignment output
tasks1, tasks2, tasks3 = [], [], []
with open(REGULATORS) as r:
for regulator in r:
regulator = regulator.strip()
if not os.path.exists(os.path.join(FN_TF_PWM, regulator)):
continue
if os.path.exists(os.path.join(OUT_FIMO, regulator)):
rmtree(os.path.join(OUT_FIMO, regulator))
os.makedirs(os.path.join(OUT_FIMO, regulator))
tasks1.append("{fimo} -o {OUT_FIMO}/{regulator} --thresh 5e-3 {FN_TF_PWM}/{regulator} {FN_PROMOTERS}".format(**{
"OUT_FIMO": OUT_FIMO,
"regulator": regulator,
"FN_TF_PWM": FN_TF_PWM,
"FN_PROMOTERS": FN_PROMOTERS,
"fimo": os.path.join(self.__root__, "SRC/meme/bin/fimo")
}))
tasks2.append("sed '1d' {OUT_FIMO}/{regulator}/fimo.txt | cut -f 1,2,7 > {OUT_FIMO}/{regulator}/temp.txt".format(**{
"OUT_FIMO": OUT_FIMO,
"regulator": regulator,
}))
tasks3.append("ruby {program} -i {OUT_FIMO}/{regulator}/temp.txt > {OUT_FIMO}/{regulator}.summary".format(**{
"OUT_FIMO": OUT_FIMO,
"regulator": regulator,
"program": os.path.join(self.__root__, "CODE/estimate_affinity.rb")
}))
with Pool(self.processes) as p:
try:
list(tqdm(p.imap(call, tasks1), total=len(tasks1)))
except CalledProcessError as err:
pass
try:
list(tqdm(p.imap(call, tasks2), total=len(tasks2)))
list(tqdm(p.imap(call, tasks3), total=len(tasks3)))
except CalledProcessError as err:
logging.error(err)
exit(1)
self.log_progress(9)
def step10(self):
u"""
:return:
"""
logging.info("STEP10: build_motif_network")
if not self.check_progress(9):
raise FileNotFoundError("Please run STEP9 before run STEP10")
else:
if not self.check_progress(10):
build_motif_network.main([
"-i", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/motifs.txt"
),
"-r", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"-g", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_GENES"]
),
"-f", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"motif_inference/motifs_score/"
),
"-t", "robust",
"-v", str(self.config["MOTIF_THRESHOLD"]),
"-o", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/mn.adjmtr"
)
])
self.log_progress(10)
def step11(self):
u"""
:return:
"""
logging.info("STEP11: assemble_final_network")
if not self.check_progress(10):
raise FileNotFoundError("Please run STEP10 before run STEP11")
else:
if not self.check_progress(11):
combine_networks.main([
"-s", "resort",
"-n", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/npwa_bnwa.adjmtr"
),
"-b", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/mn.adjmtr"
),
"-od", os.path.join(self.config["NETPROPHET2_DIR"], self.config["OUTPUT_DIR"], "networks/"),
"-om", "npwa_bnwa_mn.adjmtr"
])
weighted_avg_similar_dbds.main([
"-n", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
"networks/", "npwa_bnwa_mn.adjmtr"
),
"-r", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["FILENAME_REGULATORS"]
),
"-a", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["RESOURCES_DIR"],
self.config["DBD_PID_DIR"]
),
"-d", "50", "-f", "single_dbds",
"-o", os.path.join(
self.config["NETPROPHET2_DIR"],
self.config["OUTPUT_DIR"],
self.config["FILENAME_NETPROPHET2_NETWORK"]
)
])
self.log_progress(11)
if __name__ == '__main__':
parser = ArgumentParser(description="NetProphet 2.0")
parser.add_argument("-c", "--config", type=str, required=True, help="Path to config file")
parser.add_argument("-p", "--processes", type=int, default=1, help="How many cpu to use")
if len(sys.argv) <= 1:
parser.print_help()
else:
try:
args = parser.parse_args(sys.argv[1:])
if not os.path.exists(args.config) or not os.path.isfile(args.config):
print("Please set the correct path to config file")
exit(1)
root_dir = os.path.abspath(os.path.dirname(__file__))
config = os.path.abspath(args.config)
if args.processes <= 0:
processes = 1
else:
processes = args.processes
            runner = SnakeMakePipe(config, processes)  # use the normalized config path and process count computed above
runner.step1()
runner.step2()
runner.step3()
runner.step4()
runner.step5()
runner.step6()
runner.step7()
runner.step8()
runner.step9()
runner.step10()
runner.step11()
except ArgumentError as err:
print(err)
parser.print_help()
|
main.py
| 0.35209 | 0.146667 |
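The wrapper above drives every step from a single JSON config. A minimal sketch of such a file, written here as a Python dict; all values are illustrative placeholders and only the key names are taken from the lookups in the code above:

import json

example_config = {
    "NETPROPHET2_DIR": "/opt/NetProphet_2.0",      # placeholder paths and file names
    "RESOURCES_DIR": "RESOURCES",
    "OUTPUT_DIR": "OUTPUT",
    "FILENAME_GENES": "genes.txt",
    "FILENAME_REGULATORS": "regulators.txt",
    "FILENAME_EXPRESSION_DATA": "expression.tsv",
    "FILENAME_SAMPLE_CONDITIONS": "conditions.txt",
    "FILENAME_DE_ADJMTR": "de.adjmtr",
    "FILENAME_PROMOTERS": "promoters.fasta",
    "DBD_PID_DIR": "DBD_PID",
    "MOTIF_THRESHOLD": 16,
    "FILENAME_NETPROPHET2_NETWORK": "np2.adjmtr",
}

with open("config.json", "w") as handle:
    json.dump(example_config, handle, indent=4)

The wrapper would then be launched with something like: python main.py -c config.json -p 4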
import sys
from functools import lru_cache, partial
from itertools import combinations
import generator_conf
import jinja2
def all_combinations(l):
return [x for n in range(len(l) + 1) for x in combinations(l, n)]
def indent(n, s):
return s.replace("\n", "\n" + " " * (n * 4))
def nested_statements(layers, *args, **kwargs):
if layers:
outer, *inners = layers
def inner(depth, *args, **kwargs):
s = nested_statements(inners, *args, **kwargs)
return indent(depth, s)
return outer(inner, *args, **kwargs)
raise RuntimeError("The last layer must not call inner.")
def run(root_dir, template_file, output_file):
def generated_banner():
return "THIS FILE IS GENERATED FROM '{0}'. Make changes to that file instead of this one.".format(template_file)
template_env = _get_jinja_environment(root_dir)
template_env.globals.update(
combinations=combinations,
all_combinations=all_combinations,
generated_banner=generated_banner,
nested_statements=nested_statements,
partial=partial,
indent=indent,
**generator_conf.exports,
)
template = template_env.get_template(str(template_file))
output = template.render()
if output_file:
try:
with open(output_file, "rt", encoding="UTF-8") as f:
old_output = f.read()
except IOError:
old_output = None
if output == old_output:
# print(f"{output_file} does not need to be updated.")
return False
with open(output_file, "wt", encoding="UTF-8") as f:
# print(f"Writing {output_file}.")
            print(output, file=f, end="")  # reuse the already-rendered output instead of rendering twice
return True
else:
print(output, end="")
return None
# Cache jinja environments to allow caching inside the environment. But don't cache many in case they get big.
@lru_cache(2)
def _get_jinja_environment(root_dir):
template_loader = jinja2.FileSystemLoader(searchpath=[str(root_dir), str(f"{root_dir}/katana")])
template_env = jinja2.Environment(loader=template_loader)
return template_env
if __name__ == "__main__":
DIR = sys.argv[1]
TEMPLATE_FILE = sys.argv[2]
run(DIR, TEMPLATE_FILE, None)
|
python/generate_from_jinja.py
| 0.279632 | 0.126785 |
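nested_statements in the file above composes a stack of code-emitting callables: each layer receives an inner callback that renders the remaining layers at a given indent depth, and the innermost layer must never call it. A small illustrative use with invented layer functions, assuming the module above is importable as generate_from_jinja:

from generate_from_jinja import nested_statements

def outer_layer(inner, name):
    # Wrap whatever the remaining layers emit in an "if" guard, indenting it one level.
    return "if {0} is not None:\n    {1}".format(name, inner(1, name))

def inner_layer(inner, name):
    # Innermost layer: emits a statement and never calls inner().
    return "print({0})".format(name)

print(nested_statements([outer_layer, inner_layer], "value"))
# if value is not None:
#     print(value)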
from RLTest import Env
import random
def aofTestCommon(env, reloadfn):
# TODO: Change this attribute in rmtest
env.cmd('ft.create', 'idx', 'schema',
'field1', 'text', 'field2', 'numeric')
reloadfn()
for x in range(1, 10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(x), 1.0 / x, 'fields',
'field1', 'myText{}'.format(x), 'field2', 20 * x)
exp = [9L, 'doc1', ['field1', 'myText1', 'field2', '20'], 'doc2', ['field1', 'myText2', 'field2', '40'], 'doc3', ['field1', 'myText3', 'field2', '60'], 'doc4', ['field1', 'myText4', 'field2', '80'], 'doc5', ['field1',
'myText5', 'field2', '100'], 'doc6', ['field1', 'myText6', 'field2', '120'], 'doc7', ['field1', 'myText7', 'field2', '140'], 'doc8', ['field1', 'myText8', 'field2', '160'], 'doc9', ['field1', 'myText9', 'field2', '180']]
reloadfn()
ret = env.cmd('ft.search', 'idx', 'myt*')
env.assertEqual(exp, ret)
def testAof():
env = Env(useAof=True)
aofTestCommon(env, lambda: env.restart_and_reload())
def testRawAof():
env = Env(useAof=True)
if env.env == 'existing-env':
env.skip()
aofTestCommon(env, lambda: env.broadcast('debug', 'loadaof'))
def testRewriteAofSortables():
env = Env(useAof=True)
env.cmd('FT.CREATE', 'idx', 'schema', 'field1', 'TEXT',
'SORTABLE', 'num1', 'NUMERIC', 'SORTABLE')
env.cmd('FT.ADD', 'idx', 'doc', 1.0,
'FIELDS', 'field1', 'Hello World')
env.restart_and_reload()
env.broadcast('SAVE')
# Load some documents
for x in xrange(100):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x), 1.0, 'FIELDS',
'field1', 'txt{}'.format(random.random()),
'num1', random.random())
for sspec in [('field1', 'asc'), ('num1', 'desc')]:
cmd = ['FT.SEARCH', 'idx', 'txt', 'SORTBY', sspec[0], sspec[1]]
res = env.cmd(*cmd)
env.restart_and_reload()
res2 = env.cmd(*cmd)
env.assertEqual(res, res2)
def testAofRewriteSortkeys():
env = Env(useAof=True)
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'foo',
'TEXT', 'SORTABLE', 'bar', 'TAG')
env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')
res_exp = env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
'RETURN', '1', 'foo', 'WITHSORTKEYS')
env.restart_and_reload()
res_got = env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
'RETURN', '1', 'foo', 'WITHSORTKEYS')
env.assertEqual(res_exp, res_got)
def testAofRewriteTags():
env = Env(useAof=True)
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'foo',
'TEXT', 'SORTABLE', 'bar', 'TAG')
env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')
info_a = to_dict(env.cmd('FT.INFO', 'idx'))
env.restart_and_reload()
info_b = to_dict(env.cmd('FT.INFO', 'idx'))
env.assertEqual(info_a['fields'], info_b['fields'])
# Try to drop the schema
env.cmd('FT.DROP', 'idx')
# Try to create it again - should work!
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'foo',
'TEXT', 'SORTABLE', 'bar', 'TAG')
env.cmd('FT.ADD', 'idx', '1', '1', 'FIELDS', 'foo', 'A', 'bar', '1')
env.cmd('FT.ADD', 'idx', '2', '1', 'fields', 'foo', 'B', 'bar', '1')
res = env.cmd('FT.SEARCH', 'idx', '@bar:{1}', 'SORTBY', 'foo', 'ASC',
'RETURN', '1', 'foo', 'WITHSORTKEYS')
env.assertEqual([2L, '1', '$a', ['foo', 'A'],
'2', '$b', ['foo', 'B']], res)
def to_dict(r):
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
|
src/pytest/test_aof.py
| 0.303216 | 0.361446 |
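to_dict in the test file above pairs up the flat key/value reply that commands such as FT.INFO return. A tiny illustration with an invented reply, assuming to_dict from the file above is in scope:

reply = ['index_name', 'idx', 'num_docs', '2']   # invented sample reply
info = to_dict(reply)
assert info == {'index_name': 'idx', 'num_docs': '2'}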