# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test Omniglot dataset operators
"""
import mindspore.dataset as ds
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testOmniglot"
def test_omniglot_basic():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case basic")
# define parameters.
repeat_count = 1
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR)
data1 = data1.repeat(repeat_count)
num_iter = 0
count = [0, 0, 0, 0]
BASIC_EXPECTED_SHAPE = {"82386": 1, "61235": 1, "159109": 2}
ACTUAL_SHAPE = {"82386": 0, "61235": 0, "159109": 0}
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
ACTUAL_SHAPE[str(item["image"].shape[0])] += 1
count[item["label"]] += 1
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
assert count == [2, 2, 0, 0]
assert ACTUAL_SHAPE == BASIC_EXPECTED_SHAPE
def test_omniglot_num_samples():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case numSamples")
# define parameters.
repeat_count = 1
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, num_samples=8, num_parallel_workers=2)
data1 = data1.repeat(repeat_count)
num_iter = 0
# each data is a dictionary.
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
random_sampler = ds.RandomSampler(num_samples=3, replacement=True)
data1 = ds.OmniglotDataset(DATA_DIR,
num_parallel_workers=2,
sampler=random_sampler)
num_iter = 0
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
assert num_iter == 3
random_sampler = ds.RandomSampler(num_samples=3, replacement=False)
data1 = ds.OmniglotDataset(DATA_DIR,
num_parallel_workers=2,
sampler=random_sampler)
num_iter = 0
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
assert num_iter == 3
def test_omniglot_num_shards():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case numShards")
# define parameters.
repeat_count = 1
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, num_shards=4, shard_id=2)
data1 = data1.repeat(repeat_count)
num_iter = 0
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
assert item["image"].shape[0] == 82386
assert item["label"] == 1
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 1
def test_omniglot_shard_id():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case withShardID")
# define parameters.
repeat_count = 1
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, num_shards=4, shard_id=1)
data1 = data1.repeat(repeat_count)
num_iter = 0
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
assert item["image"].shape[0] == 159109
assert item["label"] == 0
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 1
def test_omniglot_no_shuffle():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case noShuffle")
# define parameters.
repeat_count = 1
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, shuffle=False)
data1 = data1.repeat(repeat_count)
num_iter = 0
count = [0, 0, 0, 0]
SHAPE = [159109, 159109, 82386, 61235]
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
assert item["image"].shape[0] == SHAPE[num_iter]
count[item["label"]] += 1
num_iter += 1
assert num_iter == 4
assert count == [2, 2, 0, 0]
def test_omniglot_extra_shuffle():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case extraShuffle")
# define parameters.
repeat_count = 2
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, shuffle=True)
data1 = data1.shuffle(buffer_size=5)
data1 = data1.repeat(repeat_count)
num_iter = 0
count = [0, 0, 0, 0]
EXPECTED_SHAPE = {"82386": 2, "61235": 2, "159109": 4}
ACTUAL_SHAPE = {"82386": 0, "61235": 0, "159109": 0}
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
ACTUAL_SHAPE[str(item["image"].shape[0])] += 1
count[item["label"]] += 1
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 8
assert count == [4, 4, 0, 0]
assert ACTUAL_SHAPE == EXPECTED_SHAPE
def test_omniglot_decode():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case decode")
# define parameters.
repeat_count = 1
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, decode=True)
data1 = data1.repeat(repeat_count)
num_iter = 0
# each data is a dictionary.
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
def test_sequential_sampler():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case SequentialSampler")
# define parameters.
repeat_count = 1
# apply dataset operations.
sampler = ds.SequentialSampler(num_samples=8)
data1 = ds.OmniglotDataset(DATA_DIR, sampler=sampler)
data_seq = data1.repeat(repeat_count)
num_iter = 0
count = [0, 0, 0, 0]
SHAPE = [159109, 159109, 82386, 61235]
# each data is a dictionary.
for item in data_seq.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
assert item["image"].shape[0] == SHAPE[num_iter]
count[item["label"]] += 1
num_iter += 1
assert num_iter == 4
assert count == [2, 2, 0, 0]
def test_random_sampler():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case RandomSampler")
# define parameters.
repeat_count = 1
# apply dataset operations.
sampler = ds.RandomSampler()
data1 = ds.OmniglotDataset(DATA_DIR, sampler=sampler)
data1 = data1.repeat(repeat_count)
num_iter = 0
count = [0, 0, 0, 0]
RANDOM_EXPECTED_SHAPE = {"82386": 1, "61235": 1, "159109": 2}
ACTUAL_SHAPE = {"82386": 0, "61235": 0, "159109": 0}
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
ACTUAL_SHAPE[str(item["image"].shape[0])] += 1
count[item["label"]] += 1
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
assert count == [2, 2, 0, 0]
assert ACTUAL_SHAPE == RANDOM_EXPECTED_SHAPE
def test_distributed_sampler():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case DistributedSampler")
# define parameters.
repeat_count = 1
# apply dataset operations.
sampler = ds.DistributedSampler(4, 1)
data1 = ds.OmniglotDataset(DATA_DIR, sampler=sampler)
data1 = data1.repeat(repeat_count)
num_iter = 0
# each data is a dictionary.
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
# in this example, each dictionary has keys "image" and "label".
assert item["image"].shape[0] == 159109
assert item["label"] == 0
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 1
def test_pk_sampler():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case PKSampler")
# define parameters.
repeat_count = 1
# apply dataset operations.
sampler = ds.PKSampler(1)
data1 = ds.OmniglotDataset(DATA_DIR, sampler=sampler)
data1 = data1.repeat(repeat_count)
num_iter = 0
# each data is a dictionary.
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 2
def test_chained_sampler():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info(
"Test Case Chained Sampler - Random and Sequential, with repeat")
# Create chained sampler, random and sequential.
sampler = ds.RandomSampler()
child_sampler = ds.SequentialSampler()
sampler.add_child(child_sampler)
# Create OmniglotDataset with sampler.
data1 = ds.OmniglotDataset(DATA_DIR, sampler=sampler)
data1 = data1.repeat(count=3)
# Verify dataset size.
data1_size = data1.get_dataset_size()
logger.info("dataset size is: {}".format(data1_size))
assert data1_size == 12
# Verify number of iterations.
num_iter = 0
# each data is a dictionary.
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 12
def test_omniglot_evaluation():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case usage")
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, background=False, num_samples=6)
num_iter = 0
# each data is a dictionary.
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
def test_omniglot_zip():
"""
Feature: load_omniglot.
Description: load OmniglotDataset.
Expectation: get data of OmniglotDataset.
"""
logger.info("Test Case zip")
# define parameters.
repeat_count = 2
# apply dataset operations.
data1 = ds.OmniglotDataset(DATA_DIR, num_samples=8)
data2 = ds.OmniglotDataset(DATA_DIR, num_samples=8)
data1 = data1.repeat(repeat_count)
# rename dataset2 for no conflict.
data2 = data2.rename(input_columns=["image", "label"],
output_columns=["image1", "label1"])
data3 = ds.zip((data1, data2))
num_iter = 0
# each data is a dictionary.
for _ in data3.create_dict_iterator(num_epochs=1, output_numpy=True):
num_iter += 1
logger.info("Number of data in data1: {}".format(num_iter))
assert num_iter == 4
def test_omniglot_exception():
"""
Feature: test_omniglot_exception.
Description: test error cases for OmniglotDataset.
Expectation: raise exception.
"""
logger.info("Test omniglot exception")
def exception_func(item):
raise Exception("Error occur!")
def exception_func2(image, label):
raise Exception("Error occur!")
try:
data = ds.OmniglotDataset(DATA_DIR)
data = data.map(operations=exception_func,
input_columns=["image"],
num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.OmniglotDataset(DATA_DIR)
data = data.map(operations=exception_func2,
input_columns=["image", "label"],
output_columns=["image", "label", "label1"],
column_order=["image", "label", "label1"],
num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
try:
data = ds.OmniglotDataset(DATA_DIR)
data = data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
for _ in data.__iter__():
pass
assert False
except RuntimeError as e:
assert "map operation: [PyFunc] failed. The corresponding data files" in str(e)
if __name__ == '__main__':
test_omniglot_basic()
test_omniglot_num_samples()
test_sequential_sampler()
test_random_sampler()
test_distributed_sampler()
test_chained_sampler()
test_pk_sampler()
test_omniglot_num_shards()
test_omniglot_shard_id()
test_omniglot_no_shuffle()
test_omniglot_extra_shuffle()
test_omniglot_decode()
test_omniglot_evaluation()
test_omniglot_zip()
test_omniglot_exception()
| python |
# -*- coding: utf-8 -*-
"""
This module contains the Parameters class that is used to specify the input parameters of the tree.
"""
import numpy as np
class Parameters():
"""Class to specify the parameters of the fractal tree.
Attributes:
meshfile (str): path and filename to obj file name.
filename (str): name of the output files.
init_node (numpy array): the first node of the tree.
second_node (numpy array): this point is only used to calculate the initial direction of the tree and is not included in the tree. Please avoid selecting nodes that are connected to the init_node by a single edge in the mesh, because it causes numerical issues.
init_length (float): length of the first branch.
N_it (int): number of generations of branches.
length (float): average length of the branches in the tree.
std_length (float): standard deviation of the length. Set to zero to avoid random lengths.
min_length (float): minimum length of the branches. To avoid randomly generated negative lengths.
branch_angle (float): angle with respect to the direction of the previous branch and the new branch.
w (float): repulsivity parameter.
l_segment (float): length of the segments that compose one branch (approximate, because the length of the branch is random). It can be interpreted as the element length in a finite element mesh.
Fascicles (bool): include one or more straight branches with different lengths and angles from the initial branch. It is motivated by the fascicles of the left ventricle.
fascicles_angles (list): angles with respect to the initial branches of the fascicles. Include one entry per fascicle.
fascicles_length (list): length of the fascicles. Include one entry per fascicle; the size must match the size of fascicles_angles.
save (bool): save text files containing the nodes, the connectivity and end nodes of the tree.
save_paraview (bool): save a .vtu paraview file. The tvtk module must be installed.
"""
def __init__(self):
self.meshfile='sphere.obj'
self.filename='sphere-line'
self.init_node=np.array([-1.0 ,0., 0.])
self.second_node=np.array([-0.964, 0.00, 0.266 ])
self.init_length=0.5
#Number of iterations (generations of branches)
self.N_it=10
#Median length of the branches
self.length=.3
#Standard deviation of the length
self.std_length = np.sqrt(0.2)*self.length
#Min length to avoid negative length
self.min_length = self.length/10.
self.branch_angle=0.15
self.w=0.1
#Length of the segments (approximate, because the length of the branch is random)
self.l_segment=.01
self.Fascicles=True
###########################################
# Fascicles data
###########################################
self.fascicles_angles=[-1.5,.2] #rad
self.fascicles_length=[.5,.5]
# Save data?
self.save=True
self.save_paraview=True
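# Minimal usage sketch (assumption: a fractal-tree generator elsewhere in this package
# consumes a Parameters instance; the attribute values below are illustrative only).
if __name__ == '__main__':
    param = Parameters()
    param.meshfile = 'sphere.obj'    # surface mesh to grow the tree on
    param.N_it = 5                   # fewer branch generations for a quick run
    param.save_paraview = False      # skip the .vtu export if tvtk is not installed
    print('Fractal tree parameters:', vars(param))
| python |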
from .fixup_resnet_cifar import *
from .resnet_cifar import *
from .rezero_resnet_cifar import *
from .rezero_dpn import *
from .dpn import *
from .rezero_preact_resnet import *
from .preact_resnet import *
| python |
import os.path
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ui.mainwindow import Ui_MainWindow
from ui.worldview import WorldView
from world import World
class PsychSimUI(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
self.world = None
super(PsychSimUI, self).__init__(parent)
self.setupUi(self)
self.scene = WorldView(self.graphicsView)
self.graphicsView.setScene(self.scene)
@pyqtSlot() # signal with no arguments
def on_actionOpen_triggered(self):
filename = QFileDialog.getOpenFileName(self,"PsychSim -- Open File")
if not filename.isEmpty():
self.openScenario(str(filename))
def openScenario(self,filename):
self.world = World(filename)
settings = QSettings()
settings.setValue('LastFile',os.path.abspath(filename))
self.scene.displayWorld(self.world)
@pyqtSlot() # signal with no arguments
def on_actionSave_triggered(self):
settings = QSettings()
filename = settings.value('LastFile').toString()
self.scene.world.save(str(filename))
self.scene.unsetDirty()
@pyqtSlot() # signal with no arguments
def on_actionQuit_triggered(self):
app.quit()
@pyqtSlot() # signal with no arguments
def on_actionAgent_triggered(self):
self.scene.colorNodes('agent')
@pyqtSlot() # signal with no arguments
def on_actionLikelihood_triggered(self):
self.scene.colorNodes('likelihood')
@pyqtSlot() # signal with no arguments
def on_actionStep_triggered(self):
self.scene.step()
def wheelEvent(self,event):
factor = 1.41**(-event.delta()/240.)
self.graphicsView.scale(factor,factor)
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('scenario',default=None,nargs='?',
help='File containing an existing PsychSim scenario')
app = QApplication(sys.argv)
app.setOrganizationName('USC ICT')
app.setOrganizationDomain('ict.usc.edu')
app.setApplicationName('PsychSim')
args = parser.parse_args(args=[str(el) for el in app.arguments()][1:])
win = PsychSimUI()
if args.scenario is None:
settings = QSettings()
filename = settings.value('LastFile').toString()
if filename and QFile.exists(filename):
win.openScenario(str(filename))
else:
win.openScenario(args.scenario)
win.show()
app.exec_()
| python |
from torch.optim import Optimizer
class ReduceLROnLambda():
def __init__(self, optimizer, func, factor=0.1,\
verbose=False, min_lr=0, eps=1e-8):
if factor >= 1.0:
raise ValueError('Factor should be < 1.0.')
self.factor = factor
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(\
type(optimizer).__name__))
self.optimizer = optimizer
if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(\
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.func = func
self.verbose = verbose
self.eps = eps
self.history_data = None
def step(self, metrics):
flag, self.history_data = self.func(metrics, self.history_data)
if flag:
self._reduce_lr()
def _reduce_lr(self):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
param_group['lr'] = new_lr
if self.verbose:
print('Reducing learning rate' \
' of group {} to {:.4e}.'.format(i, new_lr))
def state_dict(self):
return {key: value for key, value in self.__dict__.items() if key not in {'optimizer', 'func'}}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
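# Minimal usage sketch (assumption: any torch.nn.Module works here; the callback
# signature mirrors how step() calls self.func(metrics, history_data)).
if __name__ == '__main__':
    import torch
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    def reduce_when_not_improving(metric, best):
        # Reduce the learning rate whenever the metric fails to improve on the best seen so far.
        if best is None or metric < best:
            return False, metric
        return True, best

    scheduler = ReduceLROnLambda(optimizer, reduce_when_not_improving, factor=0.5, verbose=True)
    for loss in [1.0, 0.8, 0.9, 0.7]:
        scheduler.step(loss)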
| python |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
# look at the first five columns
print(dataset.head())
# Check if there's any column which is not useful and remove it like the column id
dataset = dataset.drop(["Id"], axis=1)
# check the statistical description
print(dataset.describe())
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns
#number of attributes (exclude target)
#x-axis has target attribute to distinguish between classes
x = dataset["Cover_Type"]
#y-axis shows values of an attribute
y = dataset.drop(["Cover_Type"], axis=1)
size = y.columns
#Plot violin for all attributes
for i in size:
sns.violinplot(x=x,y=y[i])
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
subset_train = dataset.iloc[:,0:10]
data_corr = subset_train.corr()
sns.heatmap(data_corr,annot=True)
correlation = list(data_corr.unstack().sort_values(kind="quicksort"))
corr_var_list = []
for i in correlation:
if abs(i) > upper_threshold and i != 1:
corr_var_list.append(i)
print(corr_var_list)
# Code ends here
# --------------
#Import libraries
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X = dataset.drop(["Cover_Type"], axis=1)
Y = dataset["Cover_Type"]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
#Standardized
scaler = StandardScaler()
#Apply transform only for continuous data
X_train_temp = scaler.fit_transform(X_train.iloc[:,0:53])
X_test_temp = scaler.transform(X_test.iloc[:,0:53])
#Concatenate scaled continuous data and categorical
X_train1 = numpy.concatenate((X_train_temp,X_train.iloc[:,52:]),axis=1)
X_test1 = numpy.concatenate((X_test_temp,X_test.iloc[:,52:]),axis=1)
scaled_features_train_df = pd.DataFrame(X_train1)
scaled_features_train_df.columns = X_train.columns
scaled_features_train_df.index = X_train.index
scaled_features_test_df = pd.DataFrame(X_test1)
scaled_features_test_df.columns = X_test.columns
scaled_features_test_df.index = X_test.index
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
# Write your solution here:
skb = SelectPercentile(score_func=f_classif, percentile=20 )
predictors = skb.fit_transform(X_train1,Y_train)
scores = skb.scores_
Features = scaled_features_train_df.columns
dataframe = pd.DataFrame({"Features":Features,"scores":scores}).sort_values(ascending = False,by = "scores")
top_k_predictors = list(dataframe['Features'][:predictors.shape[1]])
print(top_k_predictors)
# --------------
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
clf = OneVsRestClassifier(LogisticRegression())
clf1 = OneVsRestClassifier(LogisticRegression())
model_fit_all_features = clf1.fit(X_train,Y_train)
predictions_all_features = model_fit_all_features.predict(X_test)
score_all_features = accuracy_score(Y_test,predictions_all_features)
print(score_all_features)
model_fit_top_features = clf.fit(scaled_features_train_df[top_k_predictors],Y_train)
predictions_top_features = model_fit_top_features.predict(scaled_features_test_df[top_k_predictors])
score_top_features = accuracy_score(Y_test,predictions_top_features)
print(score_top_features)
| python |
import errno
import os
from tqdm import tqdm
from urllib.request import urlretrieve
def maybe_makedir(path: str) -> None:
try:
# Create output directory if it does not exist
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def download_file(url: str, path: str, verbose: bool = False) -> None:
if verbose:
def reporthook(t):
"""Wraps tqdm instance.
Don't forget to close() or __exit__()
the tqdm instance once you're done with it (easiest using `with` syntax).
"""
last_b = [0]
def update_to(b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=url) as t:
urlretrieve(url, path, reporthook=reporthook(t))
else:
urlretrieve(url, path)
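# Minimal usage sketch (the URL and destination path below are placeholders, not part
# of the original module).
if __name__ == '__main__':
    maybe_makedir('downloads')
    download_file('https://example.com/archive.zip', 'downloads/archive.zip', verbose=True)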
| python |
from typing import Union
import numpy as np
import pandas as pd
from fedot.api.api_utils.data_definition import data_strategy_selector
from fedot.core.data.data import InputData
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.core.pipelines.pipeline import Pipeline
class ApiDataHelper:
def define_data(self,
ml_task: Task,
features: Union[str, np.ndarray, pd.DataFrame, InputData, dict],
target: Union[str, np.ndarray, pd.Series] = None,
is_predict=False):
""" Prepare data for fedot pipeline composing """
try:
data = data_strategy_selector(features=features,
target=target,
ml_task=ml_task,
is_predict=is_predict)
except Exception as ex:
raise ValueError('Please specify features as a path to a CSV file or as a NumPy array') from ex
return data
def define_predictions(self,
task_type: TaskTypesEnum,
current_pipeline: Pipeline,
test_data: InputData):
if task_type == TaskTypesEnum.classification:
prediction = current_pipeline.predict(test_data, output_mode='labels')
output_prediction = prediction
elif task_type == TaskTypesEnum.ts_forecasting:
# Convert forecast into one-dimensional array
prediction = current_pipeline.predict(test_data)
forecast = np.ravel(np.array(prediction.predict))
prediction.predict = forecast
output_prediction = prediction
else:
prediction = current_pipeline.predict(test_data)
output_prediction = prediction
return output_prediction
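# Minimal usage sketch (assumption: 'train.csv' is a placeholder path to a table with a
# 'target' column; Task and TaskTypesEnum are imported above).
if __name__ == '__main__':
    helper = ApiDataHelper()
    task = Task(TaskTypesEnum.classification)
    input_data = helper.define_data(ml_task=task, features='train.csv', target='target')
    print(input_data)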
| python |
#!/usr/bin/env python
import pandas as pd
import os
import numpy as np
import SNPknock.fastphase as fp
from SNPknock import knockoffHMM
from joblib import Parallel, delayed
import utils_snpko as utils
logger = utils.logger
def make_knockoff(chromosome=None, grouped_by_chromosome=None, df_SNP=None,
df_geno_experiment=None, df_geno_ensembl=None,
SNP_to_wild_type=None, cache_dir=None, path_to_fp=None,
em_iterations=25, random_seed=123):
# assert chromosome!=None and grouped_by_chromosome!=None and df_SNP!=None
assert chromosome is not None
assert grouped_by_chromosome is not None
assert df_SNP is not None
logger.debug("################")
logger.debug("Chromosome %2d #" % chromosome)
logger.debug("################")
num_experiment_people = len(df_geno_experiment)
num_ensembl_people = len(df_geno_ensembl)
indices = grouped_by_chromosome.groups[chromosome]
df_SNP_chromo = df_SNP.iloc[indices].sort_values('chromosome_position')
SNPs_on_chromosome = df_SNP_chromo['SNP'].values
X_experiment = np.empty((num_experiment_people, len(SNPs_on_chromosome)))
X_ensembl = np.empty((num_ensembl_people, len(SNPs_on_chromosome)))
for X, df in [
(X_experiment, df_geno_experiment),
(X_ensembl, df_geno_ensembl)]:
for j, SNP in enumerate(SNPs_on_chromosome):
X[:, j] = utils.genotype_to_nonwild_type_count(
df[SNP].values, SNP_to_wild_type[SNP])
out_path = '%s/chrom_%d' % (cache_dir, chromosome)
# If all relevant files are found in cache, skip EM recomputation; otherwise,
# redo the whole thing.
target_file_suffix_list = [
'alphahat.txt', 'finallikelihoods', 'origchars', 'rhat.txt', 'thetahat.txt']
already_in_cache = True
for suffix in target_file_suffix_list:
target_path = os.path.join(
cache_dir, 'chrom_%d_%s' % (chromosome, suffix))
if not os.path.exists(target_path):
already_in_cache = False
break
if already_in_cache:
logger.debug("Found chrom %d HMM in cache" % chromosome)
else:
# Write array to file
Xfp_file = '%s/X_%d.inp' % (cache_dir, chromosome)
fp.writeX(X_ensembl, Xfp_file)
# Run fastPhase on data (which runs EM)
fp.runFastPhase(path_to_fp, Xfp_file, out_path,
K=12, numit=em_iterations)
# Read in fastPhase results (i.e., HMM parameters) from file:
r_file = out_path + "_rhat.txt"
alpha_file = out_path + "_alphahat.txt"
theta_file = out_path + "_thetahat.txt"
# Why is X_ensembl[0, :] in the function arguments below?
hmm = fp.loadFit(r_file, theta_file, alpha_file, X_ensembl[0, :])
# Actually produce the knockoffs
knockoffs = knockoffHMM(hmm["pInit"], hmm["Q"], hmm["pEmit"], seed=random_seed)
X_knockoffs = knockoffs.sample(X_experiment)
return(X_knockoffs, X_experiment, SNPs_on_chromosome)
def make_all_knockoffs(args):
'''
For each chromosome, independently:
Sort SNPs according to position on genome.
Train HMM parameters with EM on ENSEMBL data.
Generate knockoffs of experimental SNP data.
For now, we ignore sex of persons, although that is
available in ENSEMBL
'''
logger.info("####################################")
logger.info("Fitting HMM and generating knockoffs")
path_to_fp = os.path.join(args.fastPHASE_path, 'fastPHASE')
if not os.path.exists(path_to_fp):
logger.error("Cannot find fastPHASE at %s" % path_to_fp)
raise Exception("fastPHASE executable not found at %s" % path_to_fp)
cache_dir = os.path.join(args.working_dir, 'fastphase_cache')
utils.safe_mkdir(cache_dir)
df_geno_ensembl = pd.read_csv(os.path.join(
(args.working_dir), 'pruned_ensembl.csv'))
# SNP,wild_type,chromosome,chromosome_position
df_SNP = pd.read_csv(os.path.join(
(args.working_dir), 'pruned_SNP_facts.csv'))
df_wild = pd.read_csv(os.path.join(args.working_dir, 'wild_types.csv'))
SNP_to_wild_type = dict(
zip(df_wild['SNP'].values, df_wild['wild_type'].values))
chromosome_list = np.sort(np.unique(df_SNP['chromosome']))
for chromosome in chromosome_list:
assert chromosome in np.arange(1, 24)
df_geno_experiment = pd.read_csv(os.path.join(
(args.working_dir), 'pruned_experiment.csv'))
# Make sure we have the same SNPs everywhere.
assert (set([c for c in df_geno_ensembl.columns if c.startswith('rs')]) ==
set([c for c in df_geno_experiment.columns if c.startswith('rs')]))
for SNP in df_SNP.SNP.values:
assert SNP in df_geno_ensembl.columns
grouped_by_chromosome = df_SNP.groupby('chromosome')
num_experiment_people = len(df_geno_experiment)
knockoff_SNP_list = []
utils.safe_mkdir(os.path.join(args.working_dir, 'knockoffs'))
em_iterations = 500
logger.info('Number of EM iterations: %d' % em_iterations)
for knockoff_trial_count in range(args.num_knockoff_trials):
random_seed = knockoff_trial_count + args.random_seed
if ((args.num_knockoff_trials <= 20) or
knockoff_trial_count % ((args.num_knockoff_trials) // 20) == 0):
logger.info("Knockoff sampling %d of %d" % (
knockoff_trial_count, args.num_knockoff_trials))
if False:
# Serial version; code preserved for debugging purposes
for chromosome in chromosome_list:
knockoff_SNP_list.append(
make_knockoff(
chromosome=chromosome,
grouped_by_chromosome=grouped_by_chromosome, df_SNP=df_SNP,
df_geno_experiment=df_geno_experiment, df_geno_ensembl=df_geno_ensembl,
SNP_to_wild_type=SNP_to_wild_type, cache_dir=cache_dir,
path_to_fp=path_to_fp, em_iterations=em_iterations, random_seed=random_seed))
else:
knockoff_SNP_list = Parallel(n_jobs=args.num_workers)(
delayed(make_knockoff)(
chromosome=i,
grouped_by_chromosome=grouped_by_chromosome, df_SNP=df_SNP,
df_geno_experiment=df_geno_experiment, df_geno_ensembl=df_geno_ensembl,
SNP_to_wild_type=SNP_to_wild_type, cache_dir=cache_dir, path_to_fp=path_to_fp,
em_iterations=em_iterations, random_seed=random_seed)
for i in chromosome_list)
# Stitch results for each chromosome back together into a single dataframe
# Knockoff results
SNP_columns = [
x for x in df_geno_ensembl.columns if x.startswith('rs')]
df_knockoffs = pd.DataFrame(
columns=SNP_columns, index=np.arange(num_experiment_people))
# Matched experimental observations + knockoffs in one dataframe
matched_columns = []
data_labels = []
for field in df_geno_experiment.columns:
if field.startswith('rs'):
matched_columns.append(field)
matched_columns.append(field + '_knockoff')
elif field.startswith(args.data_prefix):
data_labels.append(field)
else:
continue
df_matched = pd.DataFrame(columns=matched_columns + data_labels,
index=np.arange(num_experiment_people))
for (X_knockoffs, X_experiment, SNPs_on_chromosome) in knockoff_SNP_list:
for i in range(num_experiment_people):
for j, SNP in enumerate(SNPs_on_chromosome):
df_knockoffs[SNP].values[i] = X_knockoffs[i, j]
df_matched[SNP].values[i] = int(X_experiment[i, j])
df_matched[
SNP + '_knockoff'].values[i] = int(X_knockoffs[i, j])
for data_label in data_labels:
df_matched[data_label] = df_geno_experiment[data_label]
# Sanity check that all fields are filled in.
for field in df_knockoffs:
for i in range(num_experiment_people):
assert pd.notnull(df_knockoffs[field].values[i])
df_matched.to_csv(os.path.join((args.working_dir), 'knockoffs',
'knockoffs_%03d.csv' % knockoff_trial_count),
index=False)
logger.info("Done making knockoffs!!!")
if __name__ == '__main__':
args = utils.parse_arguments()
utils.initialize_logger(args)
make_all_knockoffs(args)
| python |
import datetime
import json
import time
from fate_manager.db.db_models import DeployComponent, FateSiteInfo, FateSiteCount, FateSiteJobInfo, ApplySiteInfo
from fate_manager.entity import item
from fate_manager.entity.types import SiteStatusType, FateJobEndStatus
from fate_manager.operation.db_operator import DBOperator
from fate_manager.settings import FATE_FLOW_SETTINGS, request_flow_logger, request_cloud_logger
from fate_manager.utils.request_cloud_utils import request_cloud_manager
from fate_manager.utils.request_fate_flow_utils import post_fate_flow
class CountJob:
@staticmethod
def count_fate_flow_job(account):
request_flow_logger.info("start count fate flow job")
site_list = DBOperator.query_entity(FateSiteInfo, status=SiteStatusType.JOINED)
component_name = 'FATEFLOW'
party_id_flow_url = {}
for site in site_list:
try:
deploy_fate_flow = DBOperator.query_entity(DeployComponent, party_id=site.party_id,
component_name=component_name)
if deploy_fate_flow:
query_job_url = "http://{}{}".format(deploy_fate_flow[0].address, FATE_FLOW_SETTINGS["QueryJob"])
party_id_flow_url[site.party_id] = query_job_url
fate_site_count = DBOperator.query_entity(FateSiteCount, reverse=True, order_by="version")
now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if fate_site_count:
if site.party_id in fate_site_count[0].party_id_list:
party_id_list = fate_site_count[0].party_id_list
time_list = [fate_site_count[0].strftime, now_time]
else:
party_id_list = fate_site_count[0].party_id_list
party_id_list.append(site.party_id)
time_list = [0, now_time]
else:
time_list = [0, now_time]
party_id_list = [site.party_id]
request_flow_logger.info(time_list)
job_list = post_fate_flow(query_job_url, data={"end_time": time_list})
CountJob.log_job_info(account, job_list, party_id=site.party_id, site_name=site.site_name)
request_flow_logger.info(f"start create fate site count: now_time{now_time}")
DBOperator.create_entity(FateSiteCount, {"strftime": now_time, "party_id_list": party_id_list})
except Exception as e:
request_flow_logger.exception(e)
return party_id_flow_url
@staticmethod
def detector_no_end_job(account, party_id_flow_url):
job_list = DBOperator.query_entity(FateSiteJobInfo, is_end=0)
synchronization_job_list = []
for job in job_list:
try:
update_status = FateJobEndStatus.FAILED
if job.party_id in party_id_flow_url:
job_list = post_fate_flow(party_id_flow_url[job.party_id], data={"job_id": job.job_id})
if job_list:
if job_list[0]["f_status"] not in FateJobEndStatus.status_list():
update_status = None
if update_status:
DBOperator.update_entity(FateSiteJobInfo, {"job_id": job.job_id, "status":update_status, "is_end": 1})
job.status = update_status
job = CountJob.job_adapter(job)
if job:
synchronization_job_list.append(job)
except Exception as e:
request_flow_logger.exception(e)
CountJob.job_synchronization(account, synchronization_job_list, m="no_end")
@staticmethod
def detector_no_report_job(account):
job_list = DBOperator.query_entity(FateSiteJobInfo, is_report=0)
synchronization_job_list = []
for job in job_list:
job = CountJob.job_adapter(job)
if job:
synchronization_job_list.append(job)
CountJob.job_synchronization(account, synchronization_job_list, is_report=1, m='no_report')
@staticmethod
def log_job_info(account, job_list, party_id, site_name):
request_flow_logger.info(job_list)
apply_site_list = DBOperator.query_entity(ApplySiteInfo)
all_institutions = {}
for site in apply_site_list:
all_institutions[str(site.party_id)] = site.institutions
synchronization_job_list = []
for job in job_list:
try:
if not CountJob.check_roles(job.get("f_roles")):
continue
site_job = CountJob.save_site_job_item(job, party_id, all_institutions, site_name, account)
site_job = CountJob.job_adapter(site_job)
if site_job:
synchronization_job_list.append(site_job)
except Exception as e:
request_flow_logger.exception(e)
CountJob.job_synchronization(account, synchronization_job_list, m='log_job')
@staticmethod
def check_roles(roles):
return True
@staticmethod
def save_site_job_item(job, party_id, all_institutions, site_name, account):
site_job = FateSiteJobInfo()
site_job.job_id = job.get("f_job_id")
site_job.institutions = account.institutions
site_job.party_id = party_id
site_job.site_name = site_name
site_job.job_create_time = int(time.mktime(time.strptime(job.get("f_job_id")[:20], "%Y%m%d%H%M%S%f"))*1000)
site_job.job_elapsed = job.get("f_elapsed")
site_job.job_start_time = job.get("f_start_time")
site_job.job_end_time = job.get("f_end_time")
site_job.roles = job.get("f_roles")
site_job.job_type = CountJob.get_job_type(job.get("f_dsl"))
site_job.status = FateJobEndStatus.end_status(job.get("f_status"))
site_job.is_end = 1 if site_job.status in FateJobEndStatus.status_list() else 0
site_job.job_create_day = job.get("f_job_id")[:8]
site_job.job_create_day_date = datetime.datetime.strptime(site_job.job_create_day, "%Y%m%d")
site_job.job_info = job
site_job.need_report = 1
other_party_id = set()
site_job.role = job.get("f_role")
institutions_party_id_list = []
if site_job.role == "local":
site_job.other_party_id = [party_id]
institutions_party_id_list = [party_id]
else:
for role, party_id_list in job["f_roles"].items():
for _party_id in party_id_list:
other_party_id.add(_party_id)
if str(_party_id) in all_institutions and all_institutions[str(_party_id)] == all_institutions[str(party_id)]:
institutions_party_id_list.append(_party_id)
if str(_party_id) not in all_institutions:
site_job.need_report = 0
return None
site_job.other_party_id = list(set(other_party_id))
if len(site_job.other_party_id) > 1 and party_id in site_job.other_party_id:
site_job.other_party_id.remove(site_job.party_id)
# set other institutions by other party id
site_job.institutions_party_id = list(set(institutions_party_id_list))
institutions_list = []
for _party_id in site_job.other_party_id:
if str(_party_id) in all_institutions.keys():
institutions_list.append(all_institutions[str(_party_id)])
site_job.other_institutions = list(set(institutions_list))
if len(site_job.other_institutions) > 1 and site_job.institutions in site_job.other_institutions:
site_job.other_institutions.remove(site_job.institutions)
site_job.save(force_insert=True)
return site_job
@staticmethod
def get_job_type(dsl):
job_type = ''
if isinstance(dsl, str):
dsl = json.loads(dsl)
cpn = dsl['components'].keys()
cpn = list(cpn)[0]
if 'upload' in cpn:
job_type = 'upload'
elif 'download' in cpn:
job_type = 'download'
elif 'intersect' in cpn:
for j in dsl['components'].keys():
if 'intersect' not in j:
job_type = 'modeling'
break
else:
job_type = 'intersect'
else:
job_type = 'modeling'
return job_type
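# Illustrative example (not part of the original code): a DSL such as
#   {"components": {"intersect_0": {...}, "hetero_lr_0": {...}}}
# is classified as 'modeling' because a non-intersect component appears alongside
# the intersection step, while {"components": {"intersect_0": {...}}} alone would
# be classified as 'intersect'.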
@staticmethod
def job_adapter(site_job):
# for cloud job
if not site_job or not site_job.need_report:
return None
site_job.job_info = None
site_job.create_date = None
site_job.update_date = None
site_job.create_time = None
site_job.job_create_day_date = datetime.datetime.strptime(site_job.job_create_day, "%Y%m%d")
site_job.job_create_day_date = int(datetime.datetime.timestamp(site_job.job_create_day_date)) * 1000
site_job.roles = json.dumps(site_job.roles, separators=(',', ':'))
site_job.other_party_id = json.dumps(site_job.other_party_id, separators=(',', ':'))
site_job.other_institutions = json.dumps(site_job.other_institutions, separators=(',', ':'))
site_job = site_job.to_json()
del site_job["need_report"], site_job["is_report"], site_job["is_end"], site_job["institutions_party_id"]
return site_job
@staticmethod
def job_synchronization(account, synchronization_job_list, is_report=0, m='log_job'):
piece = 0
count_of_piece = 500
try:
while len(synchronization_job_list) > piece*count_of_piece:
start = piece*count_of_piece
end = piece*count_of_piece + count_of_piece
institution_signature_item = item.InstitutionSignatureItem(fateManagerId=account.fate_manager_id,
appKey=account.app_key,
appSecret=account.app_secret).to_dict()
resp = request_cloud_manager(uri_key="MonitorPushUri", data=institution_signature_item,
body=synchronization_job_list[start:end],
url=None)
piece += 1
except Exception as e:
request_cloud_logger.exception(e)
if piece*count_of_piece >= len(synchronization_job_list):
if is_report:
for job in synchronization_job_list[:piece*count_of_piece]:
DBOperator.update_entity(FateSiteJobInfo, {"job_id": job.get("job_id"), "is_report": is_report})
else:
if m in ["log_job", "no_end"]:
for job in synchronization_job_list[piece * count_of_piece:]:
DBOperator.update_entity(FateSiteJobInfo, {"job_id": job.get("job_id"), "is_report": is_report})
| python |
from . import ShapeNet, SetMNIST, SetMultiMNIST, ArCH
def get_datasets(args):
if args.dataset_type == 'shapenet15k':
return ShapeNet.build(args)
if args.dataset_type == 'mnist':
return SetMNIST.build(args)
if args.dataset_type == 'multimnist':
return SetMultiMNIST.build(args)
if args.dataset_type == 'arch':
return ArCH.build(args)
raise NotImplementedError
| python |
# flake8: noqa: W291
# pylint: disable=too-many-lines,trailing-whitespace
"""
The header part of AbstractAnnoworkApi.
Note:
This file is auto-generated by openapi-generator. See generate/README.md for details.
"""
from __future__ import annotations
import abc
import warnings # pylint: disable=unused-import
from typing import Any, Optional, Union # pylint: disable=unused-import
import annoworkapi # pylint: disable=unused-import
class AbstractAnnoworkApi(abc.ABC):
"""
Abstract base class for the AnnoworkApi class.
"""
@abc.abstractmethod
def _request_wrapper(
self,
http_method: str,
url_path: str,
*,
query_params: Optional[dict[str, Any]] = None,
header_params: Optional[dict[str, Any]] = None,
request_body: Optional[Any] = None,
log_response_with_error: bool = True,
) -> Any:
pass
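# A concrete subclass is expected to implement _request_wrapper by sending the HTTP
# request described by http_method and url_path with the given query/header parameters
# and request body, and returning the decoded response; every generated method below
# simply builds these arguments and delegates to it.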
#########################################
# Public Method : AccountApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def confirm_reset_password(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Password reset step 2 (change to a new password)
Changes the password to a new one. For identity verification, this uses the verification code from the email received via [request a password reset](#operation/resetPassword). This is the final step of the password reset process.
Args:
request_body (Any): Request Body
confirm_reset_password_request (ConfirmResetPasswordRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/confirm-reset-password"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def confirm_sign_up(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Sign-up step 2 (full registration)
As the final step of account sign-up, completes the full registration of the account.
Args:
request_body (Any): Request Body
confirm_sign_up_request (ConfirmSignUpRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/confirm-sign-up"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_account_external_linkage_info(self, user_id: str, **kwargs) -> Any:
"""Get account external linkage information
Args:
user_id (str): User ID (required)
Returns:
InlineResponse2001
"""
url_path = f"/accounts/{user_id}/external-linkage-info"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_account_external_linkage_info(self, user_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Update account external linkage information
Args:
user_id (str): User ID (required)
request_body (Any): Request Body
put_account_external_linkage_info_request (PutAccountExternalLinkageInfoRequest): (required)
Returns:
InlineResponse2001
"""
url_path = f"/accounts/{user_id}/external-linkage-info"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def reset_password(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Password reset step 1 (start)
Sends the confirmation code required for a password reset by email. Executing the subsequent [change to a new password](#operation/confirmResetPassword) then changes the password to a new one.
Args:
request_body (Any): Request Body
reset_password_request (ResetPasswordRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/reset-password"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def sign_up(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Sign-up step 1 (provisional registration)
As the first step of account sign-up, provisionally registers the account. If the email address is not yet registered with AnnoWork, a new account is created in the provisional state and an email for the full-registration flow is sent. This email contains a temporary password and related information. If the user with the specified email address is provisionally registered, the full-registration email is re-sent. If the user with the specified email address is already fully registered, an error is returned as an invalid request (a fully registered account never reverts to provisional).
Args:
request_body (Any): Request Body
sign_up_request (SignUpRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/sign-up"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : ActualWorkingTimeApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_actual_working_time_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, actual_working_time_id: str, **kwargs
) -> Any:
"""Delete an actual working time record
Args:
workspace_id (str): Workspace ID (required)
workspace_member_id (str): Workspace member ID (required)
actual_working_time_id (str): Actual working time ID (required)
Returns:
ActualWorkingTime
"""
url_path = (
f"/workspaces/{workspace_id}/members/{workspace_member_id}/actual-working-times/{actual_working_time_id}"
)
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_actual_working_times(
self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""Get all actual working times for the whole workspace
Args:
workspace_id (str): Workspace ID (required)
query_params (dict[str, Any]): Query Parameters
job_id (str): Job ID
term_start (str): Used for range search by date
term_end (str): Used for range search by date
Returns:
[ActualWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/actual-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
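# Illustrative call (assumption: `client` is a concrete implementation of this class;
# the date strings are placeholders):
#   client.get_actual_working_times("my_workspace", query_params={"term_start": "2022-01-01", "term_end": "2022-01-31"})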
def get_actual_working_times_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""Get all actual working times for a workspace member
Args:
workspace_id (str): Workspace ID (required)
workspace_member_id (str): Workspace member ID (required)
query_params (dict[str, Any]): Query Parameters
term_start (str): Start datetime of the range to retrieve. Used for range search by date
term_end (str): End datetime of the range to retrieve. Used for range search by date
Returns:
[ActualWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/actual-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_sum_of_actual_working_times(
self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""Get the total of actual working times for the whole workspace
Args:
workspace_id (str): Workspace ID (required)
query_params (dict[str, Any]): Query Parameters
job_id (str): Job ID
includes_archived_job (bool): Whether to include totals for archived jobs
Returns:
SumOfTimes
"""
url_path = f"/workspaces/{workspace_id}/sum-of-actual-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_actual_working_time_by_workspace_member(
self,
workspace_id: str,
workspace_member_id: str,
actual_working_time_id: str,
request_body: Optional[Any] = None,
**kwargs,
) -> Any:
"""Update an actual working time record
Args:
workspace_id (str): Workspace ID (required)
workspace_member_id (str): Workspace member ID (required)
actual_working_time_id (str): Actual working time ID (required)
request_body (Any): Request Body
put_actual_working_time_request (PutActualWorkingTimeRequest): (required)
Returns:
ActualWorkingTime
"""
url_path = (
f"/workspaces/{workspace_id}/members/{workspace_member_id}/actual-working-times/{actual_working_time_id}"
)
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : ExpectedWorkingTimeApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_expected_working_time_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, date: str, **kwargs
) -> Any:
"""Delete expected working time for a specified date
Args:
workspace_id (str): Workspace ID (required)
workspace_member_id (str): Workspace member ID (required)
date (str): Target date of the expected working time (required)
Returns:
ExpectedWorkingTime
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/expected-working-times/{date}"
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_expected_working_times(
self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""Get all expected working times
Args:
workspace_id (str): Workspace ID (required)
query_params (dict[str, Any]): Query Parameters
term_start (str): Used for range search by date
term_end (str): Used for range search by date
Returns:
[ExpectedWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/expected-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_expected_working_times_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""Get the list of expected working times
Args:
workspace_id (str): Workspace ID (required)
workspace_member_id (str): Workspace member ID (required)
query_params (dict[str, Any]): Query Parameters
term_start (str): Start date of the range to retrieve. Used for range search by date
term_end (str): End date of the range to retrieve. Used for range search by date
Returns:
[ExpectedWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/expected-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_expected_working_time_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, date: str, request_body: Optional[Any] = None, **kwargs
) -> Any:
"""Update expected working time for a specified date
Args:
workspace_id (str): Workspace ID (required)
workspace_member_id (str): Workspace member ID (required)
date (str): Target date of the expected working time (required)
request_body (Any): Request Body
put_expected_working_time_request (PutExpectedWorkingTimeRequest): (required)
Returns:
ExpectedWorkingTime
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/expected-working-times/{date}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : JobApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_job(self, workspace_id: str, job_id: str, **kwargs) -> Any:
"""Delete a job
Args:
workspace_id (str): Workspace ID (required)
job_id (str): Job ID (required)
Returns:
Job
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}"
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_job(self, workspace_id: str, job_id: str, **kwargs) -> Any:
"""Get a job
Args:
workspace_id (str): Workspace ID (required)
job_id (str): Job ID (required)
Returns:
Job
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_job_children(self, workspace_id: str, job_id: str, **kwargs) -> Any:
"""Get the list of child jobs
Args:
workspace_id (str): Workspace ID (required)
job_id (str): Job ID (required)
Returns:
JobChildren
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}/children"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_jobs(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
"""Get the list of jobs
Args:
workspace_id (str): Workspace ID (required)
query_params (dict[str, Any]): Query Parameters
sort (str): sort key (for sorting on multiple keys, separate them with , (comma); valid keys are id and name; prefix a key with - (hyphen) for descending order)
Returns:
[Job]
"""
url_path = f"/workspaces/{workspace_id}/jobs"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_job(self, workspace_id: str, job_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Update a job
Args:
workspace_id (str): Workspace ID (required)
job_id (str): Job ID (required)
request_body (Any): Request Body
put_job_request (PutJobRequest): (required)
Returns:
Job
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : LoginApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def post_login(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Log in
Args:
request_body (Any): Request Body
login_request (LoginRequest): (required)
Returns:
LoginToken
"""
url_path = f"/login"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : MyApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def change_password(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""Change password
Changes the password.
Args:
request_body (Any): Request Body
change_password_request (ChangePasswordRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/my/account/password"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_my_account(self, **kwargs) -> Any:
"""Get the account information of the logged-in user
Args:
Returns:
Account
"""
url_path = f"/my/account"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_my_schedules(self, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
"""Get the list of schedules assigned to the current user
Args:
query_params (dict[str, Any]): Query Parameters
workspace_id (str): If a workspace ID is specified, only schedules assigned in that workspace are returned
term_start (str): Used for range search by date
term_end (str): Used for range search by date
Returns:
[Schedule]
"""
url_path = f"/my/schedules"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_my_workspace_members(self, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
        """Get the list of workspace member records for the current user.
        Args:
            query_params (dict[str, Any]): Query Parameters
                workspace_id (str): If a workspace ID is specified, only workspace member records belonging to that workspace are returned
        Returns:
            [WorkspaceMember]
        """
url_path = f"/my/workspace-members"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_my_workspaces(self, **kwargs) -> Any:
        """Get the list of workspaces the current user belongs to.
        Args:
        Returns:
            [Workspace]
        """
url_path = f"/my/workspaces"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def put_my_account(self, request_body: Optional[Any] = None, **kwargs) -> Any:
        """Update the account information.
        Args:
            request_body (Any): Request Body
                put_my_account_request (PutMyAccountRequest): (required)
        Returns:
            Account
        """
url_path = f"/my/account"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : ScheduleApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
    def delete_schedule(self, workspace_id: str, schedule_id: str, **kwargs) -> Any:
        """Delete a schedule.
        Args:
            workspace_id (str): Workspace ID (required)
            schedule_id (str): Schedule ID (required)
        Returns:
            Schedule
        """
url_path = f"/workspaces/{workspace_id}/schedules/{schedule_id}"
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_schedule(self, workspace_id: str, schedule_id: str, **kwargs) -> Any:
        """Get a schedule.
        Args:
            workspace_id (str): Workspace ID (required)
            schedule_id (str): Schedule ID (required)
        Returns:
            Schedule
        """
url_path = f"/workspaces/{workspace_id}/schedules/{schedule_id}"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_schedules(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
        """Get the list of schedules.
        Args:
            workspace_id (str): Workspace ID (required)
            query_params (dict[str, Any]): Query Parameters
                term_start (str): Used for date-range filtering
                term_end (str): Used for date-range filtering
                job_id (str): Job ID
        Returns:
            [Schedule]
        """
url_path = f"/workspaces/{workspace_id}/schedules"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_sum_of_schedules(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
        """Get the total scheduled time for the whole workspace.
        Args:
            workspace_id (str): Workspace ID (required)
            query_params (dict[str, Any]): Query Parameters
                job_id (str): Job ID
                includes_archived_job (bool): Whether to include totals for archived jobs
        Returns:
            SumOfTimes
        """
url_path = f"/workspaces/{workspace_id}/sum-of-schedules"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def put_schedule(self, workspace_id: str, schedule_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
        """Update a schedule.
        Args:
            workspace_id (str): Workspace ID (required)
            schedule_id (str): Schedule ID (required)
            request_body (Any): Request Body
                put_schedule_request (PutScheduleRequest): (required)
        Returns:
            Schedule
        """
url_path = f"/workspaces/{workspace_id}/schedules/{schedule_id}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : WorkspaceApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
    def get_workspace(self, workspace_id: str, **kwargs) -> Any:
        """Get a workspace.
        Args:
            workspace_id (str): Workspace ID (required)
        Returns:
            Workspace
        """
url_path = f"/workspaces/{workspace_id}"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_workspace_tag(self, workspace_id: str, workspace_tag_id: str, **kwargs) -> Any:
        """Get a workspace tag.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_tag_id (str): Workspace tag ID (required)
        Returns:
            WorkspaceTag
        """
url_path = f"/workspaces/{workspace_id}/tags/{workspace_tag_id}"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_workspace_tag_members(self, workspace_id: str, workspace_tag_id: str, **kwargs) -> Any:
        """Get the list of workspace members linked to a workspace tag.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_tag_id (str): Workspace tag ID (required)
        Returns:
            WorkspaceTagMembers
        """
url_path = f"/workspaces/{workspace_id}/tags/{workspace_tag_id}/members"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_workspace_tags(self, workspace_id: str, **kwargs) -> Any:
        """Get the list of workspace tags.
        Args:
            workspace_id (str): Workspace ID (required)
        Returns:
            [WorkspaceTag]
        """
url_path = f"/workspaces/{workspace_id}/tags"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def put_workspace(self, workspace_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
        """Update a workspace.
        Args:
            workspace_id (str): Workspace ID (required)
            request_body (Any): Request Body
                put_workspace_request (PutWorkspaceRequest): (required)
        Returns:
            Workspace
        """
url_path = f"/workspaces/{workspace_id}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_workspace_tag(
self, workspace_id: str, workspace_tag_id: str, request_body: Optional[Any] = None, **kwargs
    ) -> Any:
        """Update a workspace tag.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_tag_id (str): Workspace tag ID (required)
            request_body (Any): Request Body
                put_workspace_tag_request (PutWorkspaceTagRequest): (required)
        Returns:
            WorkspaceTag
        """
url_path = f"/workspaces/{workspace_id}/tags/{workspace_tag_id}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : WorkspaceMemberApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
    def delete_workspace_member(self, workspace_id: str, workspace_member_id: str, **kwargs) -> Any:
        """Delete a workspace member.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_member_id (str): Workspace member ID (required)
        Returns:
            WorkspaceMember
        """
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}"
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_workspace_member(self, workspace_id: str, workspace_member_id: str, **kwargs) -> Any:
        """Get a workspace member.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_member_id (str): Workspace member ID (required)
        Returns:
            WorkspaceMember
        """
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_workspace_member_tags(self, workspace_id: str, workspace_member_id: str, **kwargs) -> Any:
        """Get the list of tags attached to a workspace member.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_member_id (str): Workspace member ID (required)
        Returns:
            WorkspaceMemberTags
        """
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/tags"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
    def get_workspace_members(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
        """Get the list of workspace members.
        Args:
            workspace_id (str): Workspace ID (required)
            query_params (dict[str, Any]): Query Parameters
                sort (str): sort key (prefix with a hyphen (-) for descending order)
                includes_inactive_members (bool): Whether to include deactivated workspace members
        Returns:
            [WorkspaceMember]
        """
url_path = f"/workspaces/{workspace_id}/members"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_workspace_member(
self, workspace_id: str, workspace_member_id: str, request_body: Optional[Any] = None, **kwargs
    ) -> Any:
        """Update a workspace member.
        Args:
            workspace_id (str): Workspace ID (required)
            workspace_member_id (str): Workspace member ID (required)
            request_body (Any): Request Body
                put_workspace_member_request (PutWorkspaceMemberRequest): (required)
        Returns:
            WorkspaceMember
        """
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
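    # Example usage (an illustrative sketch only; the client class name, its
    # constructor arguments and the request-body contents below are hypothetical,
    # while the method names and parameters come from the wrappers above):
    #
    #   client = Client(...)  # hypothetical constructor of this API wrapper
    #   jobs = client.get_jobs(workspace_id="ws1", query_params={"sort": "-name"})
    #   job = client.put_job(workspace_id="ws1", job_id="job1",
    #                        request_body={"name": "renamed job"})  # hypothetical body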
| python |
#!/usr/bin/env python
from setuptools import setup, os
setup(
name='PyBabel-json-md',
version='0.1.0',
description='PyBabel json metadef (md) gettext strings extractor',
author='Wayne Okuma',
author_email='[email protected]',
packages=['pybabel_json_md'],
url="https://github.com/wkoathp/pybabel-json-md",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'babel',
],
include_package_data=True,
entry_points = """
[babel.extractors]
json_md = pybabel_json_md.extractor:extract_json_md
""",
)
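# Example of wiring the extractor into a Babel extraction run (a sketch; the
# mapping-file name and the glob pattern are placeholders):
#
#   # babel.cfg
#   [json_md: **/metadefs/**.json]
#
#   $ pybabel extract -F babel.cfg -o messages.pot .
#
# The "json_md" section name matches the entry point registered above.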
| python |
# Tai Sakuma <[email protected]>
import pytest
has_no_ROOT = False
try:
import ROOT
except ImportError:
has_no_ROOT = True
from alphatwirl.roottree import Events
if not has_no_ROOT:
    from alphatwirl.roottree import BEvents
##__________________________________________________________________||
events_classes = [Events]
if not has_no_ROOT:
events_classes.append(BEvents)
events_classes_ids = [c.__name__ for c in events_classes]
##__________________________________________________________________||
class MockFile(object):
pass
class MockTree(object):
def __init__(self, entries=100):
self.entries = entries
self.ievent = -1
self.branchstatus = [ ]
self.branch1 = 1111
self.directory = MockFile()
def GetDirectory(self):
return self.directory
def GetEntries(self):
return self.entries
def GetEntry(self, ientry):
if ientry < self.entries:
nbytes = 10
self.ievent = ientry
else:
nbytes = 0
self.ievent = -1
return nbytes
def SetBranchStatus(self, bname, status):
self.branchstatus.append((bname, status))
def test_mocktree():
tree = MockTree(entries=3)
assert isinstance(tree.GetDirectory(), MockFile)
assert 3 == tree.GetEntries()
assert -1 == tree.ievent
nbytes = 10
assert nbytes == tree.GetEntry(0)
assert 0 == tree.ievent
assert nbytes == tree.GetEntry(1)
assert 1 == tree.ievent
assert nbytes == tree.GetEntry(2)
assert 2 == tree.ievent
assert 0 == tree.GetEntry(3)
assert -1 == tree.ievent
##__________________________________________________________________||
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_init(Events):
tree = MockTree()
events = Events(tree)
events = Events(tree, 100)
assert tree is events.tree
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_repr(Events):
tree = MockTree()
events = Events(tree)
repr(events)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_nEvents_default(Events):
tree = MockTree(entries=100)
events = Events(tree)
assert 100 == events.nEvents # default the same as entries
assert 100 == len(events)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
@pytest.mark.parametrize('maxEvents, expected ', [
pytest.param(-1, 100, id='default'),
pytest.param(50, 50, id='less'),
pytest.param(120, 100, id='more'),
pytest.param(100, 100, id='exact'),
])
def test_nEvents(Events, maxEvents, expected):
tree = MockTree(entries=100)
events = Events(tree, maxEvents)
assert expected == events.nEvents
assert expected == len(events)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
@pytest.mark.parametrize('maxEvents, start, expected ', [
pytest.param(-1, 1, 99, id='all_events_start_2nd'),
pytest.param(10, 1, 10, id='nEvents_equal_maxEvents'),
pytest.param(-1, 99, 1, id='all_events_start_last'),
pytest.param(20, 99, 1, id='nEvents_less_than_maxEvents'),
pytest.param(-1, 100, 0, id='nEvents_zero_1'),
pytest.param(-1, 110, 0, id='nEvents_zero_2'),
pytest.param(10, 100, 0, id='nEvents_zero_3'),
])
def test_nEvents_start(Events, maxEvents, start, expected):
tree = MockTree(entries=100)
events = Events(tree, maxEvents=maxEvents, start=start)
assert expected == events.nEvents
assert expected == len(events)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_nEvents_start_raise(Events):
tree = MockTree(entries=100)
with pytest.raises(ValueError):
Events(tree, maxEvents=-1, start=-10)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_iEvent(Events):
tree = MockTree(entries=4)
events = Events(tree)
assert -1 == events.iEvent
it = iter(events)
event = next(it)
assert 0 == event.iEvent
assert 0 == tree.ievent
event = next(it)
assert 1 == event.iEvent
assert 1 == tree.ievent
event = next(it)
assert 2 == event.iEvent
assert 2 == tree.ievent
event = next(it)
assert 3 == event.iEvent
assert 3 == tree.ievent
with pytest.raises(StopIteration):
next(it)
assert -1 == event.iEvent
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_maxEvents(Events):
tree = MockTree(entries=40)
events = Events(tree, maxEvents=4)
assert -1 == events.iEvent
it = iter(events)
event = next(it)
assert 0 == event.iEvent
event = next(it)
assert 1 == event.iEvent
event = next(it)
assert 2 == event.iEvent
event = next(it)
assert 3 == event.iEvent
with pytest.raises(StopIteration):
next(it)
assert -1 == event.iEvent
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_iEvent_start(Events):
tree = MockTree(entries=4)
events = Events(tree, start=2)
assert -1 == events.iEvent
it = iter(events)
event = next(it)
assert 0 == event.iEvent
assert 2 == tree.ievent
event = next(it)
assert 1 == event.iEvent
assert 3 == tree.ievent
with pytest.raises(StopIteration):
next(it)
    assert -1 == event.iEvent
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_maxEvents_start(Events):
tree = MockTree(entries=40)
events = Events(tree, maxEvents=4, start=2)
assert -1 == events.iEvent
it = iter(events)
event = next(it)
assert 0 == event.iEvent
assert 2 == tree.ievent
event = next(it)
assert 1 == event.iEvent
assert 3 == tree.ievent
event = next(it)
assert 2 == event.iEvent
assert 4 == tree.ievent
event = next(it)
assert 3 == event.iEvent
assert 5 == tree.ievent
with pytest.raises(StopIteration):
next(it)
assert -1 == event.iEvent
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_getitem(Events):
tree = MockTree(entries=4)
events = Events(tree)
assert -1 == events.iEvent
event = events[0]
assert 0 == event.iEvent
assert 0 == tree.ievent
event = events[1]
assert 1 == event.iEvent
assert 1 == tree.ievent
event = events[2]
assert 2 == event.iEvent
assert 2 == tree.ievent
event = events[3]
assert 3 == event.iEvent
assert 3 == tree.ievent
with pytest.raises(IndexError):
events[4]
assert -1 == events.iEvent
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_getitem_start(Events):
tree = MockTree(entries=4)
events = Events(tree, start=2)
assert -1 == events.iEvent
event = events[0]
assert 0 == event.iEvent
assert 2 == tree.ievent
event = events[1]
assert 1 == event.iEvent
assert 3 == tree.ievent
with pytest.raises(IndexError):
events[4]
assert -1 == events.iEvent
##__________________________________________________________________||
| python |
import qimpy as qp
import numpy as np
from scipy.special import sph_harm
from typing import Sequence, Any, List, Tuple
def get_harmonics_ref(l_max: int, r: np.ndarray) -> np.ndarray:
"""Reference real solid harmonics based on SciPy spherical harmonics."""
rMag = np.linalg.norm(r, axis=-1)
theta = np.arccos(r[..., 2] / rMag)
phi = np.arctan2(r[..., 1], r[..., 0])
phi += np.where(phi < 0.0, 2 * np.pi, 0)
results = []
for l in range(l_max + 1):
result = np.zeros((2 * l + 1,) + r.shape[:-1])
for m in range(0, l + 1):
ylm = ((-1) ** m) * (rMag ** l) * sph_harm(m, l, phi, theta)
if m == 0:
result[l] = ylm.real
else:
result[l + m] = np.sqrt(2) * ylm.real
result[l - m] = np.sqrt(2) * ylm.imag
results.append(result)
return np.concatenate(results, axis=0)
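# Shape check (an illustrative sketch, not part of the original module): the
# per-l blocks have sizes 1, 3, 5, ..., so for points of shape (N, 3) the
# concatenated result has shape ((l_max + 1) ** 2, N), e.g.:
#
#   r = np.random.randn(10, 3)
#   assert get_harmonics_ref(2, r).shape == (9, 10)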
def get_lm(l_max: int) -> List[Tuple[int, int]]:
"""Get list of all (l,m) in order up to (and including) l_max"""
return [(l, m) for l in range(l_max + 1) for m in range(-l, l + 1)]
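# For example (a sketch): get_lm(1) == [(0, 0), (1, -1), (1, 0), (1, 1)],
# i.e. (l_max + 1) ** 2 entries in total.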
def format_array(array: Sequence[Any], fmt: str) -> str:
"""Convert `array` to string with format `fmt` for each entry."""
return "[" + ", ".join(fmt.format(elem) for elem in array) + "]"
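# For example (a sketch): format_array([1, 2, 3], "{:d}") returns "[1, 2, 3]"
# and format_array([0.5], "{:.2f}") returns "[0.50]".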
def generate_harmonic_coefficients(l_max_hlf: int) -> None:
"""Generate tables of recursion coefficients for computing real
solid harmonics up to l_max = 2 * l_max_hlf, as well as tables of
product coefficients (Clebsch-Gordon coefficients) for real solid
harmonics up to order l_max_hlf. Print results formatted as Python
code that can be pasted into _spherical_harmonics_data.py."""
l_max = 2 * l_max_hlf
qp.log.info(
"from typing import List, Tuple, Dict\n\n"
f"L_MAX: int = {l_max} # Maximum l for harmonics\n"
f"L_MAX_HLF: int = {l_max_hlf} # Maximum l for products"
)
# Calculate solid harmonics on a mesh covering unit cube:
grids1d = 3 * (np.linspace(-1.0, 1.0, 2 * l_max),) # avoids zero
r = np.array(np.meshgrid(*grids1d)).reshape(3, -1).T
r_sq = (r ** 2).sum(axis=-1)
ylm = get_harmonics_ref(l_max, r)
# Calculate recursion coefficients:
ERR_TOL = 1e-14
COEFF_TOL = 1e-8
qp.log.info(
"CooIndices = Tuple[List[int], List[int], List[float]]\n\n"
"# Recursion coefficients for computing real harmonics at l>1\n"
"# from products of those at l = 1 and l-1. The integers index\n"
"# a sparse matrix with (2l+1) rows and 3*(2l-1) columns.\n"
"YLM_RECUR: List[CooIndices] = ["
)
Y_00 = np.sqrt(0.25 / np.pi)
Y_1m_prefac = np.sqrt(0.75 / np.pi)
qp.log.info(f" ([], [], [{Y_00:.16f}]), ([], [], [{Y_1m_prefac:.16f}]),")
for l in range(2, l_max + 1):
l_minus_1_slice = slice((l - 1) ** 2, l ** 2)
y_product = ylm[l_minus_1_slice, None, :] * ylm[None, 1:4, :]
y_product = y_product.reshape((2 * l - 1) * 3, -1)
index_row = []
index_col = []
values = []
for m in range(-l, l + 1):
# List pairs of m at l = 1 and l-1 that can add up to m:
m_pairs_all = set(
[
(sign * m + dsign * dm, dm)
for sign in (-1, 1)
for dsign in (-1, 1)
for dm in (-1, 0, 1)
]
)
m_pairs = [m_pair for m_pair in m_pairs_all if abs(m_pair[0]) < l]
m_pair_indices = [3 * (l - 1 + m) + (1 + dm) for m, dm in m_pairs]
# Solve for coefficients of the linear combination:
for n_sel in range(1, len(m_pair_indices) + 1):
# Try increasing numbers till we get one:
y_product_allowed = y_product[m_pair_indices[:n_sel]]
y_target = ylm[l * (l + 1) + m]
coeff = np.linalg.lstsq(y_product_allowed.T, y_target, rcond=None)[0]
residual = np.dot(coeff, y_product_allowed) - y_target
err = np.linalg.norm(residual) / np.linalg.norm(y_target)
if err < ERR_TOL:
break
assert err < ERR_TOL
# Select non-zero coefficients to form product expansion:
sel = np.where(np.abs(coeff) > COEFF_TOL * np.linalg.norm(coeff))[0]
indices = np.array(m_pair_indices)[sel]
coeff = coeff[sel]
# Sort by index and add to lists for current l:
sort_index = indices.argsort()
index_row += [l + m] * len(sort_index)
index_col += list(indices[sort_index])
values += list(coeff[sort_index])
# Format as python code:
qp.log.info(
f" ("
f"{format_array(index_row, '{:d}')}, "
f"{format_array(index_col, '{:d}')}, "
f"{format_array(values, '{:.16f}')}),"
)
qp.log.info("]\n")
# Calculate Clebsch-Gordon coefficients:
lm_hlf = get_lm(l_max_hlf)
qp.log.info(
"# Clebsch-Gordon coefficients for products of real harmonics.\n"
"# The integer indices correspond to l*(l+1)+m for each (l,m).\n"
"YLM_PROD: Dict[Tuple[int, int],"
" Tuple[List[int], List[float]]] = {"
)
for ilm1, (l1, m1) in enumerate(lm_hlf):
for ilm2, (l2, m2) in enumerate(lm_hlf[: ilm1 + 1]):
# List (l,m) pairs allowed by angular momentum addition rules:
m_allowed = {m1 + m2, m1 - m2, m2 - m1, -(m1 + m2)}
l_allowed = range(l1 - l2, l1 + l2 + 1, 2)
lm_all = np.array(
[(l, m) for l in l_allowed for m in m_allowed if (abs(m) <= l)]
)
l_all = lm_all[:, 0]
m_all = lm_all[:, 1]
ilm = l_all * (l_all + 1) + m_all # flattened index
# Solve for coefficients of the linear combination:
y_product = ylm[ilm1] * ylm[ilm2]
y_terms = ylm[ilm] * (r_sq[None, :] ** ((l1 + l2 - l_all) // 2)[:, None])
results = np.linalg.lstsq(y_terms.T, y_product, rcond=None)
coeff = results[0]
err = np.sqrt(results[1][0]) / np.linalg.norm(y_product)
assert err < ERR_TOL
# Select non-zero coefficients to form product expansion:
sel = np.where(np.abs(coeff) > COEFF_TOL * np.linalg.norm(coeff))[0]
ilm = ilm[sel]
coeff = coeff[sel]
# Sort by (l,m):
sort_index = ilm.argsort()
ilm = ilm[sort_index]
coeff = coeff[sort_index]
# Format as python code:
qp.log.info(
f" ({ilm1}, {ilm2}): ("
f"{format_array(ilm, '{:d}')}, "
f"{format_array(coeff, '{:.16f}')}),"
)
qp.log.info("}")
def main():
qp.rc.init()
assert qp.rc.n_procs == 1 # no MPI
qp.utils.log_config() # after rc to suppress header messages
generate_harmonic_coefficients(l_max_hlf=3)
if __name__ == "__main__":
main()
| python |
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software tool from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import asyncio
from typing import Optional
from typing import Set
from typing import List
from dateutil.parser import parse
from pytz import UTC
from packageurl import PackageURL
from univers.version_specifier import VersionSpecifier
from univers.versions import SemverVersion
from vulnerabilities.data_source import Advisory
from vulnerabilities.data_source import GitDataSource
from vulnerabilities.data_source import Reference
from vulnerabilities.package_managers import RubyVersionAPI
from vulnerabilities.helpers import load_yaml
from vulnerabilities.helpers import nearest_patched_package
class RubyDataSource(GitDataSource):
def __enter__(self):
super(RubyDataSource, self).__enter__()
if not getattr(self, "_added_files", None):
self._added_files, self._updated_files = self.file_changes(
recursive=True, file_ext="yml", subdir="./gems"
)
self.pkg_manager_api = RubyVersionAPI()
self.set_api(self.collect_packages())
def set_api(self, packages):
asyncio.run(self.pkg_manager_api.load_api(packages))
def updated_advisories(self) -> Set[Advisory]:
files = self._updated_files
advisories = []
for f in files:
processed_data = self.process_file(f)
if processed_data:
advisories.append(processed_data)
return self.batch_advisories(advisories)
def added_advisories(self) -> Set[Advisory]:
files = self._added_files
advisories = []
for f in files:
processed_data = self.process_file(f)
if processed_data:
advisories.append(processed_data)
return self.batch_advisories(advisories)
def collect_packages(self):
packages = set()
files = self._updated_files.union(self._added_files)
for f in files:
data = load_yaml(f)
if data.get("gem"):
packages.add(data["gem"])
return packages
    def process_file(self, path) -> Optional[Advisory]:
record = load_yaml(path)
package_name = record.get("gem")
if not package_name:
return
if "cve" in record:
cve_id = "CVE-{}".format(record["cve"])
else:
return
publish_time = parse(record["date"]).replace(tzinfo=UTC)
safe_version_ranges = record.get("patched_versions", [])
        # This happens when the advisory contains only the 'patched_versions' field
        # and its value is None (i.e. it is empty).
if not safe_version_ranges:
safe_version_ranges = []
safe_version_ranges += record.get("unaffected_versions", [])
safe_version_ranges = [i for i in safe_version_ranges if i]
if not getattr(self, "pkg_manager_api", None):
self.pkg_manager_api = RubyVersionAPI()
all_vers = self.pkg_manager_api.get(package_name, until=publish_time).valid_versions
safe_versions, affected_versions = self.categorize_versions(all_vers, safe_version_ranges)
impacted_purls = [
PackageURL(
name=package_name,
type="gem",
version=version,
)
for version in affected_versions
]
resolved_purls = [
PackageURL(
name=package_name,
type="gem",
version=version,
)
for version in safe_versions
]
references = []
if record.get("url"):
references.append(Reference(url=record.get("url")))
return Advisory(
summary=record.get("description", ""),
affected_packages=nearest_patched_package(impacted_purls, resolved_purls),
references=references,
vulnerability_id=cve_id,
)
@staticmethod
def categorize_versions(all_versions, unaffected_version_ranges):
for id, elem in enumerate(unaffected_version_ranges):
unaffected_version_ranges[id] = VersionSpecifier.from_scheme_version_spec_string(
"semver", elem
)
safe_versions = []
vulnerable_versions = []
for i in all_versions:
vobj = SemverVersion(i)
is_vulnerable = False
for ver_rng in unaffected_version_ranges:
if vobj in ver_rng:
safe_versions.append(i)
is_vulnerable = True
break
if not is_vulnerable:
vulnerable_versions.append(i)
return safe_versions, vulnerable_versions
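    # Illustrative behaviour (a sketch, not part of the original module): with
    # unaffected_version_ranges == [">= 2.0.0"] and
    # all_versions == ["1.9.0", "2.0.0", "2.1.0"], this returns
    # (["2.0.0", "2.1.0"], ["1.9.0"]): versions matching any unaffected range
    # are treated as safe, everything else as vulnerable.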
| python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/app_ui.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
SETUP_DIR="/usr/share/gnome-extensions-loader"
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(250, 300)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/gnome-extensions-loader.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setContentsMargins(3, 3, 3, 3)
self.horizontalLayout.setObjectName("horizontalLayout")
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
self.listWidget.setObjectName("listWidget")
self.horizontalLayout.addWidget(self.listWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 250, 22))
self.menubar.setObjectName("menubar")
self.menuLayouts = QtWidgets.QMenu(self.menubar)
self.menuLayouts.setObjectName("menuLayouts")
self.menu_Help = QtWidgets.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menu_File = QtWidgets.QMenu(self.menubar)
self.menu_File.setObjectName("menu_File")
MainWindow.setMenuBar(self.menubar)
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.action_Add = QtWidgets.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/add.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Add.setIcon(icon1)
self.action_Add.setObjectName("action_Add")
self.action_Remove = QtWidgets.QAction(MainWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/remove.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Remove.setIcon(icon2)
self.action_Remove.setObjectName("action_Remove")
self.action_Overwrite = QtWidgets.QAction(MainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/edit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Overwrite.setIcon(icon3)
self.action_Overwrite.setObjectName("action_Overwrite")
self.action_About = QtWidgets.QAction(MainWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/about.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_About.setIcon(icon4)
self.action_About.setObjectName("action_About")
self.action_Exit = QtWidgets.QAction(MainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/exit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Exit.setIcon(icon5)
self.action_Exit.setObjectName("action_Exit")
self.action_Apply = QtWidgets.QAction(MainWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/apply.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_Apply.setIcon(icon6)
self.action_Apply.setObjectName("action_Apply")
self.menuLayouts.addAction(self.action_Add)
self.menuLayouts.addAction(self.action_Remove)
self.menuLayouts.addAction(self.action_Overwrite)
self.menu_Help.addAction(self.action_About)
self.menu_File.addAction(self.action_Apply)
self.menu_File.addAction(self.action_Exit)
self.menubar.addAction(self.menu_File.menuAction())
self.menubar.addAction(self.menuLayouts.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.toolBar.addAction(self.action_Apply)
self.toolBar.addSeparator()
self.toolBar.addAction(self.action_Add)
self.toolBar.addAction(self.action_Remove)
self.toolBar.addAction(self.action_Overwrite)
self.toolBar.addSeparator()
self.toolBar.addAction(self.action_About)
self.toolBar.addSeparator()
self.toolBar.addAction(self.action_Exit)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Gnome Extensions Loader"))
self.menuLayouts.setTitle(_translate("MainWindow", "&Layouts"))
self.menu_Help.setTitle(_translate("MainWindow", "&Help"))
self.menu_File.setTitle(_translate("MainWindow", "&File"))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.action_Add.setText(_translate("MainWindow", "&Add"))
self.action_Add.setShortcut(_translate("MainWindow", "Ctrl+N"))
self.action_Remove.setText(_translate("MainWindow", "&Remove"))
self.action_Remove.setShortcut(_translate("MainWindow", "Ctrl+R"))
self.action_Overwrite.setText(_translate("MainWindow", "&Overwrite"))
self.action_Overwrite.setShortcut(_translate("MainWindow", "Ctrl+O"))
self.action_About.setText(_translate("MainWindow", "&About"))
self.action_About.setShortcut(_translate("MainWindow", "Ctrl+I"))
self.action_Exit.setText(_translate("MainWindow", "&Exit"))
self.action_Exit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
self.action_Apply.setText(_translate("MainWindow", "&Apply"))
self.action_Apply.setShortcut(_translate("MainWindow", "Ctrl+A"))
| python |
from cloudshell.shell.core.resource_driver_interface import ResourceDriverInterface
from cloudshell.shell.core.context import InitCommandContext, ResourceCommandContext
import cloudshell.api.cloudshell_api as api
from natsort import natsorted, ns
import ipcalc
import json
class IpcalcDriver (ResourceDriverInterface):
# Calc sizes for common subnets
NetSizes = {}
NetSizes["24"] = 254 + 2
NetSizes["25"] = 126 + 2
NetSizes["26"] = 62 + 2
NetSizes["27"] = 30 + 2
NetSizes["28"] = 14 + 2
NetSizes["29"] = 6 + 2
NetSizes["30"] = 2 + 2
NetSizes["31"] = 2
NetSizes["32"] = 1
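    # Each entry is the total number of addresses in the block (usable hosts
    # plus network and broadcast addresses); e.g. a /27 spans 32 addresses,
    # 30 of them usable.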
def cleanup(self):
"""
        Destroy the driver session. This function is called every time a driver instance is destroyed.
        This is a good place to close any open sessions and finish writing to log files.
"""
pass
def __init__(self):
"""
        Constructor must take no arguments; the driver is created via reflection at run time.
"""
pass
def initialize(self, context):
"""
        Initialize the driver session. This function is called every time a new instance of the driver is created.
This is a good place to load and cache the driver configuration, initiate sessions etc.
:param InitCommandContext context: the context the command runs on
"""
pass
def printIPsInContainer(self, context, containerName):
ApiSession = api.CloudShellAPISession(host=context.connectivity.server_address, token_id=context.connectivity.admin_auth_token, domain="Global")
try:
containerResource = ApiSession.GetResourceDetails(containerName)
except:
raise ValueError("Specified container does not exist.")
rl = ApiSession.FindResources(resourceFamily="Address",resourceModel="IP Address", includeSubResources=True)
cleanList = []
for address in rl.Resources:
if (containerName in address.FullName):
cleanList.append(address.Name)
cleanList = natsorted(cleanList, alg=ns.IGNORECASE)
return json.dumps(cleanList)
def getNextIP(self, context, containerName, CIDR):
ApiSession = api.CloudShellAPISession(host=context.connectivity.server_address, token_id=context.connectivity.admin_auth_token, domain="Global")
# validate that the container to pull from exists in RM in this domain
try:
containerResource = ApiSession.GetResourceDetails(containerName)
except:
raise ValueError("Specified container does not exist.")
rl = ApiSession.FindResources(resourceFamily="Address",resourceModel="IP Address", includeSubResources=True)
cleanList = []
for address in rl.Resources:
if (containerName in address.FullName):
if ((address.ReservedStatus == "Not In Reservations") and (address.Excluded == False)):
cleanList.append(address.Name)
cleanList = natsorted(cleanList, alg=ns.IGNORECASE)
        # cleanList now holds the available IPs in the given container,
        # sorted in numeric order. The original list of resource objects
        # is still available in rl.
containerCidr = str(containerResource.ResourceAttributes[0].Value)
# Confirm that the requested size is possible given the allocated range we are managing
if(int(CIDR)<int(containerCidr)):
raise ValueError("Requested network size is greater than allocated container has to offer.")
try:
numAddressesNeeded = self.NetSizes[CIDR]
except:
raise ValueError("The subnet size requested cannot be converted into available IP space.")
# confirm that we still have enough addresses to handle the requested subnet size
if(numAddressesNeeded > len(cleanList)):
raise ValueError("The requested number of IPs needed for this sandbox do not exist in this allocation range of " + containerName)
        # All checks passed, so allocate the requested addresses
i = 0
returnedAddresses = []
try:
while (i < numAddressesNeeded):
newIP = containerName + "/" + cleanList[i]
returnedAddresses.append(newIP)
i = i + 1
ApiSession.AddResourcesToReservation(reservationId=context.reservation.reservation_id,resourcesFullPath=returnedAddresses)
except:
raise ValueError("Something went wrong allocating the IPs.")
        return json.dumps(returnedAddresses)
"""
Routine to create the light cones shells
L1 L2 L3 u11 u12 u13 u21 u22 u23 u31 u32 u33 (periodicity)
C2 '2.2361', '1.0954', '0.4082', '2', '1', '0', '1', '0', '1', '1', '0', '0', '(1)'
C15 '1.4142', '1.0000', '0.7071', '1', '1', '0', '0', '0', '1', '1', '0', '0', '(12)'
C6 '5.9161', '0.4140', '0.4082', '5', '3', '1', '1', '1', '0', '0', '1', '0', '(1)'
C3 '2.4495', '0.7071', '0.5774', '2', '1', '1', '1', '1', '0', '0', '1', '0', '(1)'
python3 create_light_cone_shells.py 10 MD10 1000
import numpy as n
import os
for ii in n.arange(50,115,1)[::-1]:
comm="python3 create_light_cone_shells.py "+str(ii)+" MD10 1000"
print(comm)
os.system(comm)
"""
import sys
ii = int(sys.argv[1])
env = sys.argv[2] # 'MD10'
L_box = float(sys.argv[3]) / 0.6777
positions_group_name = sys.argv[4] # 'remaped_position_L3'
if positions_group_name == 'remaped_position_L3' :
positions_group = 'remaped_position_L3'
x_obs, y_obs, z_obs = 0., 0.7071/2.*L_box, 0.5774/2.*L_box
if positions_group_name == 'remaped_position_L3_z1' :
positions_group = 'remaped_position_L3'
x_obs, y_obs, z_obs = -2.4495*L_box, 0.7071/2.*L_box, 0.5774/2.*L_box
if positions_group_name == 'remaped_position_L2' :
positions_group = 'remaped_position_L2'
x_obs, y_obs, z_obs = 2.2361/2.*L_box, -1.5400*L_box, 0.4082/2.*L_box
if positions_group_name == 'remaped_position_L6' :
positions_group = 'remaped_position_L6'
x_obs, y_obs, z_obs = 0., 0.4140/2.*L_box, 0.4082/2.*L_box
if positions_group_name == 'remaped_position_L15' :
positions_group = 'remaped_position_L15'
#1.4142', '1.0000', '0.7071
x_obs, y_obs, z_obs = 0., 1.0000/2.*L_box, 0.7071/2.*L_box
import h5py # HDF5 support
import os
import glob
import numpy as n
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
h5_lc_dir = os.path.join(os.environ[env], 'h5_lc', 'shells_'+positions_group_name )
if os.path.isdir(h5_lc_dir)==False:
os.mkdir(h5_lc_dir)
h5_dir = os.path.join(os.environ[env], 'h5' )
input_list_i = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????_emerge.hdf5")))
input_list_i.sort()
# removing snapshots that cannot be remapped ...
input_list = n.delete(input_list_i,n.array([
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.08000_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.08180_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.08360_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.13320_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.13620_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.15210_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.16620_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.17380_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.17770_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.18570_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.18990_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.19410_emerge.hdf5")), # Ms, LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.20750_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.21210_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.22170_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.22670_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.23690_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.24230_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.25320_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.25890_emerge.hdf5")), # LSAR issue 51
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.26470_emerge.hdf5")), # LSAR issue 52
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.27060_emerge.hdf5")), # LSAR + remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.28920_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.29570_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.30910_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.34530_emerge.hdf5")), # LSAR issue
#n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.27060_emerge.hdf5")), # remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.43090_emerge.hdf5")), # remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.71730_emerge.hdf5")), # remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.93570_emerge.hdf5")) # remap issue
]) )
# creates the redshift list
redshifts = []
for file_1 in input_list :
f1 = h5py.File(file_1, "r")
redshifts.append(f1.attrs['redshift'])
f1.close()
redshifts = n.array(redshifts)
# creates the shell list
Dcom = cosmoMD.comoving_distance(redshifts).value
Dmax = n.hstack((Dcom[0],(Dcom[1:]+Dcom[:-1])/2.))
Dmin = n.hstack(((Dcom[1:]+Dcom[:-1])/2., Dcom[-1]))
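# Descriptive note (not part of the original script): Dmin[i] and Dmax[i] are the
# comoving-distance edges of the shell assigned to snapshot i; each edge lies
# halfway between the distances of neighbouring snapshots, and the selection
# below keeps halos with Dmin[i] <= d < Dmax[i].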
def copylc_data(ii, option=False):
"""
Creates the selection array to obtain the shell in a snapshot to be added in the light cone
Writes a lightcone shell for each snapshot
"""
file_1 = input_list[ii]
file_out = os.path.join(h5_lc_dir, 'shell_'+os.path.basename( input_list[ii] ) )
print(file_1, "==>>", file_out)
f1 = h5py.File(file_1, "r")
print( "n halos=",f1['/halo_properties/'].attrs['N_halos'])
x,y,z=f1[positions_group + '/xyx_Lbox'].value.T*L_box
distance = ((x-x_obs)**2.+(y-y_obs)**2.+(z-z_obs)**2.)**0.5
selection = (distance>=Dmin[ii])&(distance<Dmax[ii])
print( len(distance[selection])," halos in shell ", Dmin[ii], "<d comoving<",Dmax[ii])
if len(distance[selection])>1:
f = h5py.File(file_out, "a")
f.attrs['file_name'] = os.path.basename(file_out)
f.attrs['HDF5_Version'] = h5py.version.hdf5_version
f.attrs['h5py_version'] = h5py.version.version
halo_data = f.create_group('halo_position')
ds = halo_data.create_dataset('x', data = x[selection] )
ds.attrs['units'] = 'Mpc/h'
ds.attrs['long_name'] = 'x'
ds = halo_data.create_dataset('y', data = y[selection] )
ds.attrs['units'] = 'Mpc/h'
ds.attrs['long_name'] = 'y'
ds = halo_data.create_dataset('z', data = z[selection] )
ds.attrs['units'] = 'Mpc/h'
ds.attrs['long_name'] = 'z'
ds = halo_data.create_dataset('vx', data = f1['/halo_position/vx'].value[selection] )
ds.attrs['units'] = 'km/s'
ds.attrs['long_name'] = 'vx'
ds = halo_data.create_dataset('vy', data = f1['/halo_position/vy'].value[selection] )
ds.attrs['units'] = 'km/s'
ds.attrs['long_name'] = 'vy'
ds = halo_data.create_dataset('vz', data = f1['/halo_position/vz'].value[selection] )
ds.attrs['units'] = 'km/s'
ds.attrs['long_name'] = 'vz'
halo_data = f.create_group('halo_properties')
ds = halo_data.create_dataset('id', data = f1['/halo_properties/id'].value[selection] )
ds.attrs['units'] = '-'
ds.attrs['long_name'] = 'halo identifier'
ds = halo_data.create_dataset('pid', data = f1['/halo_properties/pid'].value[selection] )
ds.attrs['units'] = '-'
ds.attrs['long_name'] = 'parent identifier, -1 if distinct halo'
ds = halo_data.create_dataset('mvir', data = f1['/halo_properties/mvir'].value[selection] )
ds.attrs['units'] = r'$h^{-1} M_\odot$'
ds.attrs['long_name'] = r'$M_{vir}$'
ds = halo_data.create_dataset('rvir', data = f1['/halo_properties/rvir'].value[selection] )
ds.attrs['units'] = r'$h^{-1} kpc$'
ds.attrs['long_name'] = r'$r_{vir}$'
ds = halo_data.create_dataset('rs', data = f1['/halo_properties/rs'].value[selection] )
ds.attrs['units'] = r'$h^{-1} kpc$'
ds.attrs['long_name'] = r'$r_{s}$'
ds = halo_data.create_dataset('Vmax' , data = f1['/halo_properties/Vmax'].value[selection])
ds.attrs['units'] = 'km/s'
ds.attrs['long_name'] = r'$V_{max}$'
ds = halo_data.create_dataset('Mpeak' , data = f1['/halo_properties/Mpeak'].value[selection])
ds.attrs['units'] = r'$h^{-1} M_\odot$'
ds.attrs['long_name'] = r'$M_{peak}$'
moster_2013_data = f.create_group('moster_2013_data')
ds = moster_2013_data.create_dataset('stellar_mass', data = f1['/moster_2013_data/stellar_mass'].value[selection])
ds.attrs['units'] = r'$ M_\odot$'
ds.attrs['long_name'] = 'stellar mass'
agn_properties = f.create_group('agn_properties')
ds = agn_properties.create_dataset('log_lambda_sar', data = f1['/agn_properties/log_lambda_sar'].value[selection])
ds.attrs['units'] = r'log lambda SAR'
ds.attrs['long_name'] = 'log lambda SAR'
ds = agn_properties.create_dataset('agn_activity', data = f1['/agn_properties/agn_activity'].value[selection])
emerge_data = f.create_group('emerge_data')
ds = emerge_data.create_dataset('dMdt', data = f1['/emerge_data/dMdt'].value[selection])
ds.attrs['units'] = r'$ M_\odot/yr$'
ds.attrs['long_name'] = 'halo growth rate'
ds = emerge_data.create_dataset('mvir_dot', data = f1['/emerge_data/mvir_dot'].value[selection] )
ds.attrs['units'] = r'$ M_\odot/yr$'
ds.attrs['long_name'] = 'mvir variation with respect to last snapshot'
ds = emerge_data.create_dataset('rvir_dot', data = f1['/emerge_data/rvir_dot'].value[selection] )
ds.attrs['units'] = r'$ kpc /yr $'
ds.attrs['long_name'] = 'rvir variation with respect to last snapshot'
c4 = f.create_group('cosmo_4most')
ds = c4.create_dataset('is_BG_lz', data = f1['cosmo_4most/is_BG_lz'].value[selection])
ds = c4.create_dataset('is_BG_hz', data = f1['cosmo_4most/is_BG_hz'].value[selection])
ds = c4.create_dataset('is_ELG', data = f1['cosmo_4most/is_ELG'].value[selection])
ds = c4.create_dataset('is_QSO', data = f1['cosmo_4most/is_QSO'].value[selection])
ds = c4.create_dataset('is_Lya', data = f1['cosmo_4most/is_Lya'].value[selection])
f.close()
f1.close()
copylc_data(ii)
| python |
# This module is available both in the Python and Transcrypt environments
# It is included in-between the __core__ and the __builtin__ module, so the latter can adapt __envir__
# In Transcrypt, __base__ is available inline, it isn't nested and cannot be imported in the normal way
class __Envir__:
def __init__ (self):
self.interpreter_name = 'python'
self.transpiler_name = 'transcrypt'
self.transpiler_version = '3.6.92'
self.target_subdir = '__javascript__'
__envir__ = __Envir__ ()
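# Example (a sketch, not part of the original module): code that must behave
# differently when run under CPython versus when transpiled can inspect the
# attributes defined above, e.g.:
#
#   if __envir__.interpreter_name == 'python':
#       pass  # running in the Python interpreter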
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_param_X86System')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_param_X86System')
_param_X86System = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_param_X86System', [dirname(__file__)])
except ImportError:
import _param_X86System
return _param_X86System
try:
_mod = imp.load_module('_param_X86System', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_param_X86System = swig_import_helper()
del swig_import_helper
else:
import _param_X86System
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.param_X86ACPIRSDP
import m5.internal.param_X86ACPIRSDT
import m5.internal.X86ACPISysDescTable_vector
import m5.internal.param_X86ACPISysDescTable
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
import m5.internal.param_X86ACPIXSDT
import m5.internal.param_X86IntelMPFloatingPointer
import m5.internal.param_X86IntelMPConfigTable
import m5.internal.X86IntelMPBaseConfigEntry_vector
import m5.internal.param_X86IntelMPBaseConfigEntry
import m5.internal.X86IntelMPExtConfigEntry_vector
import m5.internal.param_X86IntelMPExtConfigEntry
import m5.internal.param_X86SMBiosSMBiosTable
import m5.internal.X86SMBiosSMBiosStructure_vector
import m5.internal.param_X86SMBiosSMBiosStructure
import m5.internal.param_System
import m5.internal.enum_MemoryMode
import m5.internal.AddrRange_vector
import m5.internal.AbstractMemory_vector
import m5.internal.param_AbstractMemory
import m5.internal.param_MemObject
import m5.internal.param_ClockedObject
import m5.internal.param_ClockDomain
class X86System(m5.internal.param_System.System):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
X86System_swigregister = _param_X86System.X86System_swigregister
X86System_swigregister(X86System)
class X86SystemParams(m5.internal.param_System.SystemParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def create(self):
return _param_X86System.X86SystemParams_create(self)
acpi_description_table_pointer = _swig_property(_param_X86System.X86SystemParams_acpi_description_table_pointer_get, _param_X86System.X86SystemParams_acpi_description_table_pointer_set)
intel_mp_pointer = _swig_property(_param_X86System.X86SystemParams_intel_mp_pointer_get, _param_X86System.X86SystemParams_intel_mp_pointer_set)
intel_mp_table = _swig_property(_param_X86System.X86SystemParams_intel_mp_table_get, _param_X86System.X86SystemParams_intel_mp_table_set)
smbios_table = _swig_property(_param_X86System.X86SystemParams_smbios_table_get, _param_X86System.X86SystemParams_smbios_table_set)
def __init__(self):
this = _param_X86System.new_X86SystemParams()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _param_X86System.delete_X86SystemParams
__del__ = lambda self: None
X86SystemParams_swigregister = _param_X86System.X86SystemParams_swigregister
X86SystemParams_swigregister(X86SystemParams)
| python |
import hashlib
class HashUtils(object):
@staticmethod
def md5(string: str):
md5 = hashlib.md5(string.encode("utf-8"))
return md5.hexdigest()
@staticmethod
def sha1(string: str):
sha1 = hashlib.sha1(string.encode("utf-8"))
return sha1.hexdigest()
@staticmethod
def sha256(string: str):
sha256 = hashlib.sha256(string.encode("utf-8"))
return sha256.hexdigest()
if __name__ == '__main__':
    print(HashUtils.sha1("wen"))
#!/bin/python
# Solution for https://www.hackerrank.com/challenges/jumping-on-the-clouds-revisited
import sys
n,k = raw_input().strip().split(' ')
n,k = [int(n),int(k)]
c = map(int,raw_input().strip().split(' '))
E = 100
current = 0
time = 0
while not (time > 0 and current == 0):
current += k
current = current % n
if c[current] == 0:
E -= 1
if c[current] == 1:
E -= 3
time += 1
print E
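# A Python 3 port of the same logic (an illustrative sketch, not part of the
# original Python 2 solution):
#
#   n, k = map(int, input().split())
#   c = list(map(int, input().split()))
#   energy, current, time_used = 100, 0, 0
#   while not (time_used > 0 and current == 0):
#       current = (current + k) % n
#       energy -= 3 if c[current] == 1 else 1
#       time_used += 1
#   print(energy)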
import pickle
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import os
def feat_eng(df_fe):
'''
    Function that selects the features to be used for classification.
    inputs: clean data frame (df_clean.pkl)
    outputs: data frame with the design matrix for the model (df_clean.pkl)
'''
    # Transform the facility_type and zip variables
tipo = pd.DataFrame(df_fe.facility_type.value_counts())
tipo['name'] = tipo.index
tipo.index = range(len(tipo.name))
grupo1 = tipo.iloc[0:4,1].tolist()
grupo2 = tipo.iloc[[5,6,7,11],1].tolist()
df_fe['class'] = df_fe['facility_type'].apply(lambda x: x if x in grupo1 else ('daycare' if x in grupo2 else 'other'))
lev = pd.read_csv(os.path.realpath('src/utils/zip_catalog.csv'))
lev['zip'] = lev['zip'].astype(str)
lev.index = lev.zip
dic = lev.level.to_dict()
df_fe['level'] = df_fe['zip'].apply(lambda x: zips(x,lev,dic))
    # One-hot encode (OHE) the categorical variables
df_fe = df_fe.sort_values(by='inspection_date', ascending=True)
df_input = pd.DataFrame(df_fe[['label_risk','label_results','level','class']])
data_input_ohe = pd.get_dummies(df_input)
etiqueta = data_input_ohe.label_results
data_input_ohe= data_input_ohe.drop('label_results', axis = 1)
variables_lista = list(data_input_ohe.columns)
# Grid Search
np.random.seed(20201124)
    # we will use a random forest
classifier = RandomForestClassifier(oob_score=True, n_jobs=-1, random_state=1234)
    # split into train and test sets
#X_train, X_test, y_train, y_test = train_test_split(data_input_ohe, etiqueta, test_size=0.3)
    # define the hyperparameters we want to try
hyper_param_grid = {'n_estimators': [300, 400], #'min_samples_leaf': [3,5,7],
'max_depth': [7, 10],
#'min_samples_split': [3],
'max_features': [3, 5, 6],
'criterion': ['gini']}
    # use TimeSeriesSplit so the split respects chronological order
tscv = TimeSeriesSplit(n_splits=3)
    # TimeSeriesSplit (as described in the scikit-learn docs) yields train/test
    # index arrays in chronological order, unlike ordinary shuffled CV splits.
for train_index, test_index in tscv.split(data_input_ohe):
X_train, X_test = data_input_ohe.iloc[train_index, :], data_input_ohe.iloc[test_index,:]
y_train, y_test = etiqueta.iloc[train_index], etiqueta.iloc[test_index]
    # run the grid search
gs = GridSearchCV(classifier,
hyper_param_grid,
scoring = 'precision', return_train_score=True,
cv = tscv)
start_time = time.time()
gs.fit(X_train, y_train)
best_rf = gs.best_estimator_
best_score = gs.best_estimator_.oob_score_
feature_importance = pd.DataFrame({'importance':\
best_rf.feature_importances_,\
'feature': variables_lista})
feature_importance=feature_importance.sort_values(by="importance", ascending=False)
#fi_out = feature_importance.head(10)
time_exec = time.time() - start_time
nrows_ohe = data_input_ohe.shape[0]
ncols_ohe = data_input_ohe.shape[1]
    #print("Execution time: ", time.time() - start_time)
df_input = pd.DataFrame(df_fe[['aka_name','license','label_risk','label_results','level','class']])
return df_input, nrows_ohe, ncols_ohe, float(best_score), time_exec, str(best_rf)
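# Example usage (a sketch; assumes df_clean.pkl holds the cleaned inspections
# data frame with the columns referenced above, e.g. facility_type, zip,
# inspection_date, label_risk and label_results):
#
#   with open("df_clean.pkl", "rb") as f:
#       df_clean = pickle.load(f)
#   df_input, n_rows, n_cols, oob_score, seconds, model_desc = feat_eng(df_clean)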
def zips(x,lev,dic):
if x in lev.zip.to_list():
return dic[x]
else:
        return 'other'
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: transaction/v4/transaction_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
from common.v3 import model_pb2 as common_dot_v3_dot_model__pb2
from common.v4 import model_pb2 as common_dot_v4_dot_model__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='transaction/v4/transaction_service.proto',
package='kin.agora.transaction.v4',
syntax='proto3',
serialized_options=b'\n org.kin.agora.gen.transaction.v4ZEgithub.com/kinecosystem/agora-api/genproto/transaction/v4;transaction\242\002\020APBTransactionV4',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(transaction/v4/transaction_service.proto\x12\x18kin.agora.transaction.v4\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\x1a\x15\x63ommon/v3/model.proto\x1a\x15\x63ommon/v4/model.proto\"\x19\n\x17GetServiceConfigRequest\"\xe2\x01\n\x18GetServiceConfigResponse\x12@\n\x12subsidizer_account\x18\x01 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountId\x12\x45\n\rtoken_program\x18\x02 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12=\n\x05token\x18\x03 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\"\x1d\n\x1bGetMinimumKinVersionRequest\"/\n\x1cGetMinimumKinVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\r\"\x1b\n\x19GetRecentBlockhashRequest\"Y\n\x1aGetRecentBlockhashResponse\x12;\n\tblockhash\x18\x01 \x01(\x0b\x32\x1e.kin.agora.common.v4.BlockhashB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\"8\n(GetMinimumBalanceForRentExemptionRequest\x12\x0c\n\x04size\x18\x01 \x01(\x04\"=\n)GetMinimumBalanceForRentExemptionResponse\x12\x10\n\x08lamports\x18\x01 \x01(\x04\"\xf3\x01\n\x11GetHistoryRequest\x12\x42\n\naccount_id\x18\x01 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x30\n\x06\x63ursor\x18\x02 \x01(\x0b\x32 .kin.agora.transaction.v4.Cursor\x12H\n\tdirection\x18\x03 \x01(\x0e\x32\x35.kin.agora.transaction.v4.GetHistoryRequest.Direction\"\x1e\n\tDirection\x12\x07\n\x03\x41SC\x10\x00\x12\x08\n\x04\x44\x45SC\x10\x01\"\xbd\x01\n\x12GetHistoryResponse\x12\x43\n\x06result\x18\x01 \x01(\x0e\x32\x33.kin.agora.transaction.v4.GetHistoryResponse.Result\x12\x41\n\x05items\x18\x02 \x03(\x0b\x32%.kin.agora.transaction.v4.HistoryItemB\x0b\xfa\x42\x08\x92\x01\x05\x08\x00\x10\x80\x01\"\x1f\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\"\x91\x01\n\x16SignTransactionRequest\x12?\n\x0btransaction\x18\x01 \x01(\x0b\x32 .kin.agora.common.v4.TransactionB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x36\n\x0cinvoice_list\x18\x02 \x01(\x0b\x32 .kin.agora.common.v3.InvoiceList\"\x8f\x02\n\x17SignTransactionResponse\x12H\n\x06result\x18\x01 \x01(\x0e\x32\x38.kin.agora.transaction.v4.SignTransactionResponse.Result\x12<\n\tsignature\x18\x02 \x01(\x0b\x32).kin.agora.common.v4.TransactionSignature\x12\x39\n\x0einvoice_errors\x18\x04 \x03(\x0b\x32!.kin.agora.common.v3.InvoiceError\"1\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08REJECTED\x10\x03\x12\x11\n\rINVOICE_ERROR\x10\x04\"\x83\x02\n\x18SubmitTransactionRequest\x12?\n\x0btransaction\x18\x01 \x01(\x0b\x32 .kin.agora.common.v4.TransactionB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x36\n\x0cinvoice_list\x18\x02 \x01(\x0b\x32 .kin.agora.common.v3.InvoiceList\x12\x33\n\ncommitment\x18\x03 \x01(\x0e\x32\x1f.kin.agora.common.v4.Commitment\x12\x1a\n\tdedupe_id\x18\x04 \x01(\x0c\x42\x07\xfa\x42\x04z\x02\x18@\x12\x1d\n\x15send_simulation_event\x18\x05 \x01(\x08\"\x8c\x03\n\x19SubmitTransactionResponse\x12J\n\x06result\x18\x01 \x01(\x0e\x32:.kin.agora.transaction.v4.SubmitTransactionResponse.Result\x12<\n\tsignature\x18\x02 \x01(\x0b\x32).kin.agora.common.v4.TransactionSignature\x12@\n\x11transaction_error\x18\x03 \x01(\x0b\x32%.kin.agora.common.v4.TransactionError\x12\x39\n\x0einvoice_errors\x18\x04 
\x03(\x0b\x32!.kin.agora.common.v3.InvoiceError\"h\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x15\n\x11\x41LREADY_SUBMITTED\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02\x12\x0c\n\x08REJECTED\x10\x03\x12\x11\n\rINVOICE_ERROR\x10\x04\x12\x12\n\x0ePAYER_REQUIRED\x10\x05\"\x92\x01\n\x15GetTransactionRequest\x12\x44\n\x0etransaction_id\x18\x01 \x01(\x0b\x32\".kin.agora.common.v4.TransactionIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x33\n\ncommitment\x18\x02 \x01(\x0e\x32\x1f.kin.agora.common.v4.Commitment\"\xf9\x01\n\x16GetTransactionResponse\x12\x45\n\x05state\x18\x01 \x01(\x0e\x32\x36.kin.agora.transaction.v4.GetTransactionResponse.State\x12\x10\n\x04slot\x18\x02 \x01(\x04\x42\x02\x30\x01\x12\x15\n\rconfirmations\x18\x03 \x01(\r\x12\x33\n\x04item\x18\x04 \x01(\x0b\x32%.kin.agora.transaction.v4.HistoryItem\":\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02\x12\x0b\n\x07PENDING\x10\x03\"\xc1\x05\n\x0bHistoryItem\x12\x44\n\x0etransaction_id\x18\x01 \x01(\x0b\x32\".kin.agora.common.v4.TransactionIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x30\n\x06\x63ursor\x18\x02 \x01(\x0b\x32 .kin.agora.transaction.v4.Cursor\x12>\n\x12solana_transaction\x18\x03 \x01(\x0b\x32 .kin.agora.common.v4.TransactionH\x00\x12\x46\n\x13stellar_transaction\x18\x04 \x01(\x0b\x32\'.kin.agora.common.v4.StellarTransactionH\x00\x12@\n\x11transaction_error\x18\x05 \x01(\x0b\x32%.kin.agora.common.v4.TransactionError\x12?\n\x08payments\x18\x06 \x03(\x0b\x32-.kin.agora.transaction.v4.HistoryItem.Payment\x12\x36\n\x0cinvoice_list\x18\x07 \x01(\x0b\x32 .kin.agora.common.v3.InvoiceList\x12\x34\n\x10transaction_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xad\x01\n\x07Payment\x12>\n\x06source\x18\x01 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x43\n\x0b\x64\x65stination\x18\x02 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x0e\n\x06\x61mount\x18\x03 \x01(\x03\x12\r\n\x05index\x18\x04 \x01(\rB\x11\n\x0fraw_transaction\"#\n\x06\x43ursor\x12\x19\n\x05value\x18\x01 \x01(\x0c\x42\n\xfa\x42\x07z\x05\x10\x01\x18\x80\x01\x32\x94\x08\n\x0bTransaction\x12y\n\x10GetServiceConfig\x12\x31.kin.agora.transaction.v4.GetServiceConfigRequest\x1a\x32.kin.agora.transaction.v4.GetServiceConfigResponse\x12\x85\x01\n\x14GetMinimumKinVersion\x12\x35.kin.agora.transaction.v4.GetMinimumKinVersionRequest\x1a\x36.kin.agora.transaction.v4.GetMinimumKinVersionResponse\x12\x7f\n\x12GetRecentBlockhash\x12\x33.kin.agora.transaction.v4.GetRecentBlockhashRequest\x1a\x34.kin.agora.transaction.v4.GetRecentBlockhashResponse\x12\xac\x01\n!GetMinimumBalanceForRentExemption\x12\x42.kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest\x1a\x43.kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse\x12g\n\nGetHistory\x12+.kin.agora.transaction.v4.GetHistoryRequest\x1a,.kin.agora.transaction.v4.GetHistoryResponse\x12v\n\x0fSignTransaction\x12\x30.kin.agora.transaction.v4.SignTransactionRequest\x1a\x31.kin.agora.transaction.v4.SignTransactionResponse\x12|\n\x11SubmitTransaction\x12\x32.kin.agora.transaction.v4.SubmitTransactionRequest\x1a\x33.kin.agora.transaction.v4.SubmitTransactionResponse\x12s\n\x0eGetTransaction\x12/.kin.agora.transaction.v4.GetTransactionRequest\x1a\x30.kin.agora.transaction.v4.GetTransactionResponseB|\n org.kin.agora.gen.transaction.v4ZEgithub.com/kinecosystem/agora-api/genproto/transaction/v4;transaction\xa2\x02\x10\x41PBTransactionV4b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,common_dot_v3_dot_model__pb2.DESCRIPTOR,common_dot_v4_dot_model__pb2.DESCRIPTOR,])
_GETHISTORYREQUEST_DIRECTION = _descriptor.EnumDescriptor(
name='Direction',
full_name='kin.agora.transaction.v4.GetHistoryRequest.Direction',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ASC', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DESC', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=965,
serialized_end=995,
)
_sym_db.RegisterEnumDescriptor(_GETHISTORYREQUEST_DIRECTION)
_GETHISTORYRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='kin.agora.transaction.v4.GetHistoryResponse.Result',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1156,
serialized_end=1187,
)
_sym_db.RegisterEnumDescriptor(_GETHISTORYRESPONSE_RESULT)
_SIGNTRANSACTIONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='kin.agora.transaction.v4.SignTransactionResponse.Result',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REJECTED', index=1, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVOICE_ERROR', index=2, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1560,
serialized_end=1609,
)
_sym_db.RegisterEnumDescriptor(_SIGNTRANSACTIONRESPONSE_RESULT)
_SUBMITTRANSACTIONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='kin.agora.transaction.v4.SubmitTransactionResponse.Result',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ALREADY_SUBMITTED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REJECTED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVOICE_ERROR', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PAYER_REQUIRED', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2166,
serialized_end=2270,
)
_sym_db.RegisterEnumDescriptor(_SUBMITTRANSACTIONRESPONSE_RESULT)
_GETTRANSACTIONRESPONSE_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='kin.agora.transaction.v4.GetTransactionResponse.State',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2613,
serialized_end=2671,
)
_sym_db.RegisterEnumDescriptor(_GETTRANSACTIONRESPONSE_STATE)
_GETSERVICECONFIGREQUEST = _descriptor.Descriptor(
name='GetServiceConfigRequest',
full_name='kin.agora.transaction.v4.GetServiceConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=174,
serialized_end=199,
)
_GETSERVICECONFIGRESPONSE = _descriptor.Descriptor(
name='GetServiceConfigResponse',
full_name='kin.agora.transaction.v4.GetServiceConfigResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='subsidizer_account', full_name='kin.agora.transaction.v4.GetServiceConfigResponse.subsidizer_account', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='token_program', full_name='kin.agora.transaction.v4.GetServiceConfigResponse.token_program', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='token', full_name='kin.agora.transaction.v4.GetServiceConfigResponse.token', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=202,
serialized_end=428,
)
_GETMINIMUMKINVERSIONREQUEST = _descriptor.Descriptor(
name='GetMinimumKinVersionRequest',
full_name='kin.agora.transaction.v4.GetMinimumKinVersionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=459,
)
_GETMINIMUMKINVERSIONRESPONSE = _descriptor.Descriptor(
name='GetMinimumKinVersionResponse',
full_name='kin.agora.transaction.v4.GetMinimumKinVersionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='kin.agora.transaction.v4.GetMinimumKinVersionResponse.version', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=461,
serialized_end=508,
)
_GETRECENTBLOCKHASHREQUEST = _descriptor.Descriptor(
name='GetRecentBlockhashRequest',
full_name='kin.agora.transaction.v4.GetRecentBlockhashRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=510,
serialized_end=537,
)
_GETRECENTBLOCKHASHRESPONSE = _descriptor.Descriptor(
name='GetRecentBlockhashResponse',
full_name='kin.agora.transaction.v4.GetRecentBlockhashResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='blockhash', full_name='kin.agora.transaction.v4.GetRecentBlockhashResponse.blockhash', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=539,
serialized_end=628,
)
_GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST = _descriptor.Descriptor(
name='GetMinimumBalanceForRentExemptionRequest',
full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='size', full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest.size', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=630,
serialized_end=686,
)
_GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE = _descriptor.Descriptor(
name='GetMinimumBalanceForRentExemptionResponse',
full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='lamports', full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse.lamports', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=688,
serialized_end=749,
)
_GETHISTORYREQUEST = _descriptor.Descriptor(
name='GetHistoryRequest',
full_name='kin.agora.transaction.v4.GetHistoryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account_id', full_name='kin.agora.transaction.v4.GetHistoryRequest.account_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cursor', full_name='kin.agora.transaction.v4.GetHistoryRequest.cursor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='direction', full_name='kin.agora.transaction.v4.GetHistoryRequest.direction', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETHISTORYREQUEST_DIRECTION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=752,
serialized_end=995,
)
_GETHISTORYRESPONSE = _descriptor.Descriptor(
name='GetHistoryResponse',
full_name='kin.agora.transaction.v4.GetHistoryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='kin.agora.transaction.v4.GetHistoryResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='items', full_name='kin.agora.transaction.v4.GetHistoryResponse.items', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\010\222\001\005\010\000\020\200\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETHISTORYRESPONSE_RESULT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=998,
serialized_end=1187,
)
_SIGNTRANSACTIONREQUEST = _descriptor.Descriptor(
name='SignTransactionRequest',
full_name='kin.agora.transaction.v4.SignTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='kin.agora.transaction.v4.SignTransactionRequest.transaction', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='invoice_list', full_name='kin.agora.transaction.v4.SignTransactionRequest.invoice_list', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1190,
serialized_end=1335,
)
_SIGNTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='SignTransactionResponse',
full_name='kin.agora.transaction.v4.SignTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='kin.agora.transaction.v4.SignTransactionResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='kin.agora.transaction.v4.SignTransactionResponse.signature', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='invoice_errors', full_name='kin.agora.transaction.v4.SignTransactionResponse.invoice_errors', index=2,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_SIGNTRANSACTIONRESPONSE_RESULT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1338,
serialized_end=1609,
)
_SUBMITTRANSACTIONREQUEST = _descriptor.Descriptor(
name='SubmitTransactionRequest',
full_name='kin.agora.transaction.v4.SubmitTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.transaction', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='invoice_list', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.invoice_list', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='commitment', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.commitment', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dedupe_id', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.dedupe_id', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\004z\002\030@', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='send_simulation_event', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.send_simulation_event', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1612,
serialized_end=1871,
)
_SUBMITTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='SubmitTransactionResponse',
full_name='kin.agora.transaction.v4.SubmitTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.signature', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='transaction_error', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.transaction_error', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='invoice_errors', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.invoice_errors', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_SUBMITTRANSACTIONRESPONSE_RESULT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1874,
serialized_end=2270,
)
_GETTRANSACTIONREQUEST = _descriptor.Descriptor(
name='GetTransactionRequest',
full_name='kin.agora.transaction.v4.GetTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='kin.agora.transaction.v4.GetTransactionRequest.transaction_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='commitment', full_name='kin.agora.transaction.v4.GetTransactionRequest.commitment', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2273,
serialized_end=2419,
)
_GETTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='GetTransactionResponse',
full_name='kin.agora.transaction.v4.GetTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='kin.agora.transaction.v4.GetTransactionResponse.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='slot', full_name='kin.agora.transaction.v4.GetTransactionResponse.slot', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'0\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='confirmations', full_name='kin.agora.transaction.v4.GetTransactionResponse.confirmations', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='item', full_name='kin.agora.transaction.v4.GetTransactionResponse.item', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETTRANSACTIONRESPONSE_STATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2422,
serialized_end=2671,
)
_HISTORYITEM_PAYMENT = _descriptor.Descriptor(
name='Payment',
full_name='kin.agora.transaction.v4.HistoryItem.Payment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='kin.agora.transaction.v4.HistoryItem.Payment.source', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='destination', full_name='kin.agora.transaction.v4.HistoryItem.Payment.destination', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount', full_name='kin.agora.transaction.v4.HistoryItem.Payment.amount', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='index', full_name='kin.agora.transaction.v4.HistoryItem.Payment.index', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3187,
serialized_end=3360,
)
_HISTORYITEM = _descriptor.Descriptor(
name='HistoryItem',
full_name='kin.agora.transaction.v4.HistoryItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='kin.agora.transaction.v4.HistoryItem.transaction_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cursor', full_name='kin.agora.transaction.v4.HistoryItem.cursor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='solana_transaction', full_name='kin.agora.transaction.v4.HistoryItem.solana_transaction', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stellar_transaction', full_name='kin.agora.transaction.v4.HistoryItem.stellar_transaction', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='transaction_error', full_name='kin.agora.transaction.v4.HistoryItem.transaction_error', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='payments', full_name='kin.agora.transaction.v4.HistoryItem.payments', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='invoice_list', full_name='kin.agora.transaction.v4.HistoryItem.invoice_list', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='transaction_time', full_name='kin.agora.transaction.v4.HistoryItem.transaction_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_HISTORYITEM_PAYMENT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='raw_transaction', full_name='kin.agora.transaction.v4.HistoryItem.raw_transaction',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2674,
serialized_end=3379,
)
_CURSOR = _descriptor.Descriptor(
name='Cursor',
full_name='kin.agora.transaction.v4.Cursor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='kin.agora.transaction.v4.Cursor.value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\007z\005\020\001\030\200\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3381,
serialized_end=3416,
)
_GETSERVICECONFIGRESPONSE.fields_by_name['subsidizer_account'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETSERVICECONFIGRESPONSE.fields_by_name['token_program'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETSERVICECONFIGRESPONSE.fields_by_name['token'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETRECENTBLOCKHASHRESPONSE.fields_by_name['blockhash'].message_type = common_dot_v4_dot_model__pb2._BLOCKHASH
_GETHISTORYREQUEST.fields_by_name['account_id'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETHISTORYREQUEST.fields_by_name['cursor'].message_type = _CURSOR
_GETHISTORYREQUEST.fields_by_name['direction'].enum_type = _GETHISTORYREQUEST_DIRECTION
_GETHISTORYREQUEST_DIRECTION.containing_type = _GETHISTORYREQUEST
_GETHISTORYRESPONSE.fields_by_name['result'].enum_type = _GETHISTORYRESPONSE_RESULT
_GETHISTORYRESPONSE.fields_by_name['items'].message_type = _HISTORYITEM
_GETHISTORYRESPONSE_RESULT.containing_type = _GETHISTORYRESPONSE
_SIGNTRANSACTIONREQUEST.fields_by_name['transaction'].message_type = common_dot_v4_dot_model__pb2._TRANSACTION
_SIGNTRANSACTIONREQUEST.fields_by_name['invoice_list'].message_type = common_dot_v3_dot_model__pb2._INVOICELIST
_SIGNTRANSACTIONRESPONSE.fields_by_name['result'].enum_type = _SIGNTRANSACTIONRESPONSE_RESULT
_SIGNTRANSACTIONRESPONSE.fields_by_name['signature'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONSIGNATURE
_SIGNTRANSACTIONRESPONSE.fields_by_name['invoice_errors'].message_type = common_dot_v3_dot_model__pb2._INVOICEERROR
_SIGNTRANSACTIONRESPONSE_RESULT.containing_type = _SIGNTRANSACTIONRESPONSE
_SUBMITTRANSACTIONREQUEST.fields_by_name['transaction'].message_type = common_dot_v4_dot_model__pb2._TRANSACTION
_SUBMITTRANSACTIONREQUEST.fields_by_name['invoice_list'].message_type = common_dot_v3_dot_model__pb2._INVOICELIST
_SUBMITTRANSACTIONREQUEST.fields_by_name['commitment'].enum_type = common_dot_v4_dot_model__pb2._COMMITMENT
_SUBMITTRANSACTIONRESPONSE.fields_by_name['result'].enum_type = _SUBMITTRANSACTIONRESPONSE_RESULT
_SUBMITTRANSACTIONRESPONSE.fields_by_name['signature'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONSIGNATURE
_SUBMITTRANSACTIONRESPONSE.fields_by_name['transaction_error'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONERROR
_SUBMITTRANSACTIONRESPONSE.fields_by_name['invoice_errors'].message_type = common_dot_v3_dot_model__pb2._INVOICEERROR
_SUBMITTRANSACTIONRESPONSE_RESULT.containing_type = _SUBMITTRANSACTIONRESPONSE
_GETTRANSACTIONREQUEST.fields_by_name['transaction_id'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONID
_GETTRANSACTIONREQUEST.fields_by_name['commitment'].enum_type = common_dot_v4_dot_model__pb2._COMMITMENT
_GETTRANSACTIONRESPONSE.fields_by_name['state'].enum_type = _GETTRANSACTIONRESPONSE_STATE
_GETTRANSACTIONRESPONSE.fields_by_name['item'].message_type = _HISTORYITEM
_GETTRANSACTIONRESPONSE_STATE.containing_type = _GETTRANSACTIONRESPONSE
_HISTORYITEM_PAYMENT.fields_by_name['source'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_HISTORYITEM_PAYMENT.fields_by_name['destination'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_HISTORYITEM_PAYMENT.containing_type = _HISTORYITEM
_HISTORYITEM.fields_by_name['transaction_id'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONID
_HISTORYITEM.fields_by_name['cursor'].message_type = _CURSOR
_HISTORYITEM.fields_by_name['solana_transaction'].message_type = common_dot_v4_dot_model__pb2._TRANSACTION
_HISTORYITEM.fields_by_name['stellar_transaction'].message_type = common_dot_v4_dot_model__pb2._STELLARTRANSACTION
_HISTORYITEM.fields_by_name['transaction_error'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONERROR
_HISTORYITEM.fields_by_name['payments'].message_type = _HISTORYITEM_PAYMENT
_HISTORYITEM.fields_by_name['invoice_list'].message_type = common_dot_v3_dot_model__pb2._INVOICELIST
_HISTORYITEM.fields_by_name['transaction_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_HISTORYITEM.oneofs_by_name['raw_transaction'].fields.append(
_HISTORYITEM.fields_by_name['solana_transaction'])
_HISTORYITEM.fields_by_name['solana_transaction'].containing_oneof = _HISTORYITEM.oneofs_by_name['raw_transaction']
_HISTORYITEM.oneofs_by_name['raw_transaction'].fields.append(
_HISTORYITEM.fields_by_name['stellar_transaction'])
_HISTORYITEM.fields_by_name['stellar_transaction'].containing_oneof = _HISTORYITEM.oneofs_by_name['raw_transaction']
DESCRIPTOR.message_types_by_name['GetServiceConfigRequest'] = _GETSERVICECONFIGREQUEST
DESCRIPTOR.message_types_by_name['GetServiceConfigResponse'] = _GETSERVICECONFIGRESPONSE
DESCRIPTOR.message_types_by_name['GetMinimumKinVersionRequest'] = _GETMINIMUMKINVERSIONREQUEST
DESCRIPTOR.message_types_by_name['GetMinimumKinVersionResponse'] = _GETMINIMUMKINVERSIONRESPONSE
DESCRIPTOR.message_types_by_name['GetRecentBlockhashRequest'] = _GETRECENTBLOCKHASHREQUEST
DESCRIPTOR.message_types_by_name['GetRecentBlockhashResponse'] = _GETRECENTBLOCKHASHRESPONSE
DESCRIPTOR.message_types_by_name['GetMinimumBalanceForRentExemptionRequest'] = _GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST
DESCRIPTOR.message_types_by_name['GetMinimumBalanceForRentExemptionResponse'] = _GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetHistoryRequest'] = _GETHISTORYREQUEST
DESCRIPTOR.message_types_by_name['GetHistoryResponse'] = _GETHISTORYRESPONSE
DESCRIPTOR.message_types_by_name['SignTransactionRequest'] = _SIGNTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SignTransactionResponse'] = _SIGNTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['SubmitTransactionRequest'] = _SUBMITTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SubmitTransactionResponse'] = _SUBMITTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetTransactionRequest'] = _GETTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['GetTransactionResponse'] = _GETTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['HistoryItem'] = _HISTORYITEM
DESCRIPTOR.message_types_by_name['Cursor'] = _CURSOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetServiceConfigRequest = _reflection.GeneratedProtocolMessageType('GetServiceConfigRequest', (_message.Message,), {
'DESCRIPTOR' : _GETSERVICECONFIGREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetServiceConfigRequest)
})
_sym_db.RegisterMessage(GetServiceConfigRequest)
GetServiceConfigResponse = _reflection.GeneratedProtocolMessageType('GetServiceConfigResponse', (_message.Message,), {
'DESCRIPTOR' : _GETSERVICECONFIGRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetServiceConfigResponse)
})
_sym_db.RegisterMessage(GetServiceConfigResponse)
GetMinimumKinVersionRequest = _reflection.GeneratedProtocolMessageType('GetMinimumKinVersionRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMINIMUMKINVERSIONREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumKinVersionRequest)
})
_sym_db.RegisterMessage(GetMinimumKinVersionRequest)
GetMinimumKinVersionResponse = _reflection.GeneratedProtocolMessageType('GetMinimumKinVersionResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMINIMUMKINVERSIONRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumKinVersionResponse)
})
_sym_db.RegisterMessage(GetMinimumKinVersionResponse)
GetRecentBlockhashRequest = _reflection.GeneratedProtocolMessageType('GetRecentBlockhashRequest', (_message.Message,), {
'DESCRIPTOR' : _GETRECENTBLOCKHASHREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetRecentBlockhashRequest)
})
_sym_db.RegisterMessage(GetRecentBlockhashRequest)
GetRecentBlockhashResponse = _reflection.GeneratedProtocolMessageType('GetRecentBlockhashResponse', (_message.Message,), {
'DESCRIPTOR' : _GETRECENTBLOCKHASHRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetRecentBlockhashResponse)
})
_sym_db.RegisterMessage(GetRecentBlockhashResponse)
GetMinimumBalanceForRentExemptionRequest = _reflection.GeneratedProtocolMessageType('GetMinimumBalanceForRentExemptionRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest)
})
_sym_db.RegisterMessage(GetMinimumBalanceForRentExemptionRequest)
GetMinimumBalanceForRentExemptionResponse = _reflection.GeneratedProtocolMessageType('GetMinimumBalanceForRentExemptionResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse)
})
_sym_db.RegisterMessage(GetMinimumBalanceForRentExemptionResponse)
GetHistoryRequest = _reflection.GeneratedProtocolMessageType('GetHistoryRequest', (_message.Message,), {
'DESCRIPTOR' : _GETHISTORYREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetHistoryRequest)
})
_sym_db.RegisterMessage(GetHistoryRequest)
GetHistoryResponse = _reflection.GeneratedProtocolMessageType('GetHistoryResponse', (_message.Message,), {
'DESCRIPTOR' : _GETHISTORYRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetHistoryResponse)
})
_sym_db.RegisterMessage(GetHistoryResponse)
SignTransactionRequest = _reflection.GeneratedProtocolMessageType('SignTransactionRequest', (_message.Message,), {
'DESCRIPTOR' : _SIGNTRANSACTIONREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SignTransactionRequest)
})
_sym_db.RegisterMessage(SignTransactionRequest)
SignTransactionResponse = _reflection.GeneratedProtocolMessageType('SignTransactionResponse', (_message.Message,), {
'DESCRIPTOR' : _SIGNTRANSACTIONRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SignTransactionResponse)
})
_sym_db.RegisterMessage(SignTransactionResponse)
SubmitTransactionRequest = _reflection.GeneratedProtocolMessageType('SubmitTransactionRequest', (_message.Message,), {
'DESCRIPTOR' : _SUBMITTRANSACTIONREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SubmitTransactionRequest)
})
_sym_db.RegisterMessage(SubmitTransactionRequest)
SubmitTransactionResponse = _reflection.GeneratedProtocolMessageType('SubmitTransactionResponse', (_message.Message,), {
'DESCRIPTOR' : _SUBMITTRANSACTIONRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SubmitTransactionResponse)
})
_sym_db.RegisterMessage(SubmitTransactionResponse)
GetTransactionRequest = _reflection.GeneratedProtocolMessageType('GetTransactionRequest', (_message.Message,), {
'DESCRIPTOR' : _GETTRANSACTIONREQUEST,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetTransactionRequest)
})
_sym_db.RegisterMessage(GetTransactionRequest)
GetTransactionResponse = _reflection.GeneratedProtocolMessageType('GetTransactionResponse', (_message.Message,), {
'DESCRIPTOR' : _GETTRANSACTIONRESPONSE,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetTransactionResponse)
})
_sym_db.RegisterMessage(GetTransactionResponse)
HistoryItem = _reflection.GeneratedProtocolMessageType('HistoryItem', (_message.Message,), {
'Payment' : _reflection.GeneratedProtocolMessageType('Payment', (_message.Message,), {
'DESCRIPTOR' : _HISTORYITEM_PAYMENT,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.HistoryItem.Payment)
})
,
'DESCRIPTOR' : _HISTORYITEM,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.HistoryItem)
})
_sym_db.RegisterMessage(HistoryItem)
_sym_db.RegisterMessage(HistoryItem.Payment)
Cursor = _reflection.GeneratedProtocolMessageType('Cursor', (_message.Message,), {
'DESCRIPTOR' : _CURSOR,
'__module__' : 'transaction.v4.transaction_service_pb2'
# @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.Cursor)
})
_sym_db.RegisterMessage(Cursor)
DESCRIPTOR._options = None
_GETSERVICECONFIGRESPONSE.fields_by_name['token_program']._options = None
_GETSERVICECONFIGRESPONSE.fields_by_name['token']._options = None
_GETRECENTBLOCKHASHRESPONSE.fields_by_name['blockhash']._options = None
_GETHISTORYREQUEST.fields_by_name['account_id']._options = None
_GETHISTORYRESPONSE.fields_by_name['items']._options = None
_SIGNTRANSACTIONREQUEST.fields_by_name['transaction']._options = None
_SUBMITTRANSACTIONREQUEST.fields_by_name['transaction']._options = None
_SUBMITTRANSACTIONREQUEST.fields_by_name['dedupe_id']._options = None
_GETTRANSACTIONREQUEST.fields_by_name['transaction_id']._options = None
_GETTRANSACTIONRESPONSE.fields_by_name['slot']._options = None
_HISTORYITEM_PAYMENT.fields_by_name['source']._options = None
_HISTORYITEM_PAYMENT.fields_by_name['destination']._options = None
_HISTORYITEM.fields_by_name['transaction_id']._options = None
_CURSOR.fields_by_name['value']._options = None
_TRANSACTION = _descriptor.ServiceDescriptor(
name='Transaction',
full_name='kin.agora.transaction.v4.Transaction',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3419,
serialized_end=4463,
methods=[
_descriptor.MethodDescriptor(
name='GetServiceConfig',
full_name='kin.agora.transaction.v4.Transaction.GetServiceConfig',
index=0,
containing_service=None,
input_type=_GETSERVICECONFIGREQUEST,
output_type=_GETSERVICECONFIGRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetMinimumKinVersion',
full_name='kin.agora.transaction.v4.Transaction.GetMinimumKinVersion',
index=1,
containing_service=None,
input_type=_GETMINIMUMKINVERSIONREQUEST,
output_type=_GETMINIMUMKINVERSIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetRecentBlockhash',
full_name='kin.agora.transaction.v4.Transaction.GetRecentBlockhash',
index=2,
containing_service=None,
input_type=_GETRECENTBLOCKHASHREQUEST,
output_type=_GETRECENTBLOCKHASHRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetMinimumBalanceForRentExemption',
full_name='kin.agora.transaction.v4.Transaction.GetMinimumBalanceForRentExemption',
index=3,
containing_service=None,
input_type=_GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST,
output_type=_GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetHistory',
full_name='kin.agora.transaction.v4.Transaction.GetHistory',
index=4,
containing_service=None,
input_type=_GETHISTORYREQUEST,
output_type=_GETHISTORYRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SignTransaction',
full_name='kin.agora.transaction.v4.Transaction.SignTransaction',
index=5,
containing_service=None,
input_type=_SIGNTRANSACTIONREQUEST,
output_type=_SIGNTRANSACTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SubmitTransaction',
full_name='kin.agora.transaction.v4.Transaction.SubmitTransaction',
index=6,
containing_service=None,
input_type=_SUBMITTRANSACTIONREQUEST,
output_type=_SUBMITTRANSACTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetTransaction',
full_name='kin.agora.transaction.v4.Transaction.GetTransaction',
index=7,
containing_service=None,
input_type=_GETTRANSACTIONREQUEST,
output_type=_GETTRANSACTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_TRANSACTION)
DESCRIPTOR.services_by_name['Transaction'] = _TRANSACTION
# @@protoc_insertion_point(module_scope)
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from datetime import datetime
from typing import Dict
import braintree
from airbyte_protocol import SyncMode
from base_python import AirbyteLogger
from base_singer import BaseSingerSource, SyncModeInfo
from braintree.exceptions.authentication_error import AuthenticationError
from dateutil import parser
from dateutil.relativedelta import relativedelta
class SourceBraintreeSinger(BaseSingerSource):
tap_cmd = "tap-braintree"
tap_name = "BrainTree API"
api_error = AuthenticationError
force_full_refresh = True
def transform_config(self, raw_config: json) -> json:
config = raw_config
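        # both branches below shift the start date forward by one month and format it as ISO-8601 UTC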
if "start_date" in raw_config:
config["start_date"] = (parser.parse(raw_config["start_date"]) + relativedelta(months=+1)).strftime("%Y-%m-%dT%H:%M:%SZ")
else:
config["start_date"] = (datetime.now() + relativedelta(months=+1)).strftime("%Y-%m-%dT%H:%M:%SZ")
return config
def try_connect(self, logger: AirbyteLogger, config: json):
"""Test provided credentials, raises self.api_error if something goes wrong"""
client = braintree.BraintreeGateway(
braintree.Configuration(
environment=getattr(braintree.Environment, config["environment"]),
merchant_id=config["merchant_id"],
public_key=config["public_key"],
private_key=config["private_key"],
)
)
client.transaction.search(braintree.TransactionSearch.created_at.between(datetime.now() + relativedelta(days=-1), datetime.now()))
def get_sync_mode_overrides(self) -> Dict[str, SyncModeInfo]:
return {"transactions": SyncModeInfo(supported_sync_modes=[SyncMode.incremental])}
def discover_cmd(self, logger: AirbyteLogger, config_path: str) -> str:
return (
f"{self.tap_cmd} -c {config_path} --discover"
+ ' | grep "\\"type\\": \\"SCHEMA\\"" | head -1'
+ '| jq -c "{\\"streams\\":[{\\"stream\\": .stream, \\"schema\\": .schema}]}"'
)
def read_cmd(self, logger: AirbyteLogger, config_path: str, catalog_path: str, state_path: str = None) -> str:
state_option = f"--state {state_path}" if state_path else ""
return f"{self.tap_cmd} -c {config_path} -p {catalog_path} {state_option}"
| python |
import libsinan
from libsinan import handler, output, jsax
class VersionCheckTaskHandler(output.SimpleTaskHandler):
def __init__(self):
output.SimpleTaskHandler.__init__(self)
self.version = None
def object_end(self):
""" We only get one object per right now so
lets print it out when we get it """
if self.task == "version":
if self.event_type == 'info':
self.version = self.desc
return True
else:
return output.SimpleTaskHandler.object_end(self)
class VersionCheckHandler(handler.Handler):
def handles(self, task):
return task == "version"
def handle(self, largs):
self.do_request(largs, handle)
version = None
def handle(task, conn):
global version
if conn.status == 200:
try:
task_handler = VersionCheckTaskHandler()
jsax.parse(conn, task_handler)
version = task_handler.version
return 0
        except ValueError as msg:
            print("Got an error back from sinan. Check the logs at ~/.sinan/logs/kernel.log")
else:
return 1
| python |
import urllib.request, json
from .models import News_Update
from .models import Article
api_key = None
base_url = None
articles_url = None
def configure_request(app):
global api_key, base_url, articles_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
articles_url = app.config['ARTICLES_BASE_URL']
def get_updates(category):
'''
    function to get the json response of our request
:param category
:return:
'''
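    # base_url is expected to be a format string with placeholders for the category and the api key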
get_updates_url = base_url.format(category, api_key)
print(get_updates_url)
with urllib.request.urlopen(get_updates_url) as url:
get_updates_data = url.read()
get_updates_response = json.loads(get_updates_data)
update_results = []
if get_updates_response['sources']:
update_results = get_updates_response['sources']
update_results = process_results(update_results)
return update_results
def process_results(update_results_list):
'''
process update result and transform to list of object
'''
update_results = []
for update_content in update_results_list:
id = update_content.get('id')
name = update_content.get('name')
category = update_content.get('category')
url = update_content.get('url')
update_object = News_Update(id, name, category, url)
update_results.append(update_object)
return update_results
def get_articles(id):
get_articles_url = articles_url.format(id, api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
articles_results = None
if get_articles_response['articles']:
articles_results = get_articles_response['articles']
articles_results = process_articles(articles_results)
return articles_results
# articles_results = json.loads(url.read())
# articles_object = None
# if articles_results['articles']:
# articles_object = process_articles(articles_results['articles'])
#
# return articles_object
def process_articles(articles_list):
articles_results = []
for article_cont in articles_list:
id = article_cont.get('id')
author = article_cont.get('author')
title = article_cont.get('title')
description = article_cont.get('description')
url = article_cont.get('url')
image = article_cont.get('urlToImage')
date = article_cont.get('publishedAt')
articles_object = Article(id,author,title,description,url,image,date)
articles_results.append(articles_object)
return articles_results
| python |
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from rest_framework import authentication, viewsets
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework import status
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.conf import settings
import jwt
from rest_framework_jwt.utils import jwt_payload_handler
from restfulexperiment.restful.models import User
from restfulexperiment.restful.serializers import UserSerializer
@api_view(['POST'])
@permission_classes((AllowAny, ))
def login(request):
'''
TODO Incomplete
'''
if request.method == 'POST':
email = request.data.get('email')
password = request.data.get('password')
user = User.objects.get(email=email, password=password)
payload = jwt_payload_handler(user)
token = jwt.encode(payload, settings.SECRET_KEY)
        return Response({'token': token})
return Response({'mensagem': 'todo'}, status=404)
@api_view(['GET'])
@permission_classes((AllowAny, ))
def user_collection(request):
if request.method == 'GET':
users = User.objects.all().order_by('-created')
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
@api_view(['GET', 'POST'])
@permission_classes((AllowAny, ))
def user_element(request, pk=None):
if request.method == 'GET':
try:
user = User.objects.get(pk=pk)
except User.DoesNotExist:
return Response(status=404)
serializer = UserSerializer(user)
return Response(serializer.data)
if request.method == 'POST':
data = {
"name": request.data.get('name'),
"email": request.data.get('email'),
"password": request.data.get('password'),
"phones": request.data.get('phones'),
}
serializer = UserSerializer(data=data)
if serializer.is_valid():
try:
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
except IntegrityError:
return Response({'mensagem': 'E-mail ja existente'}, status=status.HTTP_406_NOT_ACCEPTABLE)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| python |
""" Docstring for the app_test.py module.
"""
import pytest
from app import app
@pytest.fixture
def client():
"""
Method to yield a test client from app.
"""
app.config['TESTING'] = True
client = app.test_client()
yield client
def test_ping(client):
"""
Function to test debug route.
:param client: A testing client object.
"""
rep = client.get("/ping")
assert 200 == rep.status_code
def test_weather(client):
"""
Function to test weather route.
:param client: A testing client object.
"""
rep = client.get('temperature/London,uk')
assert 200 == rep.status_code
def test_all_temperature(client):
"""
Function to test weather cached route.
:param client: A testing client object.
"""
rep = client.get('temperature?max=4')
assert 200 == rep.status_code
| python |
#!/usr/bin/python
# An example vendordata server implementation for OpenStack Nova. With a giant
# nod in the direction of Chad Lung for his very helpful blog post at
# http://www.giantflyingsaucer.com/blog/?p=4701
import json
import sys
from webob import Response
from webob.dec import wsgify
from paste import httpserver
from paste.deploy import loadapp
from oslo_config import cfg
from oslo_log import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@wsgify
def application(req):
if req.environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
return Response('User is not authenticated', status=401)
try:
data = req.environ.get('wsgi.input').read()
if not data:
return Response('No data provided', status=500)
# Get the data nova handed us for this request
#
# An example of this data:
# {
# "hostname": "foo",
# "image-id": "75a74383-f276-4774-8074-8c4e3ff2ca64",
# "instance-id": "2ae914e9-f5ab-44ce-b2a2-dcf8373d899d",
# "metadata": {},
# "project-id": "039d104b7a5c4631b4ba6524d0b9e981",
# "user-data": null
# }
indata = json.loads(data)
# We need to make up a response. This is where your interesting thing
# would happen. However, I don't have anything interesting to do, so
# I just return Carrie Fisher quotes instead.
quotes = {'0': 'Instant gratification takes too long.',
'1': ('Resentment is like drinking poison and waiting for '
'the other person to die.'),
'2': ('I was street smart, but unfortunately the street was '
'Rodeo Drive.'),
'3': ('You can\'t find any true closeness in Hollywood, '
'because everybody does the fake closeness so well.'),
'4': ('As you get older, the pickings get slimmer, but the '
'people don\'t.'),
'5': ('There is no point at which you can say, "Well, I\'m '
'successful now. I might as well take a nap."'),
'6': ('I really love the internet. They say chat-rooms are '
'the trailer park of the internet but I find it '
'amazing.'),
'7': ('I don\'t think Christmas is necessarily about '
'things. It\'s about being good to one another, it\'s '
'about the Christian ethic, it\'s about kindness.'),
'8': ('I don\'t want my life to imitate art, I want my '
'life to be art.'),
'9': ('I am a spy in the house of me. I report back from '
'the front lines of the battle that is me. I am '
'somewhat nonplused by the event that is my life.'),
'a': 'I drowned in moonlight, strangled by my own bra.',
'b': 'Even in space there\'s a double standard for women.',
'c': ('Everyone drives somebody crazy. I just have a bigger '
'car.'),
'd': ('Sometimes you can only find Heaven by slowly '
'backing away from Hell.'),
'e': 'I\'m thinking of having my DNA fumigated.',
'f': 'Leia follows me like a vague smell.'
}
outdata = {'carrie_says': quotes[indata['instance-id'][-1]]}
return Response(json.dumps(outdata, indent=4, sort_keys=True))
except Exception as e:
return Response('Server error while processing request: %s' % e,
status=500)
def app_factory(global_config, **local_config):
return application
def main():
logging.register_options(CONF)
# Make keystonemiddleware emit debug logs
extra_default_log_levels = ['keystonemiddleware=DEBUG']
logging.set_defaults(default_log_levels=(logging.get_default_log_levels() +
extra_default_log_levels))
# Parse our config
CONF(sys.argv[1:])
# Set us up to log as well
logging.setup(CONF, 'vendordata')
# Start the web server
wsgi_app = loadapp('config:paste.ini', relative_to='.')
httpserver.serve(wsgi_app, host='0.0.0.0', port=8888)
if __name__ == '__main__':
main()
| python |
#PYTHON 3.6
#coding : utf8
from tkinter import *
class Aplication(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.msg = Label(self, text='Hello World')
self.msg.pack()
self.bye = Button(self, text="Bye", command=self.quit)
self.bye.pack()
self.pack()
app = Aplication()
app.master.title('Exemplo')
app.master.geometry('200x200+100+100')
mainloop()
| python |
# %matplotlib notebook
import os, re, sys, urllib, requests, base64, IPython, io, pickle, glob
import itertools as itt
import numpy as np
import subprocess as sb
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import roadrunner
from bs4 import BeautifulSoup as BS
from IPython.display import Image, display
from matplotlib import rcParams
import analyzerTools as AT
def run_test(analyzer, test_no, t_end=1000, atomize=False):
    if(analyzer.run_single_test(test_no, t_end=t_end, atomize=atomize)):
print("run successful {}".format(test_no))
analyzer.plot_results(test_no, legend=True, save_fig=True)
# if(analyzer.run_old_test(test_no, t_end=100, atomize=atomize)):
# print("run successful {}".format(test_no))
# analyzer.plot_old_results(test_no, legend=False, save_fig=True)
else:
print("run failed {}".format(test_no))
def update_results(results, fname="analyzer.pkl"):
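    # merge the new results into an existing pickle file if one exists, otherwise create a fresh one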
if os.path.isfile(fname):
with open(fname, "rb") as f:
old_results = pickle.load(f)
for key in results.keys():
old_results[key] = results[key]
with open(fname, "wb") as f:
pickle.dump(old_results, f)
else:
with open(fname, "wb") as f:
pickle.dump(results, f)
print("updated results")
return True
# All the paths we need
# The BNG2.pl file for bionetgen runs
bng_path = "/home/monoid/apps/BioNetGen-2.5.0/BNG2.pl"
# This is the python file that can be called from the command line
sbml_translator_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/sbmlTranslator.py"
# if you give this, the ATOMIZER ANALYZER 5000 will import atomizer and run it internally
# translator_package_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser"
translator_package_path = None
# This is necessary for atomizer; it has default naming conventions and a lot more
# this path will be symlinked into wherever you want to run the translator
config_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/config"
# the path to the folder that contains 5 zero padded folders for each test
tests_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated"
# Now we also add the COPASI path!
copasi_path = "/home/monoid/apps/copasi/4.27/bin/CopasiSE"
# change directory to where we want to run the tests
os.chdir("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/analyzerTools")
# The analyzer setup
ba = AT.BiomodelAnalyzer(bng_path, sbml_translator_path, config_path, tests_path,
translator_import=translator_package_path, copasi_path=copasi_path)
# test_no = 3
# imgdats = ba.load_test_data(test_no)
# print(len(imgdats))
# Image(imgdats[0])
#
# if(ba.run_single_test(test_no, t_end=100)):
#     ba.plot_results(test_no, legend=False)
# Let's re-run everything
# tests = list(range(419,730))
tests = list(range(1,915))
known_issues = set([24,25,34,154,155,196,201,589,613,668,669,696,468, # Not implemented
643,644,645, # Complex "i" is used in function/parameter
63,245,248,305,556,575,578,542, # rule named used as parameter
342,429,457,547,570,627,637,638, # compartment used as parameter
527,562,592,593,596,723,250, # Actually broken, even in Copasi
304,324,330,331,341,343,345,349,367,371,374,377,381,533,548,
549,551,618,642,670,671,680,682,684,118,252,673,531,532,555,
561, # no reactions
306,307,308,309,310,311,388,390,391,393,409,
428,505,512,528,557,566,567,719,641,71,90,173,
253, # assignment rules used in reactions
610, # function defs for v16/v17
558,568,674,722,412,445,302,208,268,51,55,162,180,179,579,
691,465,466,238,312,538,603,604,605,215, # Uses time
635,636, # Uses not only time but also encoded strings for parameters
119, # single reaction, not really suitable for translation
47,483,484,486,487, # initial states should result in no reactions,
164,165,167,326,375,400,554,577,664,672,693,698,
234,237,286,450, # Uses piecewise definitions
396,398,507,522,705,
499,474, # SBML modeller is careless and uses species that should be params
607, # Function not defined properly/links to another function
319,206,39,145,353,385,392,463,608,470,472, # non-integer stoichiometry
161,182,239, # true multi-compartment model
271 # multi-compartment and the modeller has issues
])
# Need to figure out, mostly CVODE
list_of_fails = set([246,336,378,383,384,387,438,9,107,123,183,192,269,
279,292,328,617,678,606, # new ones
                     616, # Legitimate bug: if the species name is very simple AND the rate constant
                     # depends only on the species concentration AND we end up generating
                     # an observable with the same name as the species name, then BNGL thinks
                     # we are giving the obs name as the rate constant, leading to a bug
255, # Circular dependency in funcs?
401,402,403, # if func messes with func ordering
559, # can't load copasi result
64, # Due to website addition? also in too long set
232, # BNG takes too long?
172,176,177 # doesn't end up translating, takes a long time?
])
too_long = set([64,574,426,70,217,247,503,469,471,473,506,451,595, # WAAAY TOO LONG - debug
332,334, # ATOMIZER BREAKS THESE
217,247,293,426,469 # too long when atomized
])
################# NEW CHECKS ##############
# A complete new set of checks to see the latest state of the tool as we are
# writing the manuscript.
new_checks = set([64,217, # too long
63, # fairly long but does complete
119,465,468, # no data?
247,269,469,470,471,472,473,474,
503,505,506,595,606,608,835,863 # long, didn't check if completes
])
################# RUN FAILS ###############
run_fails = set([9,24,25,34,51,55,107,
123,154,155,162,164,165,167,172,176,177,179,180,183,192,
201,208,215,232,234,237,238,245,246,248,250,255,268,279,286,292,
302,305,312,326,328,332,334,336,353,375,383,384,385,387,396,398,
400,401,402,403,412,426,429,438,445,450,451,457,463,466,483,484,
486,487,499,507,522,527,531,532,538,542,547,554,555,556,558,559,
561,562,574,575,577,578,579,589,592,593,599,600,602,607,610,617,
627,635,636,637,638,643,644,645,664,668,669,672,673,674,675,678,
687,688,692,693,696,698,705,722,723,730,731,748,749,757,759,760,
763,764,766,775,801,802,808,815,824,826,833,837,840,841,849,851,
858,859,876,879,880 # run_failed
])
################# EVENTS #################
w_event = set([1,7,56,77,81,87,88,95,96,97,101,104,109, # models with events
111,117,120,121,122,124,125,126,127,128,129,130,131, # models with events
132,133,134,135,136,137,139,140,141,142,144,148,149, # models with events
152,153,158,186,187,188,189,193,194,195,196,227,235, # models with events
241,244,256,265,281,285,287,297,301,316,317,318,327, # models with events
337,338,339,340,342,344,404,408,422,436,437,439,479, # models with events
480,488,493,494,496,497,534,535,536,537,540,541,563, # models with events
570,571,597,598,601,612,613,620,621,628,632,634,650, # models with events
659,681,695,699,702,706,711,718,727,734,735,736,786, # models with events
789,791,794,806,814,816,817,818,820,822,825,829,834, # models with events
856,860,862,864,901]) # models with events
################# END CHECKS ##############
all_issues = known_issues.union(w_event)
all_issues = all_issues.union(list_of_fails)
# run tests
for test_no in tests:
#if test_no in all_issues:
# continue
if test_no in w_event or test_no in new_checks or test_no in run_fails:
continue
if (os.path.isfile("../curated/BIOMD{0:010d}.xml".format(test_no))):
run_test(ba, test_no, t_end=100, atomize=False)
# update_results(ba.all_results)
else:
print("number {} doesn't exist".format(test_no))
| python |
from datetime import datetime, timedelta
from freezegun import freeze_time
from pyobjdb import PyObjDB
def test_basic(tmp_path):
db = PyObjDB(str(tmp_path / 'test.db'))
db.put('key_str', 'foo')
assert db.get('key_str') == 'foo'
assert db.get(b'key_str') == 'foo'
db.put('key_str', 'bar')
assert db.get('key_str') == 'bar'
db.put('key_int', 42)
assert db.get('key_int') == 42
db.put('key_float', 4.125)
assert db.get('key_float') == 4.125
db.put('key_list', ['foo', 42, 4.125])
assert db.get('key_list') == ['foo', 42, 4.125]
db.put('key_tuple', ('foo', 42, 4.125))
db.put('key_dict', {'foo': 42, 'bar': 4.125})
assert db.get('key_dict') == {'foo': 42, 'bar': 4.125}
db.close()
def test_reopen(tmp_path):
db1 = PyObjDB(str(tmp_path / 'test.db'))
db1.put('foo', 'bar')
assert db1.get('foo') == 'bar'
db1.close()
db2 = PyObjDB(str(tmp_path / 'test.db'))
assert db2.get('foo') == 'bar'
db2.close()
def test_ttl(tmp_path):
db = PyObjDB(str(tmp_path / 'test.db'))
with freeze_time(datetime.utcnow()) as frozen_time:
db.put('foo', 'bar', ttl=5)
assert db.get('foo') == 'bar'
frozen_time.tick(timedelta(seconds=3))
assert db.get('foo') == 'bar'
frozen_time.tick(timedelta(seconds=5))
assert db.get('foo') is None
def test_delete(tmp_path):
db = PyObjDB(str(tmp_path / 'test.db'))
db.put('foo', 'bar')
assert db.get('foo') == 'bar'
db.delete('foo')
assert db.get('foo') is None
class Greeter(object):
def __init__(self, name):
self.name = name
def get_greeting(self):
return f'Hello, {self.name}!'
def test_custom_object(tmp_path):
db = PyObjDB(str(tmp_path / 'test.db'))
obj1 = Greeter('Kermit')
db.put('hello_kermit', obj1)
obj2 = db.get('hello_kermit')
assert isinstance(obj2, Greeter)
assert obj2.name == 'Kermit'
def test_cleanup(tmp_path):
db = PyObjDB(
str(tmp_path / 'test.db'),
cleanup_interval=60,
compaction_interval=3600,
)
with freeze_time(datetime.utcnow()) as frozen_time:
db.put('foo', 'bar', ttl=5)
db.put('baz', 'qux', ttl=7)
db.put('wibble', 'wobble', ttl=3600)
assert db.get('foo') == 'bar'
frozen_time.tick(timedelta(seconds=3))
assert db.get('foo') == 'bar'
frozen_time.tick(timedelta(seconds=5))
assert db.get('foo') is None
assert db.cleanup() == 0
assert db.get('wibble') == 'wobble'
frozen_time.tick(timedelta(seconds=120))
assert db.cleanup() == 2
assert db.get('wibble') == 'wobble'
frozen_time.tick(timedelta(seconds=7200))
db.cleanup()
| python |
""" This file contains tests for partition explainer.
"""
import tempfile
import pytest
import numpy as np
import shap
def test_serialization_partition():
""" This tests the serialization of partition explainers.
"""
AutoTokenizer = pytest.importorskip("transformers").AutoTokenizer
AutoModelForSeq2SeqLM = pytest.importorskip("transformers").AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-es")
# define the input sentences we want to translate
data = [
"In this picture, there are four persons: my father, my mother, my brother and my sister.",
"Transformers have rapidly become the model of choice for NLP problems, replacing older recurrent neural network models"
]
explainer_original = shap.Explainer(model, tokenizer)
shap_values_original = explainer_original(data)
temp_serialization_file = tempfile.TemporaryFile()
# Serialization
explainer_original.save(temp_serialization_file)
temp_serialization_file.seek(0)
# Deserialization
explainer_new = shap.Explainer.load(temp_serialization_file)
temp_serialization_file.close()
shap_values_new = explainer_new(data)
assert np.array_equal(shap_values_original[0].base_values,shap_values_new[0].base_values)
assert np.array_equal(shap_values_original[0].values,shap_values_new[0].values)
assert type(explainer_original) == type(explainer_new)
assert type(explainer_original.masker) == type(explainer_new.masker)
| python |
"""
This module handles teams - collections of Characters
"""
from maelstrom.util.serialize import AbstractJsonSerialable
import functools
class Team(AbstractJsonSerialable):
"""
stores and manages Characters
"""
def __init__(self, **kwargs):
"""
Required kwargs:
- name: str
- members: list of Characters. Expects at least 1 member
"""
super().__init__(**dict(kwargs, type="Team"))
self.name = kwargs["name"]
self.members = []
self.membersRemaining = []
for member in kwargs["members"]:
self.addMember(member)
self.addSerializedAttributes("name", "members")
def __str__(self):
return self.name
def addMember(self, member: "Character"):
if member in self.members:
raise Exception(f'cannot add duplicate member {str(member)}')
member.team = self
self.members.append(member)
self.membersRemaining.append(member)
def getXpGiven(self)->int:
"""
provides how much XP this Team provides when encountered
"""
totalLevel = functools.reduce(lambda xp, member: member.level + xp, self.members, 0)
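        # i.e. 10 XP per average member level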
return int(10 * totalLevel / len(self.members))
def eachMember(self, consumer: "function(Character)"):
"""
calls the given consumer on each member of this Team
"""
for member in self.members:
consumer(member)
def eachMemberRemaining(self, consumer: "function(Character)"):
"""
calls the given consumer on each member of this Team who isn't out of
the game
"""
for member in self.membersRemaining:
consumer(member)
def getMembersRemaining(self)->"List<Character>":
"""
returns a shallow copy of this Team's remaining members
"""
return [member for member in self.membersRemaining]
def initForBattle(self):
"""
this method must be called at the start of each Battle
"""
self.membersRemaining.clear()
for member in self.members: # can't use lambda with "each" here
member.initForBattle()
self.membersRemaining.append(member)
self.updateMembersRemaining() # updates ordinals
def isDefeated(self)->bool:
return len(self.membersRemaining) == 0
def updateMembersRemaining(self)->"List<str>":
msgs = []
newList = []
nextOrdinal = 0 # records which index of the array each member is in
for member in self.membersRemaining:
if member.isKoed():
msgs.append(f'{member.name} is out of the game!')
else:
newList.append(member)
member.ordinal = nextOrdinal
nextOrdinal += 1
member.update()
self.membersRemaining = newList
return msgs
| python |
# tifffile/__main__.py
"""Tifffile package command line script."""
import sys
from .tifffile import main
sys.exit(main())
| python |
import pygame
import sys
import numpy as np
pygame.init()
WIDTH = 600
HEIGHT = 600
LINE_WIDTH = 15
WIN_LINE_WIDTH = 15
BOARD_ROWS = 3
BOARD_COLS = 3
SQUARE_SIZE = 200
CIRCLE_RADIUS = 60
CIRCLE_WIDTH = 15
CROSS_WIDTH = 25
SPACE = 55
BG_COLOR = (255,0,0)
LINE_COLOR = (0,0,0)
CIRCLE_COLOR = (239, 231, 200)
CROSS_COLOR = (0,0,0)
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption( 'TIC TAC TOE' )
screen.fill( BG_COLOR )
board = np.zeros( (BOARD_ROWS, BOARD_COLS) )
def draw_lines():
pygame.draw.line( screen, LINE_COLOR, (0, SQUARE_SIZE), (WIDTH, SQUARE_SIZE), LINE_WIDTH )
pygame.draw.line( screen, LINE_COLOR, (0, 2 * SQUARE_SIZE), (WIDTH, 2 * SQUARE_SIZE), LINE_WIDTH )
pygame.draw.line( screen, LINE_COLOR, (SQUARE_SIZE, 0), (SQUARE_SIZE, HEIGHT), LINE_WIDTH )
pygame.draw.line( screen, LINE_COLOR, (2 * SQUARE_SIZE, 0), (2 * SQUARE_SIZE, HEIGHT), LINE_WIDTH )
def draw_figures():
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
if board[row][col] == 1:
pygame.draw.circle( screen, CIRCLE_COLOR, (int( col * SQUARE_SIZE + SQUARE_SIZE//2 ), int( row * SQUARE_SIZE + SQUARE_SIZE//2 )), CIRCLE_RADIUS, CIRCLE_WIDTH )
elif board[row][col] == 2:
pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SPACE), CROSS_WIDTH )
pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), CROSS_WIDTH )
def mark_square(row, col, player):
board[row][col] = player
def available_square(row, col):
return board[row][col] == 0
def is_board_full():
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
if board[row][col] == 0:
return False
return True
def check_win(player):
# vertical win check
for col in range(BOARD_COLS):
if board[0][col] == player and board[1][col] == player and board[2][col] == player:
draw_vertical_winning_line(col, player)
return True
# horizontal win check
for row in range(BOARD_ROWS):
if board[row][0] == player and board[row][1] == player and board[row][2] == player:
draw_horizontal_winning_line(row, player)
return True
# asc diagonal win check
if board[2][0] == player and board[1][1] == player and board[0][2] == player:
draw_asc_diagonal(player)
return True
    # desc diagonal win check
if board[0][0] == player and board[1][1] == player and board[2][2] == player:
draw_desc_diagonal(player)
return True
return False
def draw_vertical_winning_line(col, player):
posX = col * SQUARE_SIZE + SQUARE_SIZE//2
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (posX, 15), (posX, HEIGHT - 15), LINE_WIDTH )
def draw_horizontal_winning_line(row, player):
posY = row * SQUARE_SIZE + SQUARE_SIZE//2
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (15, posY), (WIDTH - 15, posY), WIN_LINE_WIDTH )
def draw_asc_diagonal(player):
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (15, HEIGHT - 15), (WIDTH - 15, 15), WIN_LINE_WIDTH )
def draw_desc_diagonal(player):
if player == 1:
color = CIRCLE_COLOR
elif player == 2:
color = CROSS_COLOR
pygame.draw.line( screen, color, (15, 15), (WIDTH - 15, HEIGHT - 15), WIN_LINE_WIDTH )
def restart():
screen.fill( BG_COLOR )
draw_lines()
for row in range(BOARD_ROWS):
for col in range(BOARD_COLS):
board[row][col] = 0
draw_lines()
player = 1
game_over = False
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN and not game_over:
mouseX = event.pos[0] # x
mouseY = event.pos[1] # y
clicked_row = int(mouseY // SQUARE_SIZE)
clicked_col = int(mouseX // SQUARE_SIZE)
if available_square( clicked_row, clicked_col ):
mark_square( clicked_row, clicked_col, player )
if check_win( player ):
game_over = True
player = player % 2 + 1
draw_figures()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
restart()
player = 1
game_over = False
    pygame.display.update()
| python |
from nnrecsys.models.metrics import mean_reciprocal_rank
import tensorflow as tf
def model_fn(features, labels, mode, params):
print(features)
input_layer, sequence_length = tf.contrib.feature_column.sequence_input_layer(features, params['feature_columns'])
with tf.name_scope('encoder'):
def rnn_cell():
with tf.name_scope('recurrent_layer'):
cell = tf.nn.rnn_cell.GRUCell(params['rnn_units'], activation=params['hidden_activation'])
drop_cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=params['dropout'])
return drop_cell
stacked_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell() for _ in range(params['rnn_layers'])])
x, states = tf.nn.dynamic_rnn(stacked_cell,
inputs=input_layer,
dtype=tf.float32,
sequence_length=sequence_length)
tf.summary.histogram('rnn_outputs', x)
tf.summary.histogram('rnn_state', states)
for variable in stacked_cell.variables:
tf.summary.histogram('gru_vars/' + variable.name, variable)
logits = tf.layers.dense(x, params['n_items'], activation=None)
if mode == tf.estimator.ModeKeys.PREDICT:
scores, predicted_items = tf.nn.top_k(logits,
k=params['k'],
sorted=True,
name='top_k')
predictions = {
'scores': scores,
'item_ids': predicted_items,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
padding_mask = tf.sequence_mask(sequence_length, dtype=tf.float32)
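    # mask out padded timesteps so they do not contribute to the sequence loss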
loss = tf.contrib.seq2seq.sequence_loss(logits, labels, weights=padding_mask, name='seq_loss')
recall_at_k = tf.metrics.recall_at_k(labels, logits, name='recall_at_k', k=params['k'])
reshaped_logits = tf.reshape(logits, (-1, logits.shape[-1]))
reshaped_labels = tf.reshape(labels, (-1,))
one_hot_labels = tf.one_hot(reshaped_labels, depth=logits.shape[-1])
mrr = mean_reciprocal_rank(one_hot_labels, reshaped_logits, topn=params['k'], name='mrr_at_k')
metrics = {'recall_at_k': recall_at_k, 'mrr': mrr}
tf.summary.scalar('recall_at_k', recall_at_k[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=metrics)
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
| python |
import numpy as np
import torch
def default_collate_fn(batch):
batch, targets = zip(*batch)
batch = np.stack(batch, axis=0).astype(np.float32)
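    # convert the image batch from NHWC (numpy layout) to NCHW, which PyTorch models expect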
batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()
for i, target in enumerate(targets):
for k, v in target.items():
if isinstance(v, np.ndarray):
targets[i][k] = torch.from_numpy(v)
return batch, targets
| python |
from random import randint
from django.contrib.auth.models import User
from .models import Analytic, Group
def get_client_ip(request):
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
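    # X-Forwarded-For may contain a comma-separated chain of proxies; the first entry is the original client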
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR")
return ip
def log_analytic(request):
new_analytic = Analytic(
querystring=request.GET.urlencode(),
ip=get_client_ip(request),
path=request.path,
)
if request.user.is_authenticated:
new_analytic.user = User.objects.get(id=request.user.id)
new_analytic.save()
def get_group_route():
new_route = 0
while True:
route_candidate = randint(1000, 9999)
if not Group.objects.filter(route=route_candidate).exists():
new_route = route_candidate
break
return str(new_route)
| python |
from __future__ import absolute_import, print_function, division
import os
import numpy
import theano
from theano.compat import PY3
from theano import config
from theano.compile import DeepCopyOp
from theano.misc.pkl_utils import CompatUnpickler
from .config import test_ctx_name
from .test_basic_ops import rand_gpuarray
from ..type import GpuArrayType, gpuarray_shared_constructor
import pygpu
def test_deep_copy():
a = rand_gpuarray(20, dtype='float32')
g = GpuArrayType(dtype='float32', broadcastable=(False,))('g')
f = theano.function([g], g)
assert isinstance(f.maker.fgraph.toposort()[0].op, DeepCopyOp)
res = f(a)
assert GpuArrayType.values_eq(res, a)
def test_values_eq_approx():
a = rand_gpuarray(20, dtype='float32')
assert GpuArrayType.values_eq_approx(a, a)
b = a.copy()
b[0] = numpy.asarray(b[0]) + 1.
assert not GpuArrayType.values_eq_approx(a, b)
b = a.copy()
b[0] = -numpy.asarray(b[0])
assert not GpuArrayType.values_eq_approx(a, b)
def test_specify_shape():
a = rand_gpuarray(20, dtype='float32')
g = GpuArrayType(dtype='float32', broadcastable=(False,))('g')
f = theano.function([g], theano.tensor.specify_shape(g, [20]))
f(a)
def test_filter_float():
theano.compile.shared_constructor(gpuarray_shared_constructor)
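    # register the GPU shared constructor temporarily; it is removed again in the finally block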
try:
s = theano.shared(numpy.array(0.0, dtype='float32'),
target=test_ctx_name)
theano.function([], updates=[(s, 0.0)])
finally:
del theano.compile.sharedvalue.shared.constructors[-1]
def test_unpickle_gpuarray_as_numpy_ndarray_flag0():
""" Test when pygpu isn't there for unpickle are in test_pickle.py"""
oldflag = config.experimental.unpickle_gpu_on_cpu
config.experimental.unpickle_gpu_on_cpu = False
try:
testfile_dir = os.path.dirname(os.path.realpath(__file__))
fname = 'GpuArray.pkl'
with open(os.path.join(testfile_dir, fname), 'rb') as fp:
if PY3:
u = CompatUnpickler(fp, encoding="latin1")
else:
u = CompatUnpickler(fp)
mat = u.load()
assert isinstance(mat, pygpu.gpuarray.GpuArray)
assert numpy.asarray(mat)[0] == -42.0
finally:
config.experimental.unpickle_gpu_on_cpu = oldflag
| python |
import os
import sys
myfolder = os.path.dirname(os.path.abspath(__file__))
def rpienv_source():
import subprocess
if not os.path.exists(str(myfolder) + '/.rpienv'):
print("[ ENV ERROR ] " + str(myfolder) + "/.rpienv path not exits!")
sys.exit(1)
command = ['bash', '-c', 'source ' + str(myfolder) + '/.rpienv -s && env']
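    # run the command and copy each "name=value" line it prints into os.environ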
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
for line in proc.stdout:
if type(line) is bytes:
line = line.decode("utf-8")
try:
name = line.partition("=")[0]
value = line.partition("=")[2]
if type(value) is unicode:
value = value.encode('ascii','ignore')
value = value.rstrip()
os.environ[name] = value
except Exception as e:
if "name 'unicode' is not defined" != str(e):
print(e)
proc.communicate()
rpienv_source()
lib_path = os.path.join(os.path.dirname(os.environ['CONFIGHANDLERPY']))
sys.path.append(lib_path)
import ConfigHandler
SECTION = "HALARM"
CFG = None
def get_confighandler_object():
global CFG
if CFG is None:
CFG = ConfigHandler.init(validate_print=False)
return CFG
def get_HALARM_value_by_key(option):
global SECTION
cfg = get_confighandler_object()
value = cfg.get(SECTION, option, reparse=False)
return value
if __name__ == "__main__":
print(get_HALARM_value_by_key("cpu_max_temp_alarm_celsius"))
| python |
# ===============================================================================
# Copyright 2020 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from enable.component_editor import ComponentEditor
from traits.api import Instance, HasTraits, Float, List, Int, on_trait_change, Button
from traitsui.api import UItem, TableEditor, HGroup, HSplit
from traitsui.table_column import ObjectColumn
from pychron.canvas.canvas2D.irradiation_canvas import IrradiationCanvas
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pychron_traits import RestrictedStr
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.loggable import Loggable
class Position(HasTraits):
id = Int
x = Float
y = Float
radius = Float(0.1)
def totuple(self):
return self.x, self.y, self.radius, str(self.id)
def dump(self, inches=False):
x, y = self.x, self.y
if inches:
x /= 25.4
y /= 25.4
return '{},{:0.5f},{:0.5f}'.format(self.id, x, y)
class TrayMaker(Loggable):
canvas = Instance(IrradiationCanvas, ())
positions = List
add_position_button = Button
refresh_button = Button
names = List
name = RestrictedStr(name='names')
save_button = Button
def gen(self):
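        # each row is a (hole count, x offset in units of hole spacing) tuple describing the tray layout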
rows = [(5, -2),
(9, -4),
(13, -6),
(15, -7),
(17, -8),
(19, -9),
(19, -9),
(21, -10),
(21, -10),
(23, -11),
(23, -11),
(23, -11),
(23, -11),
(23, -11),
(21, -10),
(21, -10),
(19, -9),
(19, -9),
(17, -8),
(15, -7),
(13, -6),
(9, -4),
(5, -2)]
space = 2
oy = 24
ps = []
for ri, (rc, ox) in enumerate(rows):
y = oy - ri * space
for ji in range(rc):
x = (ox * space) + ji * space
p = Position(x=x, y=y, radius=1)
ps.append(p)
print(x, y)
self.positions = ps
def holes(self):
return [p.totuple() for p in self.positions]
def _add_position_button_fired(self):
p = Position()
self.positions.append(p)
def _save_button_fired(self):
out = 'out.txt'
with open(out, 'w') as wfile:
wfile.write('circle, 0.02\n')
wfile.write('\n\n')
for p in self.positions:
                wfile.write('{}\n'.format(p.dump(inches=True)))
@on_trait_change('positions[], positions:[x,y]')
def _positions_changed(self):
for i, p in enumerate(self.positions):
p.id = i + 1
self.canvas.load_scene(self.holes())
self.canvas.invalidate_and_redraw()
def traits_view(self):
cols = [ObjectColumn(name='id'),
ObjectColumn(name='x'),
ObjectColumn(name='y')]
v = okcancel_view(HGroup(icon_button_editor('add_position_button', 'add'),
icon_button_editor('save_button', 'save')),
UItem('name'),
HSplit(UItem('positions', width=0.25,
editor=TableEditor(columns=cols)),
UItem('canvas',
width=0.75,
editor=ComponentEditor())),
width=900,
height=900,
)
return v
if __name__ == '__main__':
t = TrayMaker()
t.gen()
t.names = ['a', 'bc']
t.configure_traits()
# ============= EOF =============================================
| python |
from lib import action
class ConsulParseNodesAction(action.ConsulBaseAction):
def run(self, data):
nodes = []
        # Loop through the keys and build the list of nodes to return
return nodes
| python |
from fastapi import FastAPI
import routes
from middleware import auth_check
from starlette.middleware.base import BaseHTTPMiddleware
app = FastAPI()
# To run the app, point uvicorn at this instance of the FastAPI class:
#   uvicorn file_name:instance_name --reload
app.include_router(routes.router)
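# run every request through the auth_check middleware before it reaches the routes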
app.add_middleware(BaseHTTPMiddleware, dispatch=auth_check)
| python |
import os
import os.path as op
from ..externals.six.moves import cPickle as pickle
import glob
import warnings
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_true, assert_raises
from mne.datasets import sample
from mne import (label_time_courses, read_label, stc_to_label,
read_source_estimate, read_source_spaces, grow_labels,
labels_from_parc, parc_from_labels)
from mne.label import Label
from mne.utils import requires_mne, run_subprocess, _TempDir, requires_sklearn
from mne.fixes import in1d
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
src_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis-eeg-oct-6p-fwd.fif')
test_path = op.join(op.split(__file__)[0], '..', 'fiff', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
tempdir = _TempDir()
# This code was used to generate the "fake" test labels:
#for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
def assert_labels_equal(l0, l1, decimal=5):
for attr in ['comment', 'hemi', 'subject']:
assert_true(getattr(l0, attr) == getattr(l1, attr))
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_label_subject():
"""Test label subject name extraction
"""
label = read_label(label_fname)
assert_true(label.subject is None)
assert_true('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert_true(label.subject == 'fsaverage')
assert_true('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition
"""
pos = np.random.rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh')
assert len(l0) == len(idx0)
# adding non-overlapping labels
l01 = l0 + l1
assert len(l01) == len(l0) + len(l1)
assert_array_equal(l01.values[:len(l0)], l0.values)
    # adding overlapping labels
l = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l.vertices == 6)[0][0]
assert l.values[i] == l0.values[i0] + l2.values[i2]
assert l.values[0] == l0.values[0]
assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
# adding lh and rh
l2.hemi = 'rh'
# this now has deprecated behavior
bhl = l0 + l2
assert bhl.hemi == 'both'
assert len(bhl) == len(l0) + len(l2)
bhl = l1 + bhl
assert_labels_equal(bhl.lh, l01)
@sample.requires_sample_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files
"""
values, times, vertices = label_time_courses(real_label_fname, stc_fname)
assert_true(len(times) == values.shape[1])
assert_true(len(vertices) == values.shape[0])
def test_label_io():
"""Test IO of label files
"""
label = read_label(label_fname)
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Make sure two sets of labels are equal"""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert_true(label_a.name == label_b.name)
assert_true(label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@sample.requires_sample_data
def test_labels_from_parc():
"""Test reading labels from FreeSurfer parcellation
"""
# test some invalid inputs
assert_raises(ValueError, labels_from_parc, 'sample', hemi='bla',
subjects_dir=subjects_dir)
assert_raises(ValueError, labels_from_parc, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
# read labels using hemi specification
labels_lh, colors_lh = labels_from_parc('sample', hemi='lh',
subjects_dir=subjects_dir)
for label in labels_lh:
assert_true(label.name.endswith('-lh'))
assert_true(label.hemi == 'lh')
assert_true(len(labels_lh) == len(colors_lh))
# read labels using annot_fname
annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
labels_rh, colors_rh = labels_from_parc('sample', annot_fname=annot_fname,
subjects_dir=subjects_dir)
assert_true(len(labels_rh) == len(colors_rh))
for label in labels_rh:
assert_true(label.name.endswith('-rh'))
assert_true(label.hemi == 'rh')
# combine the lh, rh, labels and sort them
labels_lhrh = list()
labels_lhrh.extend(labels_lh)
labels_lhrh.extend(labels_rh)
names = [label.name for label in labels_lhrh]
labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
# read all labels at once
labels_both, colors = labels_from_parc('sample', subjects_dir=subjects_dir)
assert_true(len(labels_both) == len(colors))
# we have the same result
_assert_labels_equal(labels_lhrh, labels_both)
# aparc has 68 cortical labels
assert_true(len(labels_both) == 68)
# test regexp
label = labels_from_parc('sample', parc='aparc.a2009s', regexp='Angu',
subjects_dir=subjects_dir)[0][0]
assert_true(label.name == 'G_pariet_inf-Angular-lh')
# silly, but real regexp:
label = labels_from_parc('sample', parc='aparc.a2009s',
regexp='.*-.{4,}_.{3,3}-L',
subjects_dir=subjects_dir)[0][0]
assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
assert_raises(RuntimeError, labels_from_parc, 'sample', parc='aparc',
annot_fname=annot_fname, regexp='JackTheRipper',
subjects_dir=subjects_dir)
@sample.requires_sample_data
@requires_mne
def test_labels_from_parc_annot2labels():
"""Test reading labels from parc. by comparing with mne_annot2labels
"""
def _mne_annot2labels(subject, subjects_dir, parc):
"""Get labels using mne_annot2lables"""
label_dir = _TempDir()
cwd = os.getcwd()
try:
os.chdir(label_dir)
env = os.environ.copy()
env['SUBJECTS_DIR'] = subjects_dir
cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
run_subprocess(cmd, env=env)
label_fnames = glob.glob(label_dir + '/*.label')
label_fnames.sort()
labels = [read_label(fname) for fname in label_fnames]
finally:
del label_dir
os.chdir(cwd)
return labels
labels, _ = labels_from_parc('sample', subjects_dir=subjects_dir)
labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')
# we have the same result, mne does not fill pos, so ignore it
_assert_labels_equal(labels, labels_mne, ignore_pos=True)
@sample.requires_sample_data
def test_parc_from_labels():
"""Test writing FreeSurfer parcellation from labels"""
labels, colors = labels_from_parc('sample', subjects_dir=subjects_dir)
# write left and right hemi labels:
fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]
for fname in fnames:
parc_from_labels(labels, colors, annot_fname=fname)
# read it back
labels2, colors2 = labels_from_parc('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels22, colors22 = labels_from_parc('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels2.extend(labels22)
colors2.extend(colors22)
names = [label.name for label in labels2]
for label, color in zip(labels, colors):
idx = names.index(label.name)
assert_labels_equal(label, labels2[idx])
assert_array_almost_equal(np.array(color), np.array(colors2[idx]))
# make sure we can't overwrite things
assert_raises(ValueError, parc_from_labels, labels, colors,
annot_fname=fnames[0])
# however, this works
parc_from_labels(labels, colors=None, annot_fname=fnames[0],
overwrite=True)
# test some other invalid inputs
assert_raises(ValueError, parc_from_labels, labels[:-1], colors,
annot_fname=fnames[0], overwrite=True)
colors2 = np.asarray(colors)
assert_raises(ValueError, parc_from_labels, labels, colors2[:, :3],
annot_fname=fnames[0], overwrite=True)
colors2[0] = 1.1
assert_raises(ValueError, parc_from_labels, labels, colors2,
annot_fname=fnames[0], overwrite=True)
@sample.requires_sample_data
@requires_sklearn
def test_stc_to_label():
"""Test stc_to_label
"""
src = read_source_spaces(src_fname)
stc = read_source_estimate(stc_fname, 'sample')
os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
labels1 = stc_to_label(stc, src='sample', smooth=3)
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels2 = stc_to_label(stc, src=src, smooth=3)
assert_true(len(w) == 1)
assert_true(len(labels1) == len(labels2))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=3,
connected=True)
assert_true(len(w) == 1)
assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=3,
connected=True)
assert_true(len(labels_lh) == 1)
assert_true(len(labels_rh) == 1)
@sample.requires_sample_data
def test_morph():
"""Test inter-subject label morphing
"""
label_orig = read_label(real_label_fname)
label_orig.subject = 'sample'
# should work for specifying vertices for both hemis, or just the
# hemi of the given label
vals = list()
for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
label = label_orig.copy()
# this should throw an error because the label has all zero values
assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
label.values.fill(1)
label.morph(None, 'fsaverage', 5, grade, subjects_dir, 2,
copy=False)
label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2,
copy=False)
assert_true(np.mean(in1d(label_orig.vertices, label.vertices)) == 1.0)
assert_true(len(label.vertices) < 3 * len(label_orig.vertices))
vals.append(label.vertices)
assert_array_equal(vals[0], vals[1])
# make sure label smoothing can run
label.morph(label.subject, 'fsaverage', 5,
[np.arange(10242), np.arange(10242)], subjects_dir, 2,
copy=False)
# subject name should be inferred now
label.smooth(subjects_dir=subjects_dir)
@sample.requires_sample_data
def test_grow_labels():
"""Test generation of circular source labels"""
seeds = [0, 50000]
# these were chosen manually in mne_analyze
should_be_in = [[49, 227], [51207, 48794]]
hemis = [0, 1]
labels = grow_labels('sample', seeds, 3, hemis, n_jobs=2)
for label, seed, hemi, sh in zip(labels, seeds, hemis, should_be_in):
assert(np.any(label.vertices == seed))
assert np.all(in1d(sh, label.vertices))
if hemi == 0:
assert(label.hemi == 'lh')
else:
assert(label.hemi == 'rh')
@sample.requires_sample_data
def test_label_time_course():
"""Test extracting label data from SourceEstimate"""
values, times, vertices = label_time_courses(real_label_fname, stc_fname)
stc = read_source_estimate(stc_fname)
label_lh = read_label(real_label_fname)
stc_lh = stc.in_label(label_lh)
assert_array_almost_equal(stc_lh.data, values)
assert_array_almost_equal(stc_lh.times, times)
assert_array_almost_equal(stc_lh.vertno[0], vertices)
label_rh = read_label(real_label_rh_fname)
stc_rh = stc.in_label(label_rh)
label_bh = label_rh + label_lh
stc_bh = stc.in_label(label_bh)
assert_array_equal(stc_bh.data, np.vstack((stc_lh.data, stc_rh.data)))
| python |
from django.contrib import admin
from .models import MataKuliah, Tugas
# Register your models here.
admin.site.register(MataKuliah)
admin.site.register(Tugas)
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-02-16 14:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('folio', '0007_auto_20200216_1720'),
]
operations = [
migrations.AddField(
model_name='profile',
name='profile_pic',
field=models.ImageField(default='kent.jpg', upload_to='pictures/'),
),
migrations.AddField(
model_name='project',
name='project_pic',
field=models.ImageField(default='kent.jpg', upload_to='pictures/'),
),
]
| python |
@bot.command(brief="Kicks a server member", description="b!kick <member> [reason]")
@commands.has_permissions(kick_members=True)
async def kick(ctx, member: discord.Member, *, reason=None):
try:
await member.kick(reason=reason)
await ctx.send(f'User {member} has been kicked.')
    except discord.Forbidden:
        await ctx.send("The bot is missing permissions\n\nMake sure the Bot's top-most role is above the member's top-most role (the member who you are going to kick)")
| python |
#!/usr/bin/env python3
""" Python module to assist creating and maintaining docker openHab stacks."""
import crypt
from enum import Enum
from typing import NamedTuple
import logging
import os
import sys
import json as pyjson
from hashlib import md5
from shutil import copy2
from subprocess import PIPE, run
from time import sleep
import bcrypt
import docker
import questionary as qust
from ruamel.yaml import YAML
from prompt_toolkit.styles import Style
# Configure YAML
yaml = YAML()
yaml.indent(mapping=4, sequence=4, offset=2)
# Log level during development is info
logging.basicConfig(level=logging.WARNING)
# Prompt style
st = Style([
('qmark', 'fg:#00c4b4 bold'), # token in front of question
('question', 'bold'), # question text
('answer', 'fg:#00c4b4 bold'), # submitted answer question
('pointer', 'fg:#00c4b4 bold'), # pointer for select and checkbox
('selected', 'fg:#00c4b4'), # selected item checkbox
('separator', 'fg:#00c4b4'), # separator in lists
('instruction', '') # user instructions for selections
])
# ******************************
# Constants <<<
# ******************************
# Directories for config generation
CUSTOM_DIR = 'custom_configs'
TEMPLATE_DIR = 'template_configs'
COMPOSE_NAME = 'docker-stack.yml'
SKELETON_NAME = 'docker-skeleton.yml'
TEMPLATES_NAME = 'docker-templates.yml'
CONFIG_DIRS = ['mosquitto', 'nodered', 'ssh', 'filebrowser',
'traefik', 'volumerize', 'postgres', 'pb-framr']
TEMPLATE_FILES = [
'mosquitto/mosquitto.conf', 'nodered/nodered_package.json',
'pb-framr/logo.svg', 'nodered/nodered_settings.js',
'ssh/sshd_config', 'traefik/traefik.toml'
]
EDIT_FILES = {
"mosquitto_passwords": "mosquitto/mosquitto_passwords",
"sftp_users": "ssh/sftp_users.conf",
"traefik_users": "traefik/traefik_users",
"id_rsa": "ssh/id_rsa",
"host_key": "ssh/ssh_host_ed25519_key",
"known_hosts": "ssh/known_hosts",
"backup_config": "volumerize/backup_config",
"postgres_user": "postgres/user",
"postgres_passwd": "postgres/passwd",
"pb_framr_pages": "pb-framr/pages.json",
"filebrowser_conf": "filebrowser/filebrowser.json"
}
CONSTRAINTS = {"building": "node.labels.building"}
# Default Swarm port
SWARM_PORT = 2377
# UID for admin
UID = 9001
# Username for admin
ADMIN_USER = 'ohadmin'
# USB DEVICES (e.g. Zwave stick)
USB_DEVICES = [{
"name": "Aeotec Z-Stick Gen5 (ttyACM0)",
"value": "zwave_stick"
}]
class ServiceBody(NamedTuple):
fullname: str
prefix: str
additional: bool
frontend: bool
sftp: bool = False
icon: str = None
class Service(ServiceBody, Enum):
SFTP = ServiceBody("SFTP", "sftp", False, False)
OPENHAB = ServiceBody("OpenHAB", "openhab", True,
True, icon='dashboard', sftp=True)
NODERED = ServiceBody("Node-RED", "nodered", False,
True, icon='ballot', sftp=True)
POSTGRES = ServiceBody("Postgre SQL", "postgres", True, False)
MQTT = ServiceBody("Mosquitto MQTT Broker", "mqtt", True, False)
FILES = ServiceBody("File Manager", "files", False, True, icon='folder')
BACKUP = ServiceBody("Volumerize Backups", "backup",
False, False, sftp=True)
@classmethod
def service_by_prefix(cls, prefix):
# cls here is the enumeration
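        # e.g. service_by_prefix('openhab') returns Service.OPENHAB (illustrative)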
return next(service for service in cls if service.prefix == prefix)
# >>>
# ******************************
# State Variables <<<
# ******************************
base_dir = sys.path[0]
template_path = f'{base_dir}/{TEMPLATE_DIR}'
custom_path = f'{base_dir}/{CUSTOM_DIR}'
# >>>
# ******************************
# Compose file functions <<<
# ******************************
# Functions to generate initial file
def generate_initial_compose():
"""Creates the initial compose using the skeleton
"""
# compose file
compose = custom_path + '/' + COMPOSE_NAME
# skeleton file
skeleton = template_path + '/' + SKELETON_NAME
with open(skeleton, 'r') as skeleton_f, open(compose, 'w+') as compose_f:
init_content = yaml.load(skeleton_f)
yaml.dump(init_content, compose_f)
def add_sftp_service(building, number=0):
"""Generates an sftp entry and adds it to the compose file
    :building: name of the building that the service is added to
:number: increment of exposed port to prevent overlaps
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'sftp_{building}'
# template
template = get_service_template(Service.SFTP.prefix)
    # only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
template['ports'] = [f'{2222 + number}:22']
# attach volumes
volume_base = '/home/ohadmin/'
template['volumes'] = get_attachable_volume_list(volume_base, building)
add_or_update_compose_service(compose_path, service_name, template)
def add_openhab_service(building, host):
"""Generates an openhab entry and adds it to the compose file
:building: name of building that the services is added to
:host: host the building is added to, used for routing
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'openhab_{building}'
# template
template = get_service_template(Service.OPENHAB.prefix)
    # only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
# include in backups of this building
template['deploy']['labels'].append(f'backup={building}')
# traefik backend
template['deploy']['labels'].append(f'traefik.backend={service_name}')
# traefik frontend domain->openhab
template['deploy']['labels'].extend(
generate_traefik_host_labels(host, segment='main'))
# traefik frontend subdomain openhab_hostname.* -> openhab
template['deploy']['labels'].append(
f'traefik.sub.frontend.rule=HostRegexp:'
        f'{service_name}.{{domain:[a-zA-Z0-9-]+}}')
template['deploy']['labels'].append('traefik.sub.frontend.priority=2')
# replace volumes with named entries in template
template['volumes'] = generate_named_volumes(
template['volumes'], service_name, compose_path)
add_or_update_compose_service(compose_path, service_name, template)
def move_openhab_service(building, new_host):
"""Updates an openhab entry to be accessible on another host
    :building: name of the building that the service uses
    :new_host: host the building service is moved to, used for routing
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'openhab_{building}'
# template
entry = get_service_entry(service_name)
# traefik remove old domain by filtering
old_labels = entry['deploy']['labels']
filtered_labels = [
l for l in old_labels
if not l.startswith('traefik.main.frontend')]
# traefik frontend new_domain->openhab
filtered_labels.extend(
generate_traefik_host_labels(new_host, segment='main'))
entry['deploy']['labels'] = filtered_labels
add_or_update_compose_service(compose_path, service_name, entry)
def add_nodered_service(building):
"""Generates an nodered entry and adds it to the compose file
:building: name of building that the services is added to
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'nodered_{building}'
# template
template = get_service_template(Service.NODERED.prefix)
    # only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
template['deploy']['labels'].append(f'traefik.backend={service_name}')
template['deploy']['labels'].append(f'backup={building}')
template['deploy']['labels'].extend(
generate_traefik_path_labels(service_name, segment='main'))
template['deploy']['labels'].extend(
generate_traefik_subdomain_labels(service_name, segment='sub'))
# replace volumes with named entries in template
template['volumes'] = generate_named_volumes(
template['volumes'], service_name, compose_path)
add_or_update_compose_service(compose_path, service_name, template)
def add_mqtt_service(building, number=0):
"""Generates an mqtt entry and adds it to the compose file
:building: name of building that the services is added to
:number: increment of exposed port to prevent overlaps
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'mqtt_{building}'
# template
template = get_service_template(Service.MQTT.prefix)
    # only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
# ports incremented by number of services
template['ports'] = [f'{1883 + number}:1883', f'{9001 + number}:9001']
# replace volumes with named entries in template
template['volumes'] = generate_named_volumes(
template['volumes'], service_name, compose_path)
add_or_update_compose_service(compose_path, service_name, template)
def add_postgres_service(building, postfix=None):
"""Generates an postgres entry and adds it to the compose file
:building: name of building that the services is added to
:postfix: an identifier for this service
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# use building as postfix when empty
if postfix is None:
service_name = f'postgres_{building}'
else:
service_name = f'postgres_{postfix}'
# template
template = get_service_template(Service.POSTGRES.prefix)
# only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
# replace volumes with named entries in template
template['volumes'] = generate_named_volumes(
template['volumes'], service_name, compose_path)
add_or_update_compose_service(compose_path, service_name, template)
def add_file_service(building):
"""Generates a file manager entry and adds it to the compose file
    :building: name of the host that the service is added to
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'{Service.FILES.prefix}_{building}'
# template
template = get_service_template(Service.FILES.prefix)
# add command that sets base url
template['command'] = f'-b /{service_name}'
    # only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
template['deploy']['labels'].append(f'traefik.backend={service_name}')
template['deploy']['labels'].extend(
generate_traefik_path_labels(service_name, segment='main',
redirect=False))
# attach volumes
volume_base = '/srv/'
template['volumes'] = get_attachable_volume_list(volume_base, building)
add_or_update_compose_service(compose_path, service_name, template)
def add_volumerize_service(building):
"""Generates a volumerize backup entry and adds it to the compose file
    :building: name of the host that the service is added to
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# service name
service_name = f'{Service.BACKUP.prefix}_{building}'
# template
template = get_service_template(Service.BACKUP.prefix)
    # only label constraint is building
template['deploy']['placement']['constraints'][0] = (
f"{CONSTRAINTS['building']} == {building}")
# attach volumes
volume_base = '/source/'
template['volumes'].extend(
get_attachable_volume_list(volume_base, building))
# adjust config
config_list = template['configs']
# get backup entry from configs
index, entry = next((i, c) for i, c in enumerate(config_list)
if c['source'] == 'backup_config')
entry['source'] = f'backup_config_{building}'
template['configs'][index] = entry
add_or_update_compose_service(compose_path, service_name, template)
# Functions to delete services
def delete_service(service_name):
    """Deletes a service from the compose file
    :service_name: name of the service to remove
    """
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
with open(compose_path, 'r+') as compose_f:
# load compose file
compose = yaml.load(compose_f)
        # remove the service entry if it exists
compose['services'].pop(service_name, None)
# start writing from file start
compose_f.seek(0)
# write new compose content
yaml.dump(compose, compose_f)
# reduce file to new size
compose_f.truncate()
# Functions to extract information
def get_current_services(placement=None):
"""Gets a list of currently used services may be restricted to a placement
:placement: placement contraint the service shall match
:returns: list of current services
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
with open(compose_path, 'r') as compose_f:
# load compose file
compose = yaml.load(compose_f)
# generate list of names
service_names = []
for (name, entry) in compose['services'].items():
if placement is None or get_building_of_entry(entry) == placement:
service_names.append(name)
return service_names
def get_current_building_constraints():
"""Gets a list of currently used building constraints
:returns: set of current buildings
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
with open(compose_path, 'r') as compose_f:
# load compose file
compose = yaml.load(compose_f)
# generate list of buildings
building_names = set()
for (name, entry) in compose['services'].items():
building = get_building_of_entry(entry)
if building:
building_names.add(building)
return building_names
def get_building_of_entry(service_dict):
"""Extract the configured building constraint from an yaml service entry
:service_dict: service dict from yaml
:returns: building that is set
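    e.g. a constraint 'node.labels.building == b1' yields 'b1' (illustrative)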
"""
# get constraints
constraint_list = service_dict['deploy']['placement']['constraints']
# convert them to dicts
label_dict = {i.split("==")[0].strip(): i.split("==")[1].strip()
for i in constraint_list}
return label_dict.get('node.labels.building')
def get_service_entry_info(service_entry):
"""Gets service name and instance of a service entry
:service_entry: service entry name
:return: tuple with service_name and instance name
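    e.g. 'openhab_b1' -> ('openhab', 'b1') (illustrative)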
"""
entry_split = service_entry.split("_")
name = entry_split[0]
instance = entry_split[1]
return name, instance
def get_service_volumes(service_name):
"""Gets a list of volumes of a service
:returns: list of volumes
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
with open(compose_path, 'r') as compose_f:
# load compose file
compose = yaml.load(compose_f)
# load service
service = compose['services'].get(service_name)
# extract volume names
volume_dict = yaml_list_to_dict(service['volumes'])
volumes = list(volume_dict.keys())
# filter only named volumes
named_volumes = [v for v in volumes if '/' not in v]
return named_volumes
# Helper functions
def get_attachable_volume_list(volume_base, building):
"""Get a list of volumes from a building that can be attatched for file acccess
:volume_base: Base path of volumes
:building: building to consider
:returns: list of attachable volume entries
"""
volume_list = []
host_services = get_current_services(building)
for host_service in host_services:
name, instance = get_service_entry_info(host_service)
volume_service = Service.service_by_prefix(name)
        # only apply to services that want their volumes attached
if volume_service.sftp:
volumes = get_service_volumes(host_service)
# collect volumes not already in list
vlist = [
f'{v}:{volume_base}{v}' for v in volumes
if f'{v}:{volume_base}{v}' not in volume_list]
volume_list.extend(vlist)
return volume_list
def generate_named_volumes(template_volume_list, service_name, compose_path):
"""Generates volumes including name of services and ads them to
the compose file
:template_volume_list: List of volume entries from template
:service_name: Name of the service instance
:compose_path: path to compose file
:returns: list of named entries
"""
volume_entries = yaml_list_to_dict(template_volume_list)
    # add service name to entries that are named volumes
named_volume_entries = {}
for (volume, target) in volume_entries.items():
if "/" not in volume:
named_volume_entries[f"{service_name}_{volume}"] = target
else:
named_volume_entries[f"{volume}"] = target
for (volume, target) in named_volume_entries.items():
# declare volume if it is a named one
if "/" not in volume:
add_volume_entry(compose_path, volume)
return dict_to_yaml_list(named_volume_entries)
def yaml_list_to_dict(yaml_list):
"""Converts a yaml list (volumes, configs etc) into a python dict
:yaml_list: list of a yaml containing colon separated entries
:return: python dict
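    e.g. ['openhab_addons:/openhab/addons'] -> {'openhab_addons': '/openhab/addons'}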
"""
return {i.split(":")[0]: i.split(":")[1] for i in yaml_list}
def dict_to_yaml_list(pdict):
"""Converts a python dict into a yaml list (volumes, configs etc)
:pdict: python dict
:return: list of a yaml containing colon separated entries
"""
return [f'{k}:{v}' for (k, v) in pdict.items()]
def get_service_entry(service_name):
"""Gets a service entry from the compose yaml
:return: yaml entry of a service
"""
# compose file
compose_path = f'{custom_path}/{COMPOSE_NAME}'
with open(compose_path, 'r') as templates_file:
compose_content = yaml.load(templates_file)
return compose_content['services'][service_name]
def get_service_template(service_name):
"""Gets a service template entry from the template yaml
:return: yaml entry of a service
"""
templates = template_path + '/' + TEMPLATES_NAME
with open(templates, 'r') as templates_file:
template_content = yaml.load(templates_file)
return template_content['services'][service_name]
def generate_traefik_host_labels(hostname, segment=None, priority=1):
"""Generates a traefik path url with necessary redirects
:hostname: Hostname that gets assigned by the label
:segment: Optional traefik segment when using multiple rules
:priority: Priority of frontend rule
:returns: list of labels for traefik
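    e.g. hostname 'house1' with segment 'main' yields (illustrative):
    ['traefik.main.frontend.rule=HostRegexp:{domain:house1}',
     'traefik.main.frontend.priority=1']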
"""
label_list = []
# check segment
segment = f'.{segment}' if segment is not None else ''
# fill list
label_list.append(
f'traefik{segment}.frontend.rule=HostRegexp:{{domain:{hostname}}}')
label_list.append(f'traefik{segment}.frontend.priority={priority}')
return label_list
def generate_traefik_subdomain_labels(subdomain, segment=None, priority=2):
"""Generates a traefik subdomain with necessary redirects
:subdomain: subdomain that will be assigned to a service
:segment: Optional traefik segment when using multiple rules
:priority: Priority of frontend rule
:returns: list of labels for traefik
"""
label_list = []
# check segment
segment = f'.{segment}' if segment is not None else ''
# fill list
label_list.append(
f'traefik{segment}.frontend.rule='
        f'HostRegexp:{subdomain}.{{domain:[a-zA-Z0-9-]+}}')
label_list.append(f'traefik{segment}.frontend.priority={priority}')
return label_list
def generate_traefik_path_labels(url_path, segment=None, priority=2,
redirect=True):
"""Generates a traefik path url with necessary redirects
:url_path: path that should be used for the site
:segment: Optional traefik segment when using multiple rules
:priority: Priority of frontend rule
:redirect: Redirect to path with trailing slash
:returns: list of labels for traefik
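    e.g. url_path 'files_b1', segment 'main', redirect=False yields (illustrative):
    ['traefik.main.frontend.priority=2',
     'traefik.main.frontend.rule=PathPrefix:/files_b1']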
"""
label_list = []
# check segment
segment = f'.{segment}' if segment is not None else ''
# fill list
label_list.append(f'traefik{segment}.frontend.priority={priority}')
if redirect:
label_list.append(
f'traefik{segment}.frontend.redirect.regex=^(.*)/{url_path}$$')
label_list.append(
f'traefik{segment}.frontend.redirect.replacement=$$1/{url_path}/')
label_list.append(
f'traefik{segment}.frontend.rule=PathPrefix:/{url_path};'
f'ReplacePathRegex:^/{url_path}/(.*) /$$1')
else:
label_list.append(
f'traefik{segment}.frontend.rule=PathPrefix:/{url_path}')
return label_list
def add_or_update_compose_service(compose_path, service_name, service_content):
"""Adds or replaces a service in a compose file
:compose_path: path of the compose file to change
:service_name: name of the service to add/replace
:service_content: service definition to add
"""
with open(compose_path, 'r+') as compose_f:
# load compose file
compose = yaml.load(compose_f)
# add / update service with template
compose['services'][service_name] = service_content
# write content starting from first line
compose_f.seek(0)
# write new compose content
yaml.dump(compose, compose_f)
# reduce file to new size
compose_f.truncate()
def add_volume_entry(compose_path, volume_name):
"""Creates an additional volume entry in the stack file
:compose_path: path of the compose file to change
:volume_name: name of the additional volume
"""
with open(compose_path, 'r+') as compose_f:
# load compose file
compose = yaml.load(compose_f)
# add volume
compose['volumes'][volume_name] = None
# write content starting from first line
compose_f.seek(0)
# write new compose content
yaml.dump(compose, compose_f)
# reduce file to new size
compose_f.truncate()
def add_config_entry(compose_path, config_name, config_path):
"""Creates an additional config entry in the stack file or updates it
:compose_path: path of the compose file to change
:config_name: name of the additional config
:config_path: path of the additional config
"""
with open(compose_path, 'r+') as compose_f:
# load compose file
compose = yaml.load(compose_f)
# add config
compose['configs'][config_name] = {"file": config_path}
# write content starting from first line
compose_f.seek(0)
# write new compose content
yaml.dump(compose, compose_f)
# reduce file to new size
compose_f.truncate()
# >>>
# ******************************
# Config file functions <<<
# ******************************
def generate_config_folders():
"""Generate folders for configuration files
"""
if not os.path.exists(custom_path):
os.makedirs(custom_path)
print(f'Initialize configuration in {custom_path}')
# generate empty config dirs
for d in CONFIG_DIRS:
new_dir = f'{custom_path}/{d}'
if not os.path.exists(new_dir):
os.makedirs(new_dir)
# copy template configs
for template_file in TEMPLATE_FILES:
copy_template_config(template_file)
def copy_template_config(config_path):
"""Copies template configuration files into custom folder
:config_path: relative path of config to copy from template
"""
custom_config_path = f'{custom_path}/{config_path}'
template_config = f"{template_path}/{config_path}"
logging.info(
f'Copy {config_path} from {template_config} to {custom_path}')
copy2(template_config, custom_config_path)
def generate_mosquitto_user_line(username, password):
"""Generates a line for a mosquitto user with a crypt hashed password
:username: username to use
:password: password that will be hashed (SHA512)
:returns: a line as expected by mosquitto
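    e.g. 'ohadmin:$6$<salt>$<hash>' (illustrative, the hash depends on the salt)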
"""
password_hash = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
line = f"{username}:{password_hash}"
return line
def generate_sftp_user_line(username, password, directories=None):
"""Generates a line for a sftp user with a hashed password
:username: username to use
:password: password that will be hashed (SHA512)
:directories: list of directories which the user should have
:returns: a line as expected by sshd
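    e.g. 'ohadmin:$6$<salt>$<hash>:e:9001:9001:backup_data/backup' (illustrative)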
"""
# generate user line with hashed password
password_hash = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
line = f"{username}:{password_hash}:e:{UID}:{UID}"
# add directory entries when available
if directories:
# create comma separated string from list
dir_line = ','.join(d for d in directories)
line = f"{line}:{dir_line}"
return line
def generate_traefik_user_line(username, password):
"""Generates a line for a traefik user with a bcrypt hashed password
:username: username to use
:password: password that will be hashed (bcrypt)
:returns: a line as expected by traefik
"""
password_hash = get_bcrypt_hash(password)
line = f"{username}:{password_hash}"
return line
def generate_pb_framr_entry(building, host, service):
"""Generates a single entry of the framr file
:building: building this entry is intended for
:host: host this entry is intended for
:service: entry from service enum
:returns: a dict fitting the asked entry
"""
entry = {}
entry['title'] = service.fullname
if service == Service.OPENHAB:
        entry['url'] = f'http://{host}/'
else:
entry['url'] = f'/{service.prefix}_{building}/'
entry['icon'] = service.icon
return entry
def generate_mosquitto_file(username, password):
"""Generates a mosquitto password file using mosquitto_passwd system tool
:username: username to use
:password: password that will be used
"""
passwd_path = f"{custom_path}/{EDIT_FILES['mosquitto_passwords']}"
# ensure file exists
if not os.path.exists(passwd_path):
open(passwd_path, 'a').close()
# execute mosquitto passwd
mos_result = run(
['mosquitto_passwd', '-b', passwd_path, username, password],
universal_newlines=True)
return mos_result.returncode == 0
def generate_sftp_file(username, password, directories=None):
    """Generates an sftp password file
    :username: username to use
    :password: password that will be used
    :directories: list of directories the user should have access to
    """
    # generate line and save it into a file
    file_content = generate_sftp_user_line(username, password, directories)
    create_or_replace_config_file(EDIT_FILES['sftp_users'], file_content)
def generate_postgres_files(username, password):
"""Generates postgres user and password files
:username: username to use
:password: password that will be used
"""
# content is purely username and (hashed) password
hashed_pass = (
f'md5{md5(username.encode() + password.encode()).hexdigest()}')
create_or_replace_config_file(EDIT_FILES['postgres_user'], username)
create_or_replace_config_file(EDIT_FILES['postgres_passwd'], hashed_pass)
def generate_id_rsa_files():
"""Generates id_rsa and id_rsa.pub private/public keys using ssh-keygen
"""
id_path = f"{custom_path}/{EDIT_FILES['id_rsa']}"
# execute ssh-keygen
id_result = run(
['ssh-keygen', '-m', 'PEM', '-t', 'rsa',
'-b', '4096', '-f', id_path, '-N', ''],
universal_newlines=True, stdout=PIPE)
return id_result.returncode == 0
def generate_host_key_files(hosts):
    """Generates ssh host keys and matching known_hosts using ssh-keygen
    :hosts: list of building ids that run an sftp service
    """
key_path = f"{custom_path}/{EDIT_FILES['host_key']}"
# ssh-keygen generates public key with .pub postfix
pub_path = key_path + '.pub'
# host_names with sftp_ postfix
sftp_hosts = [f'sftp_{host}' for host in hosts]
# execute ssh-keygen
id_result = run(['ssh-keygen', '-t', 'ed25519', '-f', key_path, '-N', ''],
universal_newlines=True, stdout=PIPE)
# read content of public key as known line
known_line = ""
with open(pub_path, 'r') as pub_file:
pub_line = pub_file.readline()
split_line = pub_line.split()
# delete last list element
del split_line[-1]
# collect sftp hosts as comma separated string
hosts_line = ','.join(h for h in sftp_hosts)
split_line.insert(0, hosts_line)
# collect parts as space separated string
known_line = ' '.join(sp for sp in split_line)
# write new known_line file
create_or_replace_config_file(EDIT_FILES['known_hosts'], known_line)
return id_result.returncode == 0
def generate_filebrowser_file(username, password):
"""Generates a configuration for the filebrowser web app
:username: username to use
:password: password that will be used
"""
# generate line and save it into a file
file_content = {
"port": "80",
"address": "",
"username": f"{username}",
"password": f"{get_bcrypt_hash(password)}",
"log": "stdout",
"root": "/srv"
}
create_or_replace_config_file(EDIT_FILES['filebrowser_conf'],
file_content, json=True)
def generate_traefik_file(username, password):
"""Generates a traefik password file
:username: username to use
:password: password that will be used
"""
# generate line and save it into a file
file_content = generate_traefik_user_line(username, password)
create_or_replace_config_file(EDIT_FILES['traefik_users'], file_content)
def generate_volumerize_files(host_entries):
"""Generates config for volumerize backups
    :host_entries: dict of host entries
"""
compose_path = f'{custom_path}/{COMPOSE_NAME}'
# create one config per host
for h in host_entries:
configs = []
# Each host knows other hosts
for t in host_entries:
host_config = {
'description': f"'Backup Server on {t['building_name']}",
'url': f"sftp://ohadmin@sftp_{t['building_id']}:"
f"//home/ohadmin/backup_data/backup/{h['building_id']}"
}
configs.append(host_config)
config_file = f"{EDIT_FILES['backup_config']}_{h['building_id']}.json"
create_or_replace_config_file(config_file, configs, json=True)
add_config_entry(
compose_path,
f"backup_config_{h['building_id']}",
f"./{config_file}")
def generate_pb_framr_file(frames):
    """Generates config for pb framr landing page
    :frames: a list of dicts that contain hosts with matching name and services
"""
configs = []
for f in frames:
building = {
'instance': f['building_name'],
'entries': [generate_pb_framr_entry(f['building_id'], f['host'], s)
for s in f['services'] if s.frontend]
}
configs.append(building)
create_or_replace_config_file(
EDIT_FILES['pb_framr_pages'], configs, json=True)
def update_pb_framr_host(old_host, new_host):
"""Updates framr config to use changed host name
:old_host: old host that shall be replaced
:new_host: host that will be the new target
"""
configs = []
config_path = EDIT_FILES['pb_framr_pages']
custom_config_path = f'{custom_path}/{config_path}'
with open(custom_config_path, 'r') as file:
configs = pyjson.load(file)
for c in configs:
for e in c['entries']:
if e['url'] == f"http://{old_host}/":
e['url'] = f"http://{new_host}/"
if configs:
create_or_replace_config_file(
EDIT_FILES['pb_framr_pages'], configs, json=True)
def create_or_replace_config_file(config_path, content, json=False):
    """Creates or replaces a config file with new content
    :config_path: relative path of config
    :content: content of the file as a string
    :json: dump the content as JSON instead of writing it verbatim
    """
    custom_config_path = f'{custom_path}/{config_path}'
    with open(custom_config_path, 'w+') as file:
        if json:
            # use the module level json import to avoid shadowing the parameter
            pyjson.dump(content, file, indent=2)
        else:
            file.write(content)
# Functions to modify existing files
def add_user_to_traefik_file(username, password):
"""Adds or modifies user in traefik file
:username: username to use
:password: password that will be used
"""
# get current users
current_users = get_traefik_users()
# ensure to delete old entry if user exists
users = [u for u in current_users if u['username'] != username]
# collect existing users lines
user_lines = []
for u in users:
user_lines.append(f"{u['username']}:{u['password']}")
# add new/modified user
user_lines.append(generate_traefik_user_line(username, password))
# generate content
file_content = "\n".join(user_lines)
create_or_replace_config_file(EDIT_FILES['traefik_users'], file_content)
def remove_user_from_traefik_file(username):
"""Removes user from traefik file
:username: username to delete
"""
# get current users
current_users = get_traefik_users()
# ensure to delete entry if user exists
users = [u for u in current_users if u['username'] != username]
# collect other user lines
user_lines = []
for u in users:
user_lines.append(f"{u['username']}:{u['password']}")
# generate content and write file
file_content = "\n".join(user_lines)
create_or_replace_config_file(EDIT_FILES['traefik_users'], file_content)
# Functions to get content from files
def get_users_from_files():
"""Gets a list of users in files
:returns: list of users
"""
users = []
    # add traefik users
users.extend([u['username'] for u in get_traefik_users()])
return users
def get_traefik_users():
"""Gets a list of dicts containing users and password hashes
:returns: list of users / password dicts
"""
users = []
    # get traefik users
traefik_file = f"{custom_path}/{EDIT_FILES['traefik_users']}"
with open(traefik_file, 'r') as file:
lines = file.read().splitlines()
for line in lines:
            # username in traefik file is the first entry until the colon
            username, password = line.split(':', 1)
users.append({"username": username, "password": password})
return users
# Additional helper functions
def get_bcrypt_hash(password):
"""Returns bcrypt hash for a password
:password: password to hash
:returns: bcrypt hash of password
"""
return bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
# >>>
# ******************************
# Docker machine functions <<<
# ******************************
def get_machine_list():
"""Get a list of docker machine names using the docker-machine system command
:returns: a list of machine names managed by docker-machine
"""
machine_result = run(['docker-machine', 'ls', '-q'],
universal_newlines=True,
stdout=PIPE)
return machine_result.stdout.splitlines()
def check_machine_exists(machine_name):
"""Checks weather a docker machine exists and is available
:machine_name: Name of the machine to check
:returns: True when machine is available
"""
machines = get_machine_list()
return machine_name in machines
def get_machine_env(machine_name):
"""Gets dict of env settings from a machine
:machine_name: Name of the machine to check
:returns: Dict of env variables for this machine
"""
env_result = run(['docker-machine', 'env', machine_name],
universal_newlines=True,
stdout=PIPE)
machine_envs = {}
lines = env_result.stdout.splitlines()
for line in lines:
if 'export' in line:
assign = line.split('export ', 1)[1]
env_entry = [a.strip('"') for a in assign.split('=', 1)]
machine_envs[env_entry[0]] = env_entry[1]
return machine_envs
def get_machine_ip(machine_name):
"""Asks for the ip of the docker machine
:machine_name: Name of the machine to use for init
"""
machine_result = run(['docker-machine', 'ip', machine_name],
universal_newlines=True,
stdout=PIPE)
return machine_result.stdout.strip()
def init_swarm_machine(machine_name):
"""Creates a new swarm with the specified machine as leader
:machine_name: Name of the machine to use for init
:return: True if swarm init was successful
"""
machine_ip = get_machine_ip(machine_name)
init_command = 'docker swarm init --advertise-addr ' + machine_ip
init_result = run(['docker-machine', 'ssh', machine_name, init_command],
universal_newlines=True)
return init_result.returncode == 0
def join_swarm_machine(machine_name, leader_name):
"""Joins the swarm of the specified leader
:machine_name: Name of the machine to join a swarm
:leader_name: Name of the swarm leader machine
:return: True if join to swarm was successful
"""
token_command = 'docker swarm join-token manager -q'
token_result = run(['docker-machine', 'ssh', leader_name, token_command],
universal_newlines=True,
stdout=PIPE)
token = token_result.stdout.strip()
leader_ip = get_machine_ip(leader_name)
logging.info(f"Swarm leader with ip {leader_ip} uses token {token}")
join_cmd = f'docker swarm join --token {token} {leader_ip}:{SWARM_PORT}'
logging.info(f'Machine {machine_name} joins using command {join_cmd}')
join_result = run(['docker-machine', 'ssh', machine_name, join_cmd],
universal_newlines=True)
return join_result.returncode == 0
def generate_swarm(machines):
"""Generates a swarm, the first machine will be the initial leader
:machines: list of machines in the swarm
"""
leader = None
for machine in machines:
# init swarm with first machine
if leader is None:
leader = machine
print(f'Create initial swarm with leader {leader}')
if init_swarm_machine(leader):
print('Swarm init successful\n')
assign_label_to_node(leader, 'building',
leader, manager=leader)
else:
print(f'Machine {machine} joins swarm of leader {leader}')
if (join_swarm_machine(machine, leader)):
print('Joining swarm successful\n')
assign_label_to_node(machine, 'building',
machine, manager=leader)
def check_dir_on_machine(dirpath, machine):
"""Checks weather a dir exists on a machine
:dirpath: Directory to check
:machine: Machine to check
:returns: True when dir exists false otherwise
"""
check_command = f"[ -d {dirpath} ]"
check_result = run(['docker-machine', 'ssh', machine, check_command])
return check_result.returncode == 0
def check_file_on_machine(filepath, machine):
"""Checks weather a file exists on a machine
:filepath: File to check
:machine: Machine to check
:returns: True when file exists false otherwise
"""
check_command = f"[ -f {filepath} ]"
check_result = run(['docker-machine', 'ssh', machine, check_command])
return check_result.returncode == 0
def copy_files_to_machine(filepath, machine):
"""Copyies a directory and its content or a file to a machine
:filepath: Direcotry or file to copy
:machine: Machine to copy to
"""
run(['docker-machine', 'scp', '-r', filepath, f'{machine}:'])
def execute_command_on_machine(command, machine):
"""Executes a command on a docker machine
:command: Command to execute
:machine: Machine to execute command
"""
run([f'docker-machine ssh {machine} {command}'], shell=True)
# >>>
# ******************************
# Systemd functions <<<
# ******************************
def list_enabled_devices():
"""Presents a list of enabled devices (systemd services)
:returns: list of enabled devices
"""
list_result = run(['systemctl', 'list-units'],
stdout=PIPE, universal_newlines=True)
device_list = list_result.stdout.splitlines()
# Filter out only swarm-device services
device_list = [d.strip() for d in device_list if 'swarm-device' in d]
# Extract service name
device_list = [d.split()[0] for d in device_list]
return device_list
# >>>
# ******************************
# Docker client commands <<<
# ******************************
def deploy_docker_stack(machine):
"""Deploys the custom stack in the custom_path
:machine: Docker machine to execute command
"""
# Set CLI environment to target docker machine
machine_env = get_machine_env(machine)
os_env = os.environ.copy()
os_env.update(machine_env)
# Get compose file and start stack
compose_file = f'{custom_path}/{COMPOSE_NAME}'
deploy_command = f'docker stack deploy -c {compose_file} ohpb'
run([f'{deploy_command}'], shell=True, env=os_env)
def remove_docker_stack(machine):
"""Removes the custom stack in the custom_path
:machine: Docker machine to execute command
"""
# Set CLI environment to target docker machine
machine_env = get_machine_env(machine)
os_env = os.environ.copy()
os_env.update(machine_env)
    remove_command = 'docker stack rm ohpb'
run([f'{remove_command}'], shell=True, env=os_env)
def resolve_service_nodes(service):
"""Returnes nodes running a specified service
:service: name or id of a service
:returns: list of nodes running the service
"""
node_result = run(['docker', 'service', 'ps', service,
'--format', '{{.Node}}',
'-f', 'desired-state=running'],
universal_newlines=True,
stdout=PIPE)
return node_result.stdout.splitlines()
def get_container_list(manager=None):
"""Return a list of containers running on a machine
:manager: Docker machine to use for command, otherwise local
:returns: list of containers
"""
client = get_docker_client(manager)
return [c.name for c in client.containers.list()]
def get_service_list(manager=None):
"""Return a list of services managed by a machine
:manager: Docker machine to use for command, otherwise local
:returns: list of services
"""
client = get_docker_client(manager)
return [s.name for s in client.services.list()]
def remove_label_from_nodes(label, value, manager=None):
"""Removes label with matching value from all nodes
:label: Label you want to remove
:value: The value to match before removing
:manager: Docker machine to use for command, otherwise local
:return: Nodes with removed label
"""
client = get_docker_client(manager)
nodes = client.nodes.list()
matching_nodes = [n for n in nodes
if label in n.attrs['Spec']['Labels']
and n.attrs['Spec']['Labels'][label] == value]
print(f'Matches {matching_nodes}')
for m in matching_nodes:
spec = m.attrs['Spec']
spec['Labels'].pop(label)
m.update(spec)
logging.info(f'Remove label {label} with value {value} from {m}')
client.close()
return [n.attrs['Description']['Hostname'] for n in matching_nodes]
def assign_label_to_node(nodeid, label, value, manager=None):
"""Assigns a label to a node (e.g. building)
:nodeid: Id or name of the node
:label: Label you want to add
:value: The value to assign to the label
:manager: Docker machine to use for command, otherwise local
"""
client = get_docker_client(manager)
node = client.nodes.get(nodeid)
spec = node.attrs['Spec']
spec['Labels'][label] = value
node.update(spec)
logging.info(f'Assign label {label} with value {value} to {nodeid}')
client.close()
def run_command_in_service(service, command, building=None):
"""Runs a command in a service based on its name.
    When no matching container is found or the service name is ambiguous,
an error will be displayed and the function exits
:param service: Name of the service to execute command
:param command: Command to execute
    :param building: Optional building to make the service unambiguous (Default: None)
"""
client = get_docker_client(building)
# Find containers matching name
service_name_filter = {"name": service}
containers = client.containers.list(filters=service_name_filter)
    # Ensure match is unambiguous
if (len(containers) > 1):
print(f'Found multiple containers matching service name {service}, '
              'ensure service is unambiguous')
elif (len(containers) < 1):
print(f'Found no matching container for service name {service}')
else:
service_container = containers[0]
        print(f'Executing {command} in container {service_container.name} '
              f'({service_container.id}) on building {building}\n')
command_exec = service_container.exec_run(command)
print(command_exec.output.decode())
client.close()
def get_docker_client(manager=None):
"""Returns docker client instance
:manager: Optional machine to use, local otherwise
:returns: Docker client instance
"""
if manager:
machine_env = get_machine_env(manager)
client = docker.from_env(environment=machine_env)
else:
client = docker.from_env()
return client
def restore_building_backup(manager, building, new_machine=None):
    """Restores the latest backup of a building, optionally onto a new machine
    :manager: Docker machine used to execute the restore
    :building: name (label) of the building that shall be restored
    :new_machine: optional machine the building is moved to before restoring
    """
client = get_docker_client(manager)
# get backup services of the building
services = client.services.list(filters={'label': f'backup={building}'})
# scale down services (to prevent writes during restore)
for s in services:
s.scale(0)
# Give services 10 seconds to shutdown
print("Wait for services to shutdown...")
sleep(10)
# When a new machine is used, (un-)assign labels
if new_machine:
# Remove old node labels and add new
old_nodes = remove_label_from_nodes('building', building, manager)
assign_label_to_node(new_machine, 'building', building, manager)
print("Wait for services to start on new machine")
if wait_for_containers(new_machine, 'backup|sftp', expected_count=2):
run_command_in_service('backup', 'restore', new_machine)
# When building was moved update host entry of openhab in compose
move_openhab_service(building, new_machine)
update_pb_framr_host(old_nodes[0], new_machine)
else:
logging.error(
f"Failed to start services on {new_machine}, "
" rolling back changes")
# restore labels to old nodes
remove_label_from_nodes('building', building, manager)
for on in old_nodes:
assign_label_to_node(on, 'building', building, manager)
update_pb_framr_host(new_machine, on)
else:
# execute restore command in backup service
run_command_in_service('backup', 'restore', manager)
# reload and scale up services again
for s in services:
s.reload()
s.scale(1)
# close client
client.close()
def wait_for_containers(machine, name_filter, expected_count=1, timeout=60):
"""Waits until containers matching filters are available
:machine: machine to check for container
:name_filter: regexp to filter names by
:expected_count: number of services that are expected to match
    :timeout: Time to at least wait for before aborting the check
:returns: true if found, false when timed out
"""
client = get_docker_client(machine)
for t in range(timeout):
cl = client.containers.list(filters={'name': name_filter})
if len(cl) >= expected_count:
logging.info("Let serivces boot up")
sleep(3)
return True
else:
sleep(1)
logging.error(f"Timed out wait for containers matching {name_filter}.")
return False
# >>>
# ******************************
# CLI base commands <<<
# ******************************
def init_config_dirs_command(args):
"""Initialize config directories
:args: parsed commandline arguments
"""
# generate basic config folder
generate_config_folders()
def assign_building_command(args):
"""Assigns the role of a building to a node
:args: parsed commandline arguments
"""
node = args.node
building = args.building
print(f'Assign role of building {building} to node {node}')
assign_label_to_node(node, 'building', building)
def execute_command(args):
"""Top level function to manage command executions from CLI
:args: parsed commandline arguments
"""
service = args.service
command = " ".join(str(x) for x in args.command) # list to string
building = args.building
run_command_in_service(service, command, building)
def restore_command(args):
"""Top level function to manage command executions from CLI
:args: parsed commandline arguments
"""
building = args.building
target = args.target
if not check_machine_exists(target):
print(f'Machine with name {target} not found')
return
print(f'Restoring building {building} on machine {target}')
get_machine_env(target)
def interactive_command(args):
"""Top level function to start the interactive mode
:args: parsed command line arguments
"""
main_menu(args)
# >>>
# ******************************
# Interactive menu entries <<<
# ******************************
def main_menu(args):
""" Display main menu
"""
    # Main menu prompt, the selection contains the function to call
    choice = qust.select('Public Building Manager - Main Menu',
                         choices=load_main_entries(), style=st).ask()
    # Call function of menu entry
if choice:
choice(args)
def load_main_entries():
"""Loads entries for main menu depending on available files
:returns: entries of main menu
"""
entries = []
if not os.path.exists(custom_path):
entries.append({'name': 'Create initial structure',
'value': init_menu})
else:
entries.append({'name': 'Manage Services',
'value': service_menu})
entries.append({'name': 'Manage Users',
'value': user_menu})
entries.append({'name': 'Manage Devices',
'value': device_menu})
entries.append({'name': 'Manage Backups',
'value': backup_menu})
entries.append({'name': 'Execute a command in a service container',
'value': exec_menu})
entries.append({'name': 'Exit', 'value': sys.exit})
return entries
def exit_menu(args):
"""Exits the programm
"""
sys.exit()
# *** Init Menu Entries ***
def init_menu(args):
"""Menu entry for initial setup and file generation
:args: Passed commandline arguments
"""
# Prompts
stack_name = qust.text('Choose a name for your setup', style=st).ask()
hosts = (qust.checkbox(
'What docker machines will be used?',
choices=generate_cb_choices(get_machine_list()),
style=st)
.skip_if(not stack_name)
.ask())
# Cancel init if no hosts selected
if not hosts:
return
# Ensure passwords match
password_match = False
while not password_match:
password = qust.password(
'Choose a password for the ohadmin user:', style=st).ask()
confirm = qust.password(
'Repeat password for the ohadmin user:', style=st).ask()
if password == confirm:
password_match = True
else:
print("Passwords did not match, try again")
# Initialize custom configuration dirs and templates
generate_config_folders()
generate_initial_compose()
frames = []
for i, host in enumerate(hosts):
building_id, building_name, services = init_machine_menu(host, i)
if building_id and building_name and services:
frames.append({'host': host,
'building_id': building_id,
'building_name': building_name,
'services': services})
else:
return
# When frames is not empty generate frame config
if frames:
generate_pb_framr_file(frames)
generate_volumerize_files(frames)
building_ids = [f['building_id'] for f in frames]
generate_host_key_files(building_ids)
# Generate config files based on input
username = ADMIN_USER
generate_sftp_file(username, password, ['backup_data/backup'])
generate_postgres_files(username, password)
generate_mosquitto_file(username, password)
generate_traefik_file(username, password)
generate_filebrowser_file(username, password)
generate_id_rsa_files()
# print(answers)
print(f"Configuration files for {stack_name} created in {custom_path}")
# Check if changes shall be applied to docker environment
generate = (qust.confirm(
'Apply changes to docker environment?',
default=True,
style=st)
.ask())
if generate:
generate_swarm(hosts)
def init_machine_menu(host, increment):
"""Prompts to select server services
:host: docker-machine host
:increment: incrementing number to ensure ports are unique
    :return: chosen building id, name and services or None values if cancelled
"""
# Print divider
print('----------')
# Prompt for services
building_id = (qust.text(
f'Choose an identifier for the building on server {host} '
'(lowercase no space)',
default=f'{host}', style=st)
.skip_if(not host)
.ask())
building = (qust.text(
f'Choose a display name for building on server {host}',
default=f'{host.capitalize()}', style=st)
.skip_if(not building_id)
.ask())
services = (qust.checkbox(
f'What services shall {host} provide?',
choices=generate_cb_service_choices(checked=True),
style=st)
.skip_if(not building)
.ask())
if services is None:
return None, None, None
if Service.OPENHAB in services:
add_openhab_service(building_id, host)
if Service.NODERED in services:
add_nodered_service(building_id)
if Service.MQTT in services:
add_mqtt_service(building_id, increment)
if Service.POSTGRES in services:
add_postgres_service(building_id)
if Service.BACKUP in services:
add_volumerize_service(building_id)
if Service.FILES in services:
add_file_service(building_id)
if Service.SFTP in services:
add_sftp_service(building_id, increment)
return building_id, building, services
# *** Exec Menu Entries ***
def exec_menu(args):
"""Menu entry for executing commands in services
:args: Passed commandline arguments
"""
machine = docker_client_prompt(" to execute command at")
service_name = qust.select(
'Which service container shall execute the command?',
choices=get_container_list(machine), style=st).ask()
command = qust.text('What command should be executed?', style=st).ask()
run_command_in_service(service_name, command, machine)
# *** User Menu Entries ***
def user_menu(args):
"""Menu entry for user managment
:args: Passed commandline arguments
"""
# Ask for action
choice = qust.select("What do you want to do?", choices=[
'Add a new user', 'Modify existing user', 'Exit'],
style=st).ask()
if "Add" in choice:
new_user_menu()
elif "Modify" in choice:
modify_user_menu()
def new_user_menu():
"""Menu entry for new users
"""
current_users = get_users_from_files()
new_user = False
while not new_user:
username = qust.text("Choose a new username:", style=st).ask()
if username not in current_users:
new_user = True
else:
print(f"User with name {username} already exists, try again")
# Ensure passwords match (only if username was selected)
password_match = False
password = None
while username and not password_match:
password = qust.password(
f'Choose a password for the user {username}:', style=st).ask()
confirm = (qust.password(
f'Repeat password for the user {username}:',
style=st)
.skip_if(not password)
.ask())
if password == confirm:
password_match = True
else:
print("Passwords did not match, try again")
if password and username:
add_user_to_traefik_file(username, password)
def modify_user_menu():
"""Menu entry to remove users or change passwords
"""
current_users = get_users_from_files()
user = qust.select("Choose user to modify:",
choices=current_users, style=st).ask()
if user is None:
return
elif user == 'ohadmin':
choices = [{'name': 'Delete user',
'disabled': 'Disabled: cannot delete admin user'},
'Change password', 'Exit']
else:
choices = ['Delete user', 'Change password', 'Exit']
action = qust.select(
f"What should we do with {user}?", choices=choices, style=st).ask()
if action is None:
return
if 'Delete' in action:
is_sure = qust.confirm(
f"Are you sure you want to delete user {user}?", style=st).ask()
if is_sure:
remove_user_from_traefik_file(user)
elif 'Change' in action:
password_match = False
while not password_match:
password = qust.password(
f'Choose a password for the user {user}:', style=st).ask()
confirm = (qust.password(
f'Repeat password for the user {user}:', style=st)
.skip_if(password is None)
.ask())
if password == confirm:
password_match = True
else:
print("Passwords did not match, try again")
if password:
add_user_to_traefik_file(user, password)
# *** Service Menu Entries ***
def service_menu(args):
"""Menu entry for service managment
:args: Passed commandline arguments
"""
# Ask for action
choice = qust.select("What do you want to do?", choices=[
'Re-/Start docker stack', 'Stop docker stack',
'Modify existing services', 'Add additional service',
'Exit'], style=st).ask()
if "Add" in choice:
service_add_menu()
elif "Modify" in choice:
service_modify_menu()
elif "Start" in choice:
machine = docker_client_prompt(" to execute deploy")
if machine:
deploy_docker_stack(machine)
elif "Stop" in choice:
machine = docker_client_prompt(" to execute remove")
if machine:
remove_docker_stack(machine)
def service_add_menu():
"""Menu to add additional services
"""
services = [s for s in Service if s.additional]
service = qust.select(
'What service do you want to add?', style=st,
choices=generate_cb_service_choices(service_list=services)).ask()
host = (qust.select('Where should the service be located?',
choices=generate_cb_choices(
get_machine_list()), style=st)
.skip_if(not service)
.ask())
identifier = (qust.text(
'Input an all lower case identifier:',
style=st)
.skip_if(not host)
.ask())
if service and host and identifier:
if service == Service.POSTGRES:
add_postgres_service(host, postfix=identifier)
def service_modify_menu():
"""Menu to modify services
"""
services = get_current_services()
service = qust.select(
'What service do you want to modify?', choices=services).ask()
if service is None:
return
elif service in ['proxy', 'landing']:
choices = [{'name': 'Remove service',
'disabled': 'Disabled: cannot remove framework services'},
'Exit']
else:
choices = ['Remove service', 'Exit']
action = (qust.select(
f"What should we do with {service}?", choices=choices, style=st)
.skip_if(not service)
.ask())
if action is None:
return
elif 'Remove' in action:
delete_service(service)
# *** Device Menu Functions ***
def device_menu(args):
"""Menu to manage devices
    :args: Arguments from commandline
"""
# Check if device scripts are installed
bin_path = '/usr/bin/enable-swarm-device'
choices = ['Install device scripts']
if os.path.exists(bin_path):
choices.append('Link device to service')
choices.append('Unlink device')
choices.append('Exit')
# Ask for action
choice = qust.select("What do you want to do? (root required)",
choices=choices, style=st).ask()
if "Install" in choice:
print("Installing device scripts (needs root)")
device_install_menu()
elif "Link" in choice:
device_link_menu()
elif "Unlink" in choice:
device_unlink_menu()
def device_install_menu():
"""Install scripts to link devices
"""
machine = docker_client_prompt(" to install usb support")
if machine:
# Name of base dir on machines
external_base_dir = os.path.basename(base_dir)
# Check if files are available on targeted machine
machine_dir = f"{external_base_dir}/install-usb-support.sh"
print(machine_dir)
if not check_file_on_machine(machine_dir, machine):
print("Scripts missing on machine, will be copied")
copy_files_to_machine(base_dir, machine)
else:
print("Scripts available on machine")
execute_command_on_machine(f'sudo {machine_dir}', machine)
else:
print("Cancelled device script installation")
def device_link_menu():
"""Link device to a service
"""
machine = docker_client_prompt(" to link device on")
device = (qust.select("What device should be linked?",
choices=USB_DEVICES,
style=st)
.skip_if(not machine)
.ask())
if machine and device:
# Start systemd service that ensures link (escapes of backslash needed)
link_cmd = f"sudo systemctl enable --now swarm-device@" + \
f"{device}\\\\\\\\x20openhab.service"
# Needs enable to keep after reboot
execute_command_on_machine(link_cmd, machine)
print(f"Linked device {device} to openHAB service on {machine}")
else:
print("Cancelled device linking")
def device_unlink_menu():
"""Unlink a device from a service
"""
machine = docker_client_prompt(" to unlink device from")
device = (qust.select("What device should be unlinked?",
choices=USB_DEVICES, style=st)
.skip_if(not machine)
.ask())
if machine and device:
# Stop systemd service that ensures link (escapes of backslash needed)
link_cmd = f"sudo systemctl disable --now swarm-device@" + \
f"{device}\\\\\\\\x20openhab.service"
execute_command_on_machine(link_cmd, machine)
print(f"Unlinked device {device} on machine {machine}")
else:
print("Cancelled device unlinking")
# *** Backup Menu Entries ***
def backup_menu(args):
"""Menu entry for backup managment
:args: Passed commandline arguments
"""
# Ask for action
choice = qust.select("What do you want to do?", choices=[
'Execute backup', 'Restore backup', 'Move building', 'Exit'],
style=st).ask()
if "Execute" in choice:
execute_backup_menu()
elif "Restore" in choice:
restore_backup_menu()
elif "Move" in choice:
restore_new_building_menu()
def execute_backup_menu():
"""Submenu for backup execution
"""
machine = docker_client_prompt(" to backup")
full = (qust.confirm("Execute full backup (otherwise partial)?",
default=False, style=st)
.skip_if(not machine)
.ask())
if full is None:
return
elif full:
run_command_in_service('backup', 'backupFull', machine)
print("Full backup completed")
else:
run_command_in_service('backup', 'backup', machine)
print("Partial backup completed")
def restore_backup_menu():
"""Submenu for backup execution
"""
machine = docker_client_prompt(" to restore")
confirm = (qust.confirm(
f'Restore services from last backup on machine {machine} '
'(current data will be lost)?',
default=False,
style=st)
.skip_if(not machine)
.ask())
if confirm:
restore_building_backup(machine, machine)
print("Restore completed")
else:
print("Restore canceled")
def restore_new_building_menu():
"""Submenu for backup execution on a new building
"""
machine = docker_client_prompt(" to execute restores with.")
current_building = compose_building_prompt(" to move", skip_if=not machine)
new_machine = docker_client_prompt(" to move building to",
skip_if=not current_building)
confirm = (qust.confirm(
f'Recreate {current_building} from last backup'
        f' on machine {new_machine}?',
default=False,
style=st)
.skip_if(not new_machine, default=False)
.ask())
if confirm:
restore_building_backup(machine, current_building, new_machine)
else:
print("Restore canceled")
# *** Menu Helper Functions ***
def generate_cb_choices(items, checked=False):
    """Generates checkbox entries for lists of strings
    :items: python list that shall be converted
    :checked: if true, selections will be checked by default
    :returns: A list of dicts with name keys
    """
    return [{'name': m, 'checked': checked} for m in items]
def generate_cb_service_choices(checked=False, service_list=None):
"""Generates checkbox entries for the sevice enum
:checked: if true, selections will be checked by default
:service_list: optional list of services, use all if empty
:returns: A list of dicts with name keys
"""
services = service_list if service_list is not None else Service
return [
{'name': s.fullname, 'value': s, 'checked': checked} for s in services
]
def docker_client_prompt(message_details='', skip_if=False):
    """Show list of docker machines and return selection
    :message_details: suffix appended to the prompt message
    :skip_if: skip the prompt and return None when true
    :returns: name of the selected machine
    """
machine = (qust.select(f'Choose manager machine{message_details}',
choices=get_machine_list(), style=st)
.skip_if(skip_if)
.ask())
return machine
def compose_building_prompt(message_details='', skip_if=False):
    """Show list of building constraints used in compose
    :message_details: suffix appended to the prompt message
    :skip_if: skip the prompt and return None when true
    :returns: name of the selected building
    """
building = qust.select(f'Choose building{message_details}:',
choices=get_current_building_constraints(),
style=st).skip_if(skip_if).ask()
return building
# >>>
# ******************************
# Script main (entry) <<<
# ******************************
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
prog='building_manager',
        description='Generate and manage multi-'
'building configurations of openHAB with docker swarm')
parser.add_argument(
'--config_dir',
'-d',
        help='Directory to create config folders in, default is current dir')
subparsers = parser.add_subparsers()
# Interactive mode
parser_interactive = subparsers.add_parser(
'interactive',
help='Starts the interactive mode of the building manager')
parser_interactive.set_defaults(func=interactive_command)
# Restore command
parser_restore = subparsers.add_parser('restore', help='Restore backups')
parser_restore.add_argument(
'building', help='Name (label) of the building that shall be restored')
parser_restore.add_argument(
'target', help='Name of the machine to restore to')
parser_restore.set_defaults(func=restore_command)
# Assign building command
parser_assign_building = subparsers.add_parser(
'assign_building', help='Assign the role of a building to a node')
parser_assign_building.add_argument(
'node', help='Name (or ID) of the node that gets the role assigned')
parser_assign_building.add_argument(
'building', help='Name of the building that will be assigned')
parser_assign_building.set_defaults(func=assign_building_command)
# Execute command
parser_exec = subparsers.add_parser(
'exec', help='Execute commands in a service container')
parser_exec.add_argument(
'service', help='Name of the service that will run the command')
parser_exec.add_argument(
'command', help='Command to be executed', nargs=argparse.REMAINDER)
parser_exec.add_argument(
'--building',
'-b',
help='Building name (label) of the service if '
'service location is ambiguous')
parser_exec.set_defaults(func=execute_command)
# Config commands
parser_config = subparsers.add_parser(
'config', help='Manage configuration files')
parser_config_subs = parser_config.add_subparsers()
# - Config init
parser_config_init = parser_config_subs.add_parser(
'init', help='Initialize config file directories')
parser_config_init.set_defaults(func=init_config_dirs_command)
    # Parse command-line arguments
args = parser.parse_args()
# Check if custom config dir is used
if args.config_dir:
custom_path = args.config_dir
# when no subcommand is defined show interactive menu
try:
args.func(args)
except AttributeError:
interactive_command(args)
# >>>
# --- vim settings ---
# vim:foldmethod=marker:foldlevel=0:foldmarker=<<<,>>>
| python |
import os
from bs4 import BeautifulSoup
bicycle = dict.fromkeys([
    'Price', 'Brand', 'Model', 'Frame', 'Color', 'Size', 'Fork', 'Headset',
    'Stem', 'Handlebar', 'Grips', 'Rear Derailleur', 'Front Derailleur',
    'Shifter', 'Brake', 'Crankset', 'Cassette', 'Chain', 'Rims', 'Hub Front',
    'Hub Rear', 'Tires', 'Pedals', 'Saddle', 'Seat Post', 'Seat Post Clamp',
    'Weight (KG)', 'Bike Type:', 'Target Group:', 'Material:', 'Wheel Size:',
    'Model year:'], '------')
parsed = BeautifulSoup(open('Cube Access WS Exc black n blue - Hardtail Mountainbike Women.html'), 'html.parser')
description = parsed.find(attrs={'class':'product--description'}).findAll('tr')
properties = parsed.find(attrs={'class':'product--properties'}).findAll('tr')
# Both tables hold two-column rows: attribute name in the first cell, value in
# the second. Rows without both cells are printed and skipped.
for d in description:
    data = d.findAll('td')
    try:
        key = data[0].text.strip()
        value = data[1].text.strip()
    except IndexError:
        print(data)
    else:
        bicycle[key] = value
for p in properties:
    data = p.findAll('td')
    try:
        key = data[0].text.strip()
        value = data[1].text.strip()
    except IndexError:
        print(data)
    else:
bicycle[key] = value | python |
a=int(input("enter a number"))
for i in range(a+1):
if(i>1):
for j in range(2,i):
if(i%j==0):
break
else:
print(i)
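
# A minimal sketch of the same trial-division idea factored into a reusable
# function (illustrative only, not part of the original script); checking
# divisors up to the square root of n is sufficient.
def is_prime(n):
    if n < 2:
        return False
    for j in range(2, int(n ** 0.5) + 1):
        if n % j == 0:
            return False
    return True

# Example: [i for i in range(20) if is_prime(i)] -> [2, 3, 5, 7, 11, 13, 17, 19]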
| python |
from spinn_front_end_common.utilities.notification_protocol.\
notification_protocol import NotificationProtocol
import logging
logger = logging.getLogger(__name__)
class FrontEndCommonNotificationProtocol(object):
""" The notification protocol for external device interaction
"""
def __call__(
self, wait_for_read_confirmation,
socket_addresses, database_file_path):
"""
        :param wait_for_read_confirmation: whether to wait for external\
            devices to confirm that they have read the database
        :param socket_addresses: socket addresses of the devices listening\
            for notifications
        :param database_file_path: path to the database file to notify about
        :return: a dictionary containing this notification interface
"""
# notification protocol
self._notification_protocol = \
NotificationProtocol(socket_addresses, wait_for_read_confirmation)
self.send_read_notification(database_file_path)
return {"notification_interface": self}
def wait_for_confirmation(self):
""" Waits for devices to confirm they have read the database via the\
notification protocol
:return:
"""
self._notification_protocol.wait_for_confirmation()
def send_read_notification(self, database_directory):
""" Send the read notifications via the notification protocol
:param database_directory: the path to the database
:return:
"""
self._notification_protocol.send_read_notification(database_directory)
def send_start_notification(self):
""" Send the start notifications via the notification protocol
:return:
"""
self._notification_protocol.send_start_notification()
def stop(self):
""" Ends the notification protocol
:return:
"""
logger.debug("[data_base_thread] Stopping")
self._notification_protocol.close()
| python |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import optparse
import os
import urllib2
import sys
import time
# Print a dot every time this number of bytes is read.
PROGRESS_SPACING = 128 * 1024
def ReadFile(filename):
fh = open(filename, 'r')
try:
return fh.read()
finally:
fh.close()
def WriteFile(filename, data):
fh = open(filename, 'w')
try:
fh.write(data)
finally:
fh.close()
def HashFile(filename):
hasher = hashlib.sha1()
fh = open(filename, 'rb')
try:
while True:
data = fh.read(4096)
if len(data) == 0:
break
hasher.update(data)
finally:
fh.close()
return hasher.hexdigest()
def CopyStream(input_stream, output_stream):
"""Copies the contents of input_stream to output_stream. Prints
dots to indicate progress.
"""
bytes_read = 0
dots_printed = 0
while True:
data = input_stream.read(4096)
if len(data) == 0:
break
output_stream.write(data)
bytes_read += len(data)
if bytes_read / PROGRESS_SPACING > dots_printed:
sys.stdout.write('.')
sys.stdout.flush()
dots_printed += 1
def RenameWithRetry(old_path, new_path):
# Renames of files that have recently been closed are known to be
# unreliable on Windows, because virus checkers like to keep the
# file open for a little while longer. This tends to happen more
# for files that look like Windows executables, which does not apply
# to our files, but we retry the rename here just in case.
if sys.platform in ('win32', 'cygwin'):
for i in range(5):
try:
if os.path.exists(new_path):
os.remove(new_path)
os.rename(old_path, new_path)
return
except Exception, exn:
sys.stdout.write('Rename failed with %r. Retrying...\n' % str(exn))
sys.stdout.flush()
time.sleep(1)
    raise Exception('Unable to rename IRT file')
else:
os.rename(old_path, new_path)
def DownloadFile(dest_path, url):
url_path = '%s.url' % dest_path
temp_path = '%s.temp' % dest_path
if os.path.exists(url_path) and ReadFile(url_path).strip() == url:
# The URL matches that of the file we previously downloaded, so
# there should be nothing to do.
return
sys.stdout.write('Downloading %r to %r\n' % (url, dest_path))
output_fh = open(temp_path, 'wb')
stream = urllib2.urlopen(url)
CopyStream(stream, output_fh)
output_fh.close()
sys.stdout.write(' done\n')
if os.path.exists(url_path):
os.unlink(url_path)
RenameWithRetry(temp_path, dest_path)
WriteFile(url_path, url + '\n')
stream.close()
def DownloadFileWithRetry(dest_path, url):
for i in range(5):
try:
DownloadFile(dest_path, url)
break
except urllib2.HTTPError, exn:
if exn.getcode() == 404:
raise
sys.stdout.write('Download failed with error %r. Retrying...\n'
% str(exn))
sys.stdout.flush()
time.sleep(1)
def EvalDepsFile(path):
scope = {'Var': lambda name: scope['vars'][name]}
execfile(path, {}, scope)
return scope
def Main():
parser = optparse.OptionParser()
parser.add_option(
'--base_url', dest='base_url',
# For a view of this site that includes directory listings, see:
# http://gsdview.appspot.com/nativeclient-archive2/
# (The trailing slash is required.)
default=('http://commondatastorage.googleapis.com/'
'nativeclient-archive2/irt'),
help='Base URL from which to download.')
parser.add_option(
'--nacl_revision', dest='nacl_revision',
help='Download an IRT binary that was built from this '
'SVN revision of Native Client.')
parser.add_option(
'--file_hash', dest='file_hashes', action='append', nargs=2, default=[],
metavar='ARCH HASH',
help='ARCH gives the name of the architecture (e.g. "x86_32") for '
'which to download an IRT binary. '
'HASH gives the expected SHA1 hash of the file.')
options, args = parser.parse_args()
if len(args) != 0:
parser.error('Unexpected arguments: %r' % args)
if options.nacl_revision is None and len(options.file_hashes) == 0:
# The script must have been invoked directly with no arguments,
# rather than being invoked by gclient. In this case, read the
# DEPS file ourselves rather than having gclient pass us values
# from DEPS.
deps_data = EvalDepsFile(os.path.join('src', 'DEPS'))
options.nacl_revision = deps_data['vars']['nacl_revision']
options.file_hashes = [
('x86_32', deps_data['vars']['nacl_irt_hash_x86_32']),
('x86_64', deps_data['vars']['nacl_irt_hash_x86_64']),
]
nacl_dir = os.path.join('src', 'native_client')
if not os.path.exists(nacl_dir):
# If "native_client" is not present, this might be because the
# developer has put '"src/native_client": None' in their
# '.gclient' file, because they don't want to build Chromium with
# Native Client support. So don't create 'src/native_client',
# because that would interfere with checking it out from SVN
# later.
sys.stdout.write(
'The directory %r does not exist: skipping downloading binaries '
'for Native Client\'s IRT library\n' % nacl_dir)
return
if len(options.file_hashes) == 0:
sys.stdout.write('No --file_hash arguments given: nothing to update\n')
new_deps = []
for arch, expected_hash in options.file_hashes:
url = '%s/r%s/irt_%s.nexe' % (options.base_url,
options.nacl_revision,
arch)
dest_dir = os.path.join(nacl_dir, 'irt_binaries')
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_path = os.path.join(dest_dir, 'nacl_irt_%s.nexe' % arch)
DownloadFileWithRetry(dest_path, url)
downloaded_hash = HashFile(dest_path)
if downloaded_hash != expected_hash:
sys.stdout.write(
'Hash mismatch: the file downloaded from URL %r had hash %r, '
'but we expected %r\n' % (url, downloaded_hash, expected_hash))
new_deps.append(' "nacl_irt_hash_%s": "%s",\n'
% (arch, downloaded_hash))
if len(new_deps) > 0:
sys.stdout.write('\nIf you have changed nacl_revision, the DEPS file '
'probably needs to be updated with the following:\n%s\n'
% ''.join(new_deps))
sys.exit(1)
if __name__ == '__main__':
Main()
| python |
import RPi.GPIO as GPIO
from queue import Queue
EventClick = 'C'
class ButtonWorker(object):
def __init__(self, pin):
self.gpio = GPIO
self.gpio.setwarnings(False)
self.queue = Queue()
self.pin = pin
self.gpio.setmode(GPIO.BCM)
self.gpio.setup(self.pin, self.gpio.IN, pull_up_down=self.gpio.PUD_UP)
self.gpio.add_event_detect(self.pin, GPIO.RISING, callback=self.Call, bouncetime=500)
def Call(self, pin):
state = self.gpio.input(pin)
self.queue.put(EventClick)
def check(self):
result = False
while not (self.queue.empty()):
m = self.queue.get_nowait()
if m == EventClick:
print ("Clicked")
result = True
self.queue.task_done()
return result | python |
from pyrogram.types import InlineQueryResultArticle,InputTextMessageContent
from uuid import uuid4
class InlineQueryResults(list):
def __init__(self):
self.results = list()
super().__init__(self.results)
    def add(self, title, message_text, message_parse_mode=None,
            message_disable_web_page_preview=None, url=None, description=None,
            thumb_url=None, reply_markup=None):
        self.results.append(
            InlineQueryResultArticle(
                id=uuid4(),
                title=title,
                input_message_content=InputTextMessageContent(
                    message_text=message_text,
                    parse_mode=message_parse_mode,
                    disable_web_page_preview=message_disable_web_page_preview),
                url=url,
                description=description,
                thumb_url=thumb_url,
                reply_markup=reply_markup
            )
        )
super().__init__(self.results)
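
# Minimal usage sketch (not part of the original module): inside a pyrogram
# inline-query handler the list built with add() is passed to answer(). The
# Client named `app` and the handler below are placeholders.
#
# @app.on_inline_query()
# async def handle_inline_query(client, inline_query):
#     results = InlineQueryResults()
#     results.add(title="Hello", message_text="Hello from the bot",
#                 description="Sends a greeting")
#     await inline_query.answer(results, cache_time=1)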
| python |
from .FSError import *
class ProtectFlags:
FIBF_DELETE = 1
FIBF_EXECUTE = 2
FIBF_WRITE = 4
FIBF_READ = 8
FIBF_ARCHIVE = 16
FIBF_PURE = 32
FIBF_SCRIPT = 64
flag_txt = "HSPArwed"
flag_num = len(flag_txt)
flag_none = 0xF # --------
empty_string = "-" * flag_num
def __init__(self, mask=0):
self.mask = mask
def get_mask(self):
return self.mask
def __str__(self):
txt = ""
pos = self.flag_num - 1
m = 1 << pos
for i in range(self.flag_num):
bit = self.mask & m == m
show = "-"
flg = self.flag_txt[i]
flg_low = flg.lower()
if bit:
if flg_low != flg:
show = flg_low
else:
if flg_low == flg:
show = flg_low
txt += show
m >>= 1
pos -= 1
return txt
def bin_str(self):
res = ""
m = 1 << (self.flag_num - 1)
for i in range(self.flag_num):
if m & self.mask == m:
res += "1"
else:
res += "0"
m >>= 1
return res
def short_str(self):
return str(self).replace("-", "")
def parse_full(self, s):
"""parse a string with all flags"""
n = len(self.flag_txt)
if len(s) != n:
raise ValueError("full string size mismatch!")
mask = 0
for i in range(n):
val = s[i]
ref = self.flag_txt[i]
ref_lo = ref.lower()
if val not in (ref, ref_lo, "-"):
raise ValueError("invalid protect char: " + val)
is_lo = ref == ref_lo
is_blank = val == "-"
if is_lo:
do_set = is_blank
else:
do_set = not is_blank
if do_set:
bit_pos = n - i - 1
bit_mask = 1 << bit_pos
mask |= bit_mask
self.mask = mask
def parse(self, s):
if len(s) == 0:
return
# allow to add with '+' or sub with '-'
n = self.flag_txt
mode = "+"
self.mask = self.flag_none
for a in s.lower():
if a in "+-":
mode = a
else:
mask = None
is_low = None
for i in range(self.flag_num):
flg = self.flag_txt[i]
flg_low = flg.lower()
if flg_low == a:
mask = 1 << (self.flag_num - 1 - i)
is_low = flg_low == flg
break
                if mask is None:
raise FSError(INVALID_PROTECT_FORMAT, extra="char: " + a)
# apply mask
if mode == "+":
if is_low:
self.mask &= ~mask
else:
self.mask |= mask
else:
if is_low:
self.mask |= mask
else:
self.mask &= ~mask
def is_set(self, mask):
return self.mask & mask == 0 # LO active
def set(self, mask):
self.mask &= ~mask
def clr(self, mask):
self.mask |= mask
def is_d(self):
return self.is_set(self.FIBF_DELETE)
def is_e(self):
return self.is_set(self.FIBF_EXECUTE)
def is_w(self):
return self.is_set(self.FIBF_WRITE)
def is_r(self):
return self.is_set(self.FIBF_READ)
if __name__ == "__main__":
inp = ["h", "s", "p", "a", "r", "w", "e", "d"]
for i in inp:
p = ProtectFlags()
p.parse(i)
s = str(p)
if not i in s:
print(s)
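    # Illustrative round-trip check (not part of the original test): parse_full()
    # expects the full 8-character "HSPArwed" string; the rwed bits are
    # active-low, so an all-clear mask renders back as "----rwed".
    q = ProtectFlags()
    q.parse_full("----rwed")
    assert str(q) == "----rwed"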
| python |
# -*- coding: utf-8 -*-
"""
tipfyext.mako
~~~~~~~~~~~~~
Mako template support for Tipfy.
Learn more about Mako at http://www.makotemplates.org/
:copyright: 2011 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from __future__ import absolute_import
from cStringIO import StringIO
from mako.lookup import TemplateLookup
from mako.runtime import Context
from werkzeug import cached_property
#: Default configuration values for this module. Keys are:
#:
#: templates_dir
#: Directory for templates. Default is `templates`.
default_config = {
'templates_dir': 'templates',
}
class Mako(object):
def __init__(self, app, _globals=None, filters=None):
self.app = app
config = app.config[__name__]
dirs = config.get('templates_dir')
if isinstance(dirs, basestring):
dirs = [dirs]
self.environment = TemplateLookup(directories=dirs,
output_encoding='utf-8', encoding_errors='replace')
def render(self, _filename, **context):
"""Renders a template and returns a response object.
:param _filename:
The template filename, related to the templates directory.
:param context:
Keyword arguments used as variables in the rendered template.
These will override values set in the request context.
:returns:
A rendered template.
"""
template = self.environment.get_template(_filename)
return template.render_unicode(**context)
def render_template(self, _handler, _filename, **context):
"""Renders a template and returns a response object.
:param _filename:
The template filename, related to the templates directory.
:param context:
Keyword arguments used as variables in the rendered template.
These will override values set in the request context.
:returns:
A rendered template.
"""
ctx = _handler.context.copy()
ctx.update(context)
return self.render(_filename, **ctx)
def render_response(self, _handler, _filename, **context):
"""Returns a response object with a rendered template.
:param _filename:
The template filename, related to the templates directory.
:param context:
Keyword arguments used as variables in the rendered template.
These will override values set in the request context.
"""
res = self.render_template(_handler, _filename, **context)
return self.app.response_class(res)
@classmethod
def factory(cls, _app, _name, **kwargs):
if _name not in _app.registry:
_app.registry[_name] = cls(_app, **kwargs)
return _app.registry[_name]
class MakoMixin(object):
"""Mixin that adds ``render_template`` and ``render_response`` methods
to a :class:`tipfy.RequestHandler`. It will use the request context to
render templates.
"""
# The Mako creator.
mako_class = Mako
@cached_property
def mako(self):
return self.mako_class.factory(self.app, 'mako')
def render_template(self, _filename, **context):
return self.mako.render_template(self, _filename, **context)
def render_response(self, _filename, **context):
return self.mako.render_response(self, _filename, **context)
| python |
# coding=utf-8
import unittest
import sys
from helpers import xroad, auditchecker
from main.maincontroller import MainController
from tests.xroad_configure_service_222 import configure_service
class XroadDeleteService(unittest.TestCase):
"""
SERVICE_15 Delete a Security Server Client's WSDL
RIA URL: https://jira.ria.ee/browse/XT-272, https://jira.ria.ee/browse/XTKB-27, https://jira.ria.ee/browse/XTKB-95
Depends on finishing other test(s): XroadSecurityServerClientRegistration, XroadConfigureService
Requires helper scenarios:
X-Road version: 6.16.0
"""
def test_xroad_configure_service(self):
main = MainController(self)
# Set test name and number
main.test_number = 'SERVICE_15'
main.test_name = self.__class__.__name__
ss_host = main.config.get('ss2.host')
ss_user = main.config.get('ss2.user')
ss_pass = main.config.get('ss2.pass')
ss_ssh_host = main.config.get('ss2.ssh_host')
ss_ssh_user = main.config.get('ss2.ssh_user')
ss_ssh_pass = main.config.get('ss2.ssh_pass')
client = xroad.split_xroad_id(main.config.get('ss2.client_id'))
log_checker = auditchecker.AuditChecker(ss_ssh_host, ss_ssh_user, ss_ssh_pass)
wsdl_url = main.config.get('wsdl.remote_path').format(main.config.get('wsdl.service_wsdl'))
wsdl_test_service = main.config.get('wsdl.service_wsdl_test_service1')
# Delete the added service
test_delete_service = configure_service.test_delete_service(case=main, client=client, wsdl_url=wsdl_url,
log_checker=log_checker)
# Delete the other added service
wsdl_test_service_url = main.config.get('wsdl.remote_path').format(wsdl_test_service)
test_delete_service1 = configure_service.test_delete_service(case=main, client=client,
wsdl_url=wsdl_test_service_url)
try:
main.log('Trying to check for and remove leftover service (2): {0}'.format(wsdl_test_service_url))
main.reload_webdriver(url=ss_host, username=ss_user, password=ss_pass)
test_delete_service1()
except Exception:
main.log('XroadDeleteService: Service (2) not found, no need to delete.')
sys.exc_clear()
try:
# Delete service
main.reload_webdriver(url=ss_host, username=ss_user, password=ss_pass)
test_delete_service()
except:
main.log('XroadDeleteService: Failed to delete service')
assert False
finally:
# Test teardown
main.tearDown()
| python |
from typing import Callable, Tuple
import numpy as np
from fedot.core.data.data import InputData
from fedot.core.validation.compose.metric_estimation import metric_evaluation
from fedot.core.validation.split import ts_cv_generator
def ts_metric_calculation(reference_data: InputData, cv_folds: int,
validation_blocks: int,
metrics: [str, Callable] = None,
pipeline=None, log=None) -> [Tuple[float, ...], None]:
""" Determine metric value for time series forecasting pipeline based
on data for validation
:param reference_data: InputData for validation
:param cv_folds: number of folds to split data
:param validation_blocks: number of validation blocks for time series validation
:param metrics: name of metric or callable object
:param pipeline: Pipeline for validation
:param log: object for logging
"""
log.debug(f'Pipeline {pipeline.root_node.descriptive_id} fit for cross validation started')
try:
evaluated_metrics = [[] for _ in range(len(metrics))]
for train_data, test_data, vb_number in ts_cv_generator(reference_data, cv_folds, validation_blocks, log):
# Calculate metric value for every fold of data
evaluated_metrics = metric_evaluation(pipeline, train_data,
test_data, metrics,
evaluated_metrics,
vb_number)
evaluated_metrics = tuple(map(lambda x: np.mean(x), evaluated_metrics))
log.debug(f'Pipeline {pipeline.root_node.descriptive_id} with metrics: {list(evaluated_metrics)}')
except Exception as ex:
log.debug(f'{__name__}. Pipeline assessment warning: {ex}. Continue.')
evaluated_metrics = None
return evaluated_metrics
| python |
#
# PySNMP MIB module TRAPEZE-NETWORKS-BASIC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TRAPEZE-NETWORKS-BASIC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:27:11 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Counter64, NotificationType, Counter32, IpAddress, Integer32, Bits, Unsigned32, ModuleIdentity, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Counter64", "NotificationType", "Counter32", "IpAddress", "Integer32", "Bits", "Unsigned32", "ModuleIdentity", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
TrpzLicenseFeature, = mibBuilder.importSymbols("TRAPEZE-NETWORKS-LICENSE-FEATURE-TC-MIB", "TrpzLicenseFeature")
trpzMibs, = mibBuilder.importSymbols("TRAPEZE-NETWORKS-ROOT-MIB", "trpzMibs")
trpzBasic = ModuleIdentity((1, 3, 6, 1, 4, 1, 14525, 4, 2))
trpzBasic.setRevisions(('2009-11-16 00:10', '2006-07-10 00:08', '2006-04-14 00:07', '2005-01-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: trpzBasic.setRevisionsDescriptions(('v3.0.0: Moved TrpzLicenseFeature into its own module for easier maintenance. This will be published in 7.1 release.', 'v2.0.6: Fixed MAX-ACCESS of trpzMobilityMemberEntryAddr, an index that was also the only column', 'v2.0.5: Revised for 4.1 release', 'v1: initial version, as for 4.0 and older releases',))
if mibBuilder.loadTexts: trpzBasic.setLastUpdated('200911160010Z')
if mibBuilder.loadTexts: trpzBasic.setOrganization('Trapeze Networks')
if mibBuilder.loadTexts: trpzBasic.setContactInfo('Trapeze Networks Technical Support www.trapezenetworks.com US: 866.TRPZ.TAC International: 925.474.2400 [email protected]')
if mibBuilder.loadTexts: trpzBasic.setDescription("Basic objects for Trapeze Networks wireless switches. Copyright 2004-2009 Trapeze Networks, Inc. All rights reserved. This Trapeze Networks SNMP Management Information Base Specification (Specification) embodies Trapeze Networks' confidential and proprietary intellectual property. Trapeze Networks retains all title and ownership in the Specification, including any revisions. This Specification is supplied 'AS IS' and Trapeze Networks makes no warranty, either express or implied, as to the use, operation, condition, or performance of the Specification.")
trpzBasicSystemInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 14525, 4, 2, 1))
trpzSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzSerialNumber.setStatus('current')
if mibBuilder.loadTexts: trpzSerialNumber.setDescription('The serial number of the switch.')
trpzSwMajorVersionNumber = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzSwMajorVersionNumber.setStatus('current')
if mibBuilder.loadTexts: trpzSwMajorVersionNumber.setDescription('The major release version of the running software.')
trpzSwMinorVersionNumber = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzSwMinorVersionNumber.setStatus('current')
if mibBuilder.loadTexts: trpzSwMinorVersionNumber.setDescription('The minor release version of the running software.')
trpzVersionString = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzVersionString.setStatus('current')
if mibBuilder.loadTexts: trpzVersionString.setDescription('The version string of the running software, including the major, minor, patch and build numbers, such as 3.0.0.185')
trpzMobilityDomainInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2))
trpzMobilityDomainName = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzMobilityDomainName.setStatus('current')
if mibBuilder.loadTexts: trpzMobilityDomainName.setDescription('The mobility domain containing the switch, or a zero-length string when the mobility domain is unknown.')
trpzMobilitySeedIp = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzMobilitySeedIp.setStatus('current')
if mibBuilder.loadTexts: trpzMobilitySeedIp.setDescription("The IPv4 address of the seed switch for this switch's mobility domain, or the IPv4 address 0.0.0.0 if unknown.")
trpzMobilityMemberTableSize = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzMobilityMemberTableSize.setStatus('current')
if mibBuilder.loadTexts: trpzMobilityMemberTableSize.setDescription('The number of entries in the mobility member table, trpzMobilityMemberTable.')
trpzMobilityMemberTable = MibTable((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2, 4), )
if mibBuilder.loadTexts: trpzMobilityMemberTable.setStatus('current')
if mibBuilder.loadTexts: trpzMobilityMemberTable.setDescription('Table of members of the mobility domain, indexed by the member IPv4 address.')
trpzMobilityMemberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2, 4, 1), ).setIndexNames((0, "TRAPEZE-NETWORKS-BASIC-MIB", "trpzMobilityMemberEntryAddr"))
if mibBuilder.loadTexts: trpzMobilityMemberEntry.setStatus('current')
if mibBuilder.loadTexts: trpzMobilityMemberEntry.setDescription('An entry in the trpzMobilityMemberTable table.')
trpzMobilityMemberEntryAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 14525, 4, 2, 2, 4, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzMobilityMemberEntryAddr.setStatus('current')
if mibBuilder.loadTexts: trpzMobilityMemberEntryAddr.setDescription('IPv4 address of a member of the mobility domain.')
trpzLicenseInfoGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3))
trpzLicenseInfoTableSize = MibScalar((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzLicenseInfoTableSize.setStatus('current')
if mibBuilder.loadTexts: trpzLicenseInfoTableSize.setDescription('The number of entries in the license table, trpzLicenseInfoTable.')
trpzLicenseInfoTable = MibTable((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3, 2), )
if mibBuilder.loadTexts: trpzLicenseInfoTable.setStatus('current')
if mibBuilder.loadTexts: trpzLicenseInfoTable.setDescription('Table of installed licenses on the switch. The licences provide additional capabilities over the default capabilities of the switch.')
trpzLicenseInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3, 2, 1), ).setIndexNames((0, "TRAPEZE-NETWORKS-BASIC-MIB", "trpzLicenseInfoEntryFeature"))
if mibBuilder.loadTexts: trpzLicenseInfoEntry.setStatus('current')
if mibBuilder.loadTexts: trpzLicenseInfoEntry.setDescription('A license table entry.')
trpzLicenseInfoEntryFeature = MibTableColumn((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3, 2, 1, 1), TrpzLicenseFeature())
if mibBuilder.loadTexts: trpzLicenseInfoEntryFeature.setStatus('current')
if mibBuilder.loadTexts: trpzLicenseInfoEntryFeature.setDescription('The feature being reported on')
trpzLicenseInfoEntryValue = MibTableColumn((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzLicenseInfoEntryValue.setStatus('current')
if mibBuilder.loadTexts: trpzLicenseInfoEntryValue.setDescription('The value of the feature enabled, for example a feature may have multiple levels of licensing, so the value will very with the license level.')
trpzLicenseInfoEntryDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 14525, 4, 2, 3, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trpzLicenseInfoEntryDescr.setStatus('current')
if mibBuilder.loadTexts: trpzLicenseInfoEntryDescr.setDescription("A human interpretable description of this license, for example, '120 APs or DAPs.'")
mibBuilder.exportSymbols("TRAPEZE-NETWORKS-BASIC-MIB", trpzBasicSystemInfo=trpzBasicSystemInfo, trpzSwMinorVersionNumber=trpzSwMinorVersionNumber, trpzBasic=trpzBasic, trpzMobilityMemberTableSize=trpzMobilityMemberTableSize, trpzMobilityDomainName=trpzMobilityDomainName, trpzLicenseInfoTable=trpzLicenseInfoTable, trpzLicenseInfoTableSize=trpzLicenseInfoTableSize, trpzVersionString=trpzVersionString, trpzMobilityMemberTable=trpzMobilityMemberTable, trpzLicenseInfoGroup=trpzLicenseInfoGroup, trpzLicenseInfoEntryDescr=trpzLicenseInfoEntryDescr, PYSNMP_MODULE_ID=trpzBasic, trpzMobilityMemberEntry=trpzMobilityMemberEntry, trpzSerialNumber=trpzSerialNumber, trpzSwMajorVersionNumber=trpzSwMajorVersionNumber, trpzMobilityMemberEntryAddr=trpzMobilityMemberEntryAddr, trpzLicenseInfoEntry=trpzLicenseInfoEntry, trpzLicenseInfoEntryValue=trpzLicenseInfoEntryValue, trpzMobilityDomainInfo=trpzMobilityDomainInfo, trpzLicenseInfoEntryFeature=trpzLicenseInfoEntryFeature, trpzMobilitySeedIp=trpzMobilitySeedIp)
| python |
#!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Definition of targets to build distribution packages."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
flake_retries=0, timeout_retries=0):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
docker_args=[]
for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
environ=docker_env,
shortname='build_package.%s' % (name),
timeout_seconds=30*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name, cmdline, environ=None, cwd=None, shell=False,
flake_retries=0, timeout_retries=0):
"""Creates jobspec."""
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
cwd=cwd,
shortname='build_package.%s' % (name),
timeout_seconds=10*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpPackage:
"""Builds C# nuget packages."""
def __init__(self, linux=False):
self.linux = linux
self.labels = ['package', 'csharp']
if linux:
self.name = 'csharp_package_dotnetcli_linux'
self.labels += ['linux']
else:
self.name = 'csharp_package_dotnetcli_windows'
self.labels += ['windows']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.linux:
return create_docker_jobspec(
self.name,
'tools/dockerfile/test/csharp_coreclr_x64',
'src/csharp/build_packages_dotnetcli.sh')
else:
return create_jobspec(self.name,
['build_packages_dotnetcli.bat'],
cwd='src\\csharp',
shell=True)
def __str__(self):
return self.name
class NodePackage:
"""Builds Node NPM package and collects precompiled binaries"""
def __init__(self):
self.name = 'node_package'
self.labels = ['package', 'node', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/artifacts/build_package_node.sh')
class RubyPackage:
"""Collects ruby gems created in the artifact phase"""
def __init__(self):
self.name = 'ruby_package'
self.labels = ['package', 'ruby', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/artifacts/build_package_ruby.sh')
class PythonPackage:
"""Collects python eggs and wheels created in the artifact phase"""
def __init__(self):
self.name = 'python_package'
self.labels = ['package', 'python', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/artifacts/build_package_python.sh')
class PHPPackage:
"""Copy PHP PECL package artifact"""
def __init__(self):
self.name = 'php_package'
self.labels = ['package', 'php', 'linux']
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_x64',
'tools/run_tests/artifacts/build_package_php.sh')
def targets():
"""Gets list of supported targets"""
return [CSharpPackage(),
CSharpPackage(linux=True),
NodePackage(),
RubyPackage(),
PythonPackage(),
PHPPackage()]
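
def _example_list_packages():
  # Illustrative helper (not part of the original script): the surrounding
  # run_tests driver consumes targets() roughly like this, iterating over the
  # packages before dispatching their build jobspecs.
  for target in targets():
    print('%s (labels: %s)' % (target.name, ', '.join(target.labels)))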
| python |
"""Module with implementation of the Grid classes."""
from bubblebox.library.create import Dataset,Block,Data
from bubblebox.library.utilities import Action
import numpy
import pymorton
class GridBase(Dataset):
"""Base class for the Grid."""
type_ = 'base'
def __init__(self, varlist, nx, ny, xmin, xmax, ymin, ymax, xblocks=1, yblocks=1,
user_bc_type=None, user_bc_val=None):
"""
Initialize the Grid object and allocate the data.
Parameters
----------
varlist : list of strings
List of names for the variables to create.
nx : integer
Number of cells in the x-direction.
ny : integer
Number of cells in the y-direction.
xblocks : integer
Number of blocks in the x-direction
yblocks : integer
Number of blocks in the y-direction
xmin : float
Domain limit at the left side.
xmax : float
Domain limit at the right side.
ymin : float
Domain limit at the bottom side.
ymax : float
Domain limit at the top side.
user_bc_type : dictionary of (string, list) items
User-defined boundary types to overwrite default ones.
user_bc_val : dictionary of (string, list) items
User-defined boundary values to overwrite default ones.
"""
# Perform checks
if nx%xblocks or ny%yblocks:
raise ValueError('[flowx.domain.GridBase]:(nx,ny) must be exactly '+
'divisible by (xblocks,yblocks)')
elif (xblocks%2 or yblocks%2) and xblocks!=1 and yblocks!=1:
raise ValueError('[flowx.domain.GridBase]:(xblocks,yblocks) must be exactly '+
'divisible by 2')
# Organize data at coarsest level
dx,dy = abs(xmax-xmin)/nx,abs(ymax-ymin)/ny
nxb,nyb = int(nx/xblocks),int(ny/yblocks)
levels = None
# Save grid attributes at coarsest level
self.nx,self.ny = nx,ny
self.dx,self.dy = dx,dy
# Initialize block attributes
block_attributes = self.__class__.initialize_block_attributes(xblocks,yblocks,dx,dy,
xmin,xmax,ymin,ymax,levels)
# Initialize data attributes
nblocks = len(block_attributes)
data_attributes = self.__class__.initialize_data_attributes(nblocks,nxb,nyb,varlist)
# Create data and block objects
data = Data(**data_attributes)
blocklist = [Block(data,**attributes) for attributes in block_attributes]
# Call base class constructor
super().__init__(blocklist,data)
# Set gridline coordinates
self.set_gridline_coordinates()
# Set boundary blocks
#self.set_domain_boundaries()
# Boundary condition information
self.bc_type = {}
self.bc_val = {}
self.set_default_bc(varlist)
if user_bc_type is not None and user_bc_val is not None:
self.set_user_bc(user_bc_type, user_bc_val)
self.fill_guard_cells(varlist)
def __del__(self):
"""Destructor"""
self.purge()
@staticmethod
def initialize_block_attributes(xblocks,yblocks,dx,dy,xmin,xmax,ymin,ymax,levels):
"""Private method for initialization"""
block_attributes = []
for lblock in range(xblocks*yblocks):
iloc,jloc = pymorton.deinterleave2(lblock)
imin,imax = [xmin + (iloc/xblocks)*(xmax-xmin), xmin + ((iloc+1)/xblocks)*(xmax-xmin)]
jmin,jmax = [ymin + (jloc/yblocks)*(ymax-ymin), ymin + ((jloc+1)/yblocks)*(ymax-ymin)]
block_attributes.append({'dx' : dx,
'dy' : dy,
'xmin' : imin,
'xmax' : imax,
'ymin' : jmin,
'ymax' : jmax,
'tag' : lblock})
return block_attributes
@staticmethod
def initialize_data_attributes(nblocks,nxb,nyb,varlist):
"""Private method for initialization"""
raise NotImplementedError
def set_gridline_coordinates(self):
"""Set the gridline coordinates."""
raise NotImplementedError
def addvar(self,varkey):
"""Add a variable"""
super().addvar(varkey)
self.set_default_bc(varkey)
def set_default_bc(self,varlist):
"""Set default boundary conditions (homogeneous Neumann)."""
if type(varlist) is str:
varlist = [varlist]
default_bc_type = 4 * ['neumann']
default_bc_val = 4 * [0.0]
num = len(varlist)
self.bc_type = {**self.bc_type, **dict(zip(varlist, num * [default_bc_type]))}
self.bc_val = {**self.bc_val, **dict(zip(varlist, num * [default_bc_val]))}
def set_user_bc(self, user_bc_type, user_bc_val):
"""Overwrite default boundary conditions with user-provided ones.
Parameters
----------
user_bc_type : dictionary of (string, list) items
User-defined boundary types.
user_bc_val : dictionary of (string, list) items
User-defined boundary values.
"""
# Overwrite default boundary types
self.bc_type = {**self.bc_type, **user_bc_type}
# Overwrite default boundary values
self.bc_val = {**self.bc_val, **user_bc_val}
def update_bc_val(self, user_bc_val):
"""Overwrite boundary condition values with user-provided ones.
Parameters
----------
user_bc_val : dictionary of (string, list) items
User-defined boundary values.
"""
self.bc_val = {**self.bc_val, **user_bc_val}
def update_bc_type(self, user_bc_type):
self.bc_type = {**self.bc_type, **user_bc_type}
def compute_error(self, eror, ivar, asol):
"""Compute the error between the numerical and analytical solutions.
Error is defined as the absolute difference between the two solutions.
Arguments
---------
eror : string
Name of the grid variable of the error.
ivar : string
Name of the grid variable of the numerical solution.
asol : string
Name of the grid variable of the analytical solution.
"""
for block in self.blocklist:
block[eror] = numpy.abs(block[ivar] - block[asol])
def get_l2_norm(self, eror):
"""Compute the L2 norm for a given variable.
Arguments
---------
eror : string
Name of the grid variable for which norm is desired
Returns
-------
l2_norm : float
The L2-norm.
"""
l2_norm = 0.
for block in self.blocklist:
l2_norm = l2_norm + (numpy.sqrt(numpy.sum(block[eror]**2)) /
((self.nxb+2*self.xguard) * (self.nyb+2*self.yguard)))
return l2_norm/self.nblocks
def fill_guard_cells(self, varlist, **kwargs):
"""Fill value at guard cells for given variable names.
Parameters
----------
varlist : string or list of strings
Name of variables to update.
"""
self.halo_exchange(varlist, **kwargs)
# Convert single string to a list
if type(varlist) is str:
varlist = [varlist]
locations = ['xlow','xhigh','ylow','yhigh']
# TODO add a call to exchange data between blocks
# TODO figure out how to tag blocks at boundary etc.
# TODO make this efficient
for varkey in varlist:
bc_type_var = self.bc_type[varkey]
bc_val_var = self.bc_val[varkey]
for block in self.blocklist:
deltas = [block.dx, block.dx, block.dy, block.dy]
neighbors = [block.neighdict[location] for location in locations]
blockdata = block[varkey]
for location,neighbor,delta,bc_type,bc_val in zip(locations,neighbors,deltas,
bc_type_var,bc_val_var):
if neighbor is None:
if bc_type == 'neumann':
self.__class__.fill_guard_cells_neumann(blockdata,location,bc_val,delta)
elif bc_type == 'dirichlet':
self.__class__.fill_guard_cells_dirichlet(blockdata,location,bc_val)
elif bc_type == 'outflow':
self.__class__.fill_guard_cells_dirichlet(blockdata,location,bc_val)
elif bc_type == 'projection':
self.__class__.fill_guard_cells_projection(blockdata,location)
                        elif bc_type is None:
                            pass
else:
raise ValueError('Boundary type "{}" not implemented'.format(bc_type))
@staticmethod
def fill_guard_cells_dirichlet(blockdata, loc, bc_val):
"""Fill guard cells using a Dirichlet condition.
Method implemented in child classes.
Parameters
----------
loc : string
Boundary location;
            choices: ['xlow', 'xhigh', 'ylow', 'yhigh'].
        bc_val : float
            Dirichlet boundary value.
"""
raise NotImplementedError()
@staticmethod
def fill_guard_cells_neumann(blockdata, loc, bc_val, delta):
"""Fill guard cells using a Neumann condition.
Parameters
----------
loc : string
Boundary location;
            choices: ['xlow', 'xhigh', 'ylow', 'yhigh'].
bc_val : float
Neumann boundary value.
delta : float
Grid-cell width.
"""
if loc == 'xlow':
blockdata[:,:,0] = bc_val * delta + blockdata[:,:,1]
elif loc == 'xhigh':
blockdata[:,:,-1] = bc_val * delta + blockdata[:,:,-2]
elif loc == 'ylow':
blockdata[:,0,:] = bc_val * delta + blockdata[:,1,:]
elif loc == 'yhigh':
blockdata[:,-1,:] = bc_val * delta + blockdata[:,-2,:]
else:
raise ValueError('Unknown boundary location "{}"'.format(loc))
@staticmethod
def fill_guard_cells_projection(blockdata, loc):
"""Fill guard cells with projection BC.
Parameters
----------
loc : string
Boundary location;
            choices: ['xlow', 'xhigh', 'ylow', 'yhigh'].
"""
if loc == 'xlow':
blockdata[:,:,0] = 2*blockdata[:,:,1] - blockdata[:,:,2]
elif loc == 'xhigh':
blockdata[:,:,-1] = 2*blockdata[:,:,-2] - blockdata[:,:,-3]
elif loc == 'ylow':
blockdata[:,0,:] = 2*blockdata[:,1,:] - blockdata[:,2,:]
elif loc == 'yhigh':
blockdata[:,-1,:] = 2*blockdata[:,-2,:] - blockdata[:,-3,:]
else:
raise ValueError('Unknown boundary location "{}"'.format(loc))
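
if __name__ == '__main__':
    # Illustrative check (not part of the library): the static guard-cell fills
    # operate on a (nvar, ny, nx) array, so they can be exercised directly.
    # With a homogeneous Neumann condition (bc_val = 0) the guard column simply
    # copies its interior neighbour.
    _demo = numpy.arange(16, dtype=float).reshape(1, 4, 4)
    GridBase.fill_guard_cells_neumann(_demo, 'xlow', 0.0, 0.1)
    assert numpy.allclose(_demo[:, :, 0], _demo[:, :, 1])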
| python |
COLUMNS = [
'TIPO_REGISTRO',
'NRO_RV_ORIGINAL',
'NRO_CARTAO',
'NRO_PV_ORIGINAL',
'DT_TRANSACAO_CV',
'NRO_NSU',
'VL_TRANSACAO_ORIGINAL',
'NRO_AUTORIZACAO',
'TID',
'NRO_PEDIDO'
] | python |
"""Setup script"""
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
from Cython.Build import cythonize
import numpy as np
my_modules = cythonize("pysparselp/*.pyx", annotate=True)
libname = "pysparselp"
setup(
name=libname,
version="0.0.1",
author="Martin de La Gorce",
author_email="[email protected]",
description="Python algorithms to solve linear programming problems with with sparse matrices",
packages=find_packages(),
license="MIT",
    ext_modules=my_modules,  # additional source file(s)
include_dirs=[np.get_include()],
package_data={"pysparselp": ["*.pyx"]},
install_requires=["numpy", "scipy"],
)
| python |
import pandas as pd
class HelperDataFrame(pd.DataFrame):
"""Inherits from a Pandas Data Frame and adds a couple methods."""
def __init__(self, df):
super().__init__(data=df)
# self.random_state = 42
def randomize(self):
"""Shuffles observations of a dataframe"""
return self.sample(frac=1, random_state=42)
def null_count(self):
"""Get total null cells"""
return self.isnull().sum().sum()
if __name__ == "__main__":
print("HelperDataFrame")
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
app
~~~~~~~~~~~
The Flask application module.
:author: Jeff Kereakoglow
:date: 2014-11-14
:copyright: (c) 2014 by Alexis Digital
:license: MIT, see LICENSE for more details
"""
import os
from utils import prepare_json_response
from flask import Flask, jsonify, request
from werkzeug.contrib.cache import SimpleCache
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.httpauth import HTTPBasicAuth
# Initialize core objects
app = Flask(__name__)
app.config.from_object("config")
cache = SimpleCache()
db = SQLAlchemy(app)
auth = HTTPBasicAuth()
#-- Models
from app.models import user
if not os.path.exists("db.sqlite"):
db.create_all()
#-- Controllers
from app.controllers import default
from app.controllers import user
app.register_blueprint(default.mod)
app.register_blueprint(user.mod)
#-- Error handlers
# Override the default handlers with JSON responses
@app.errorhandler(400)
def bad_request(error):
"""
Renders 400 response
:returns: JSON
:rtype: flask.Response
"""
return jsonify(
prepare_json_response(
message="Error 400: Bad request",
success=False,
data=None
)
), 400
@app.errorhandler(401)
def unauthorized(error):
    """
    Renders 401 response
:returns: JSON
:rtype: flask.Response
"""
return jsonify(
prepare_json_response(
message="Error 401: Unauthorized",
success=False,
data=None
)
), 401
@app.errorhandler(403)
def forbidden(error):
"""
Renders 403 response
:returns: JSON
:rtype: flask.Response
"""
return jsonify(
prepare_json_response(
message="Error 403: Forbidden",
success=False,
data=None
)
), 403
@app.errorhandler(404)
def not_found(error):
"""
Renders 404 response
:returns: JSON
:rtype: flask.Response
"""
return jsonify(
prepare_json_response(
message="Error 404: Not found",
success=False,
data=None
)
), 404
@app.errorhandler(405)
def method_not_allowed(error):
"""
Renders 405 response
:returns: JSON
:rtype: flask.Response
"""
return jsonify(
prepare_json_response(
message="Error 405: Method not allowed",
success=False,
data=None
)
), 405
@app.errorhandler(500)
def internal_server_error(error):
"""
Renders 500 response
:returns: JSON
:rtype: flask.Response
"""
return jsonify(
prepare_json_response(
message="Error 500: Internal server error",
success=False,
data=None
)
    ), 500
| python |
"""
This falls into my "bad idea that I'm playing with" category. Withold judgement and ye lunches.
Upgraded to plausible.
"""
from importlib import import_module
class Singleton(type):
instance_list = {}
def __call__(klass, *args, **kwargs):
if not klass in klass.instance_list:
klass.instance_list[klass] = super(Singleton, klass).__call__(*args, **kwargs)
return klass.instance_list[klass]
def lockable_class(self):
self.__is_locked = False
return self
class MissingPluginException(Exception):
pass
class attach_methods(object):
def __init__(self, *modules, **kwargs):
self.methods = {}
#allow installing the functions under a specific dictionary
self.method_dict_name = kwargs.get("method_dict_name", None)
self.filter_attribute = kwargs.get("filter_attribute", None)
self.modules = modules
self.methods = {}
def __call__(self, klass):
self.get_methods(klass)
self.install_methods(klass)
return klass
def get_methods(self, klass):
filter_attribute = getattr(klass, "filter_attribute", self.filter_attribute)
for _module in self.modules:
imported_module = import_module(_module)
for method in dir(imported_module):
resolved_method = getattr(imported_module, method)
if (method[0:2] != "__" and not filter_attribute) or (filter_attribute and getattr(resolved_method, filter_attribute, False)):
self.methods[method] = resolved_method
def install_methods(self, klass):
method_dict_name = getattr(klass, "method_dict_name", self.method_dict_name)
if method_dict_name:
setattr(klass, method_dict_name, self.methods)
else:
for method in self.methods:
setattr(klass, method, self.methods[method])
def plugin(func):
def wrapped(*args, **kwargs):
print "Executing " + func.__name__
return func(*args, **kwargs)
set_function_attribute(wrapped, "plugin", True)
return wrapped
def set_function_attribute(func, name, value):
setattr(func, name, value)
class PluggableObject(object):
filter_attribute = "plugin"
method_dict_name = "plugins"
def __init__(self):
pass
def dispatch_plugin(self, name, *args, **kwargs):
try:
plugin = self.plugins[name]
except KeyError:
raise MissingPluginException("There is not a plugin installed for %s" % name)
return plugin(self, *args, **kwargs) | python |
# -*- coding: utf-8 -*-
# @FILE : consts.py
# @AUTH : model_creater
| python |
#!/usr/bin/env python
import numpy as np
import math
from multi_link_common import *
#height is probably 0 from multi_link_common.py
#total mass and total length are also defined in multi_link_common.py
num_links = 8.0
link_length = total_length/num_links
link_mass = total_mass/num_links
ee_location = np.matrix([0., -link_length*8.0, height]).T
#bod_shapes = ['cube', 'cube', 'cube', 'cube', 'cube', 'cube', 'cube','cube']
bod_shapes = ['capsule', 'capsule', 'capsule', 'capsule', 'capsule', 'capsule', 'capsule', 'capsule']
bod_dimensions = [[0.03, 0.03, link_length]]*8
bod_com_position = [[0., -link_length/2., height],
[0., -3.0/2.0*link_length, height],
[0., -5.0/2.0*link_length, height],
[0., -7.0/2.0*link_length, height],
[0., -9.0/2.0*link_length, height],
[0., -11.0/2.0*link_length, height],
[0., -13.0/2.0*link_length, height],
[0., -15.0/2.0*link_length, height]]
bod_color = [[0.4, 0.4, 0.4, 1], [0.8, 0.8, 0.8, 1], [0.33, 0.33, 0.33, 1], [0.5, 0.5, 0.5, 1], [0.7, 0.7, 0.7, 1], [0.45, 0.45, 0.45, 1], [0.35, 0.35, 0.35, 1], [0.6, 0.6, 0.6, 1]]
bod_num_links = 8
bod_mass = [link_mass]*bod_num_links
bod_names = ['link1', 'link2', 'link3', 'link4', 'link5', 'link6', 'link7', 'link8']
bodies ={'shapes':bod_shapes, 'dim':bod_dimensions, 'num_links':bod_num_links,
'com_pos':bod_com_position, 'mass':bod_mass, 'name':bod_names, 'color':bod_color}
b_jt_axis = [[0.,0.,1.],[0.,0.,1.], [0.,0.,1.], [0.,0.,1.],[0.,0.,1.], [0.,0.,1.], [0.,0.,1.], [0.,0.,1.]]
b_jt_anchor = [[0., 0., height],
[0., -link_length, height],
[0., -2*link_length, height],
[0., -3*link_length, height],
[0., -4*link_length, height],
[0., -5*link_length, height],
[0., -6*link_length, height],
[0., -7*link_length, height]]
b_jt_kp = [25., 10., 8., 6., 5., 2.5, 1.5, 1.] #[30., 20., 15., 5., 4., 3., 2., 1.]
b_jt_kd = [1.8, 1.0, 1.0, 1.0, 1.2, 0.8, 0.5, 0.2] #[16.1, 10., 8., 3., 2., 1., 0.8, 0.5]
b_jt_limits_max = np.radians([180, 120, 120, 120, 120, 120, 120, 120]).tolist()
b_jt_limits_min = np.radians([-180, -120, -120, -120, -120, -120, -120, -120]).tolist()
b_jt_axis = [[0.,0.,1.],[0.,0.,1.], [0.,0.,1.], [0.,0.,1.],[0.,0.,1.], [0.,0.,1.], [0.,0.,1.], [0.,0.,1.]]
b_jt_attach = [[0, -1], [1, 0], [2,1], [3,2], [4,3], [5,4], [6,5], [7,6]]
b_jt_start = [-2.06, 0.766, 0.446, 0.467, 0.811, 0.882, 0.775, 0.243] #(gives ee pos of [0, -0.2, 0]
b_jts = {'anchor':b_jt_anchor, 'axis':b_jt_axis, 'jt_lim_max':b_jt_limits_max,
'jt_lim_min':b_jt_limits_min, 'jt_init':b_jt_start, 'jt_attach':b_jt_attach,
'jt_stiffness':b_jt_kp, 'jt_damping':b_jt_kd}
| python |
import streamlit as st
st.sidebar.subheader("About dspy")
st.sidebar.info("A webapp that is running on python and teaching python!")
st.sidebar.markdown("""
<img src="https://media.giphy.com/media/3o7527pa7qs9kCG78A/giphy.gif" width="200">
""", unsafe_allow_html=True)
st.title("`dspy` - Data Science with Python")
st.markdown("""
___
""")
st.subheader("Please select what you would like to do")
features = ["python 101 - Learn the basics of python",
"pyPrac - Solve problems using python",
"pandas - Learn data analysis and manipulation",]
selection = st.radio("", features)
if selection == features[0]:
st.balloons()
else:
st.write("")
| python |
#! /usr/bin/env python3
import sys
import os
import cmd2
import logging
import inspect
# local modules
import subcmd
from subcmdfactory import SubCmdFactory
from config import Config, Observer, Subject
class QsmShell(cmd2.Cmd, Observer):
intro = 'Type help or ? to list the command.\n'
def emptyline(self):
""" Disable the last command when hitting enter """
pass
def do_shell(self, line):
"""Run a shell command by use a ! prefix """
print ("running shell command:", line)
output = os.popen(line).read()
print (output)
self.last_output = output
def do_exit(self, arg):
""" exit from the shell """
return True
def do_EOF(self, arg):
return True
def regCmds(self, cmds):
""" Register all of the support commands into cmd2
"""
for cmd in cmds:
self.regCmd(cmd)
def regCmd(self, cmd):
""" based cmd name to register the method with
do_xxx
help_xxx
complete_xxx
"""
funcdef = """def do_{}(self, arg):
SubCmdFactory().Factory('{}').run(arg)""".format(cmd, cmd)
assign = "QsmShell.do_{0} = do_{0}".format(cmd)
exec(funcdef)
exec(assign)
funcdef = """def help_{}(self):
print(SubCmdFactory().Factory('{}').__doc__)""".format(cmd, cmd)
assign = "QsmShell.help_{0} = help_{0}".format(cmd)
exec(funcdef)
exec(assign)
funcdef = """def complete_{}(self, text, line, begidx, endidx):
subcls = SubCmdFactory().Factory('{}')
return [ i for i in subcls.getSupportCmds() if i.startswith(text)]
""".format(cmd, cmd.capitalize())
assign = "QsmShell.complete_{0} = complete_{0}".format(cmd)
exec(funcdef)
exec(assign)
def __init__(self, **kwarg):
""" load the shell environment from config
"""
# Attach the shell to the config publisher.
Config().attach(self)
self.__setPrompt(Config().current)
super().__init__(**kwarg)
def __setPrompt(self, env):
"""
setup the prompt shell by providing a dict.
"""
self.prompt = "{}:{}({})>".format(env.get('host'), env.get('user'), env.get('passw'))
def update(self, subject: Subject) -> None:
self.__setPrompt(subject)
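
# Illustrative launch sketch (not part of the original module; the actual entry
# point may live elsewhere). regCmds() generates do_/help_/complete_ handlers
# for every registered sub-command name before the REPL starts; the command
# names below are placeholders.
#
#     shell = QsmShell()
#     shell.regCmds(['connect', 'status'])
#     shell.cmdloop()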
| python |
import os
import time
import gpustat
import numpy as np
from redlock import Redlock
GPU_LOCK_TIMEOUT = 5000 # ms
class GPUManager(object):
def __init__(self, verbose: bool=False):
self.lock_manager = Redlock([{"host": "localhost", "port": 6379, "db": 0}, ])
self.verbose = verbose
def get_free_gpu(self):
"""
If some GPUs are available, try reserving one by checking out an exclusive redis lock.
If none available or can't get lock, sleep and check again.
"""
while True:
gpu_ind = self._get_free_gpu()
if gpu_ind is not None:
return gpu_ind
if self.verbose:
print(f'pid {os.getpid()} sleeping')
time.sleep(GPU_LOCK_TIMEOUT / 1000)
def _get_free_gpu(self):
try:
available_gpu_inds = [
gpu.index
for gpu in gpustat.GPUStatCollection.new_query()
if gpu.memory_used < 0.5 * gpu.memory_total
]
except Exception:
            return 0  # Return a dummy GPU index if no CUDA GPUs are installed
if available_gpu_inds:
gpu_ind = np.random.choice(available_gpu_inds)
if self.verbose:
print(f'pid {os.getpid()} picking gpu {gpu_ind}')
if self.lock_manager.lock(f'gpu_{gpu_ind}', GPU_LOCK_TIMEOUT):
return int(gpu_ind)
if self.verbose:
print(f'pid {os.getpid()} couldnt get lock')
return None
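
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): assumes a redis
    # server on localhost for the lock manager; without CUDA GPUs the dummy
    # index from _get_free_gpu() is returned.
    manager = GPUManager(verbose=True)
    print(f'reserved gpu: {manager.get_free_gpu()}')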
| python |
from matplotlib import pyplot as plt
from matplotlib import animation
import random
import numpy as np
from boids.flock import Flock
from boids.flight import Flight
from argparse import ArgumentParser
import yaml
import os
from nose.tools import assert_equal
from nose.tools import assert_raises
| python |
import orca
import numpy as np
from urbansim.utils import misc
def register_skim_access_variable(
column_name, variable_to_summarize, impedance_measure,
distance, skims_table, agg=np.sum, log=False):
"""
Register skim-based accessibility variable with orca.
Parameters
----------
column_name : str
Name of the orca column to register this variable as.
variable_to_summarize : str
Name of the zonal variable to summarize.
impedance_measure : str
Name of the skims column to use to measure inter-zone impedance.
distance : int
Distance to query in the skims (e.g. 30 minutes travel time).
skims_table : orca table wrapper
Table containing the skim-based impedance measures.
agg : callable, optional
Aggregation function applied over the zones within range (defaults to np.sum).
log : bool, optional
If True, the result is log-transformed with np.log1p.
Returns
-------
column_func : function
"""
@orca.column('zones', column_name, cache=True, cache_scope='iteration')
def column_func(zones):
df = skims_table.to_frame()
results = misc.compute_range(
df, zones.get_column(variable_to_summarize),
impedance_measure, distance, agg=agg)
if len(results) < len(zones):
results = results.reindex(zones.index).fillna(0)
# add vars from orig zone, typically not included in skims
results = results + zones[variable_to_summarize]
if log:
results = np.log1p(results)
return results
return column_func
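# Hedged usage sketch (table and column names below are illustrative, not from the original module):
# skims = orca.get_table('skims')
# register_skim_access_variable(
#     'jobs_within_30_min', 'total_jobs', 'am_travel_time', 30, skims, agg=np.sum, log=True)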
| python |
import chainer
from chainer.dataset import dataset_mixin
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
import chainercv
from collections import defaultdict
import glob
import os
import numpy as np
import xml.etree.ElementTree as ET
class DogDataset(dataset_mixin.DatasetMixin):
def __init__(self, crop=False, size=32, use_cache=False, **kwargs):
root = '../input/all-dogs/all-dogs/'
paths = sorted(os.listdir(root))
self.crop = crop
self.size = size
self.use_cache = use_cache
if self.crop:
self._dataset = DogCropDataset()
else:
self._dataset = chainer.datasets.ImageDataset(paths, root=root)
self.idx_cache_dict = dict()
def __len__(self):
return len(self._dataset)
def get_example(self, i):
if self.crop:
if self.use_cache and i in self.idx_cache_dict:
path, label = self.idx_cache_dict[i]
img = chainercv.utils.read_image(path)
else:
img, bbox, label = self._dataset[i]
# TODO: translation
ymin, xmin, ymax, xmax = bbox
img = img[:, ymin:ymax, xmin:xmax]
if self.use_cache:
path = '/kaggle/{}.png'.format(i)
chainercv.utils.write_image(img, path)
self.idx_cache_dict[i] = (path, label)
else:
img = self._dataset[i]
label = 0
# img = chainercv.transforms.resize(img, (32, 32))
img = chainercv.transforms.scale(img, self.size, fit_short=True)
img = chainercv.transforms.random_crop(img, (self.size, self.size))
img = chainercv.transforms.random_flip(img, x_random=True)
img = (img / 128. - 1.).astype(np.float32)
img += np.random.uniform(size=img.shape, low=0., high=1. / 128)
return img, label
class DogBBoxDataset(GetterDataset):
def __init__(self):
super(DogBBoxDataset, self).__init__()
root_image = '../input/all-dogs/all-dogs/'
root_annot = '../input/annotation/Annotation/'
annots = glob.glob(root_annot + '*/*')
annots = sorted(annots)
breeds = os.listdir(root_annot)
breeds = ['-'.join(breed.split('-')[1:]) for breed in breeds]
self.names = list(set(breeds))
self.image_annot_dict = defaultdict(list)
for annot in annots:
annot_ = annot.split('/')
breed, path = annot_[:-1], annot_[-1]
self.image_annot_dict[path + '.jpg'].append(annot)
image_paths = sorted(list(self.image_annot_dict.keys()))
# no image for ../input/all-dogs/all-dogs/n02105855_2933.jpg
image_paths = [path for path in image_paths if os.path.isfile(os.path.join(root_image, path))]
self._dataset = chainer.datasets.ImageDataset(image_paths, root=root_image)
self.add_getter('image', self.get_image)
self.add_getter(('bbox', 'label'), self.get_annotation)
def __len__(self):
return len(self._dataset)
def get_image(self, i):
img = self._dataset[i]
return img
def get_annotation(self, i):
path = self._dataset._paths[i]
annots = self.image_annot_dict[path]
bbox = list()
label = list()
for annot in annots:
tree = ET.parse(annot)
root = tree.getroot()
objects = root.findall('object')
for o in objects:
bndbox = o.find('bndbox')
ymin = int(bndbox.find('ymin').text)
xmin = int(bndbox.find('xmin').text)
ymax = int(bndbox.find('ymax').text)
xmax = int(bndbox.find('xmax').text)
bbox.append((ymin, xmin, ymax, xmax))
nm = o.find('name')
label.append(self.names.index(nm.text))
bbox = np.array(bbox)
label = np.array(label)
return bbox, label
class DogCropDataset(dataset_mixin.DatasetMixin):
def __init__(self):
self.dataset = DogBBoxDataset()
self.names = self.dataset.names
self.indices = list()
self.bboxes = list()
self.labels = list()
for i in range(len(self.dataset)):
bbox, label = self.dataset.get_example_by_keys(i, (1, 2))
self.indices.append(np.ones_like(label) * i)
self.bboxes.append(bbox)
self.labels.append(label)
self.indices = np.concatenate(self.indices, axis=0)
self.bboxes = np.concatenate(self.bboxes, axis=0)
self.labels = np.concatenate(self.labels, axis=0)
def __len__(self):
return len(self.labels)
def get_example(self, i):
idx = self.indices[i]
img, = self.dataset.get_example_by_keys(idx, (0,))
bbox, label = self.bboxes[i], self.labels[i]
return img, bbox, label
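# Hedged usage sketch (paths above assume the Kaggle generative-dog-images layout):
# train_data = DogDataset(crop=True, size=64, use_cache=True)
# img, label = train_data.get_example(0)  # CHW float32 image roughly in [-1, 1], breed index label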
| python |
#! /usr/bin/env python
import numpy as np
import cv2
import glob
import yaml
class CameraCalib :
def __init__(self,img_path='/tmp',CHESSX=8,CHESSY=6,extension=".jpg"):
"""
Initialize Camera Calibration Class
@param: img_path = [path to get images], CHESSX = [chessboard corners in X direction ]
CHESSY = [chessboard corners in Y direction]
"""
self.img_path = img_path
self.chessx = CHESSX
self.chessy = CHESSY
self.data = {}
self.file_extension = extension
def show_image(self,image,time=1000):
"""
Image Visualization for [time] msecs.
@param: image, time [in msecs]
"""
y = 540
x = 1.5*y
imS = cv2.resize(image, (int(x), y)) # Resize image
cv2.imshow("output", imS)
cv2.waitKey(time)
def calcReprojectionError(self, objpoints, imgpoints, mtx, dist, rvecs, tvecs):
mean_error = 0
for i in range(len(objpoints)):
imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
mean_error += error
print("Re-projection Error: {}".format(mean_error / len(objpoints)))
def compute(self,visualization=True,save_yaml=True):
"""
Camera calibration and camera matrix computation.
@param: visualization = [True|False] to enable imgs visualization,
save_yaml = [True|False] to save image in a yaml file.
"""
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((self.chessy*self.chessx,3), np.float32)
objp[:,:2] = np.mgrid[0:self.chessx,0:self.chessy].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob(self.img_path+'/*'+self.file_extension)
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# show grey image
if(visualization):
self.show_image(gray)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (self.chessx,self.chessy),None)
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (self.chessx,self.chessy), corners2,ret)
if(visualization):
self.show_image(img)
cv2.destroyAllWindows()
# calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# transform the matrix and distortion coefficients to writable lists
self.data = {'camera_matrix': np.asarray(mtx).tolist(), 'dist_coeff': np.asarray(dist).tolist()}
self.calcReprojectionError(objpoints,imgpoints,mtx,dist,rvecs,tvecs)
# print results
print("Camera Calibration Matrix:\n",self.data)
# and save it to a file
if (save_yaml):
with open("calibration_matrix.yaml", "w") as f:
yaml.dump(self.data, f)
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-cw", "--chessboard_width", required=False, default="8", help="number of intersections in x axis")
ap.add_argument("-ch", "--chessboard_height", required=False, default="6", help="number of intersections in y axis")
ap.add_argument("-sd", "--square_dimension", required=False, default="0.026", help="square dimension in meters")
ap.add_argument("-p", "--path", required=True, help="path to images folder")
ap.add_argument("-e", "--file_extension", required=False, default=".jpg", help="extension of images")
ap.add_argument("-a", "--auto_mode", required=False, default="True", \
help="automatic mode uses all images inside images folder to run calibration")
args = vars(ap.parse_args())
auto_mode = eval(args["auto_mode"])
CHESSBOARD_WIDTH = int(args["chessboard_width"])
CHESSBOARD_HEIGHT = int(args["chessboard_height"])
CALIBRATION_SQUARE_DIMENSION = float(args["square_dimension"]) # meters
# initialize class
cam_calibration = CameraCalib(img_path=args["path"],CHESSX=CHESSBOARD_WIDTH, CHESSY=CHESSBOARD_HEIGHT,extension=args["file_extension"])
# Compute Calibration
cam_calibration.compute(True)
| python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from edward.util.tensorflow import get_control_variate_coef
class test_get_control_variate_coef(tf.test.TestCase):
def test_calculate_correct_coefficient(self):
with self.test_session():
f = tf.constant([1.0, 2.0, 3.0, 4.0])
h = tf.constant([2.0, 3.0, 8.0, 1.0])
self.assertAllClose(get_control_variate_coef(f, h).eval(),
0.03448276)
if __name__ == '__main__':
tf.test.main()
| python |
# Copyright 2020 Jiang Shenghu
# SPDX-License-Identifier: Apache-2.0
from tvm import topi
from ..poly import TensorTable, Statement, ScheduleTree
from .conv import PlainConv2d, Conv2d
def schedule(**kwargs):
init_t = 'stmt_init[n, c, h, w]'
calc_t = 'stmt_calc[n, c, h, w, i, j, k]'
output_constraints = '0 <= n < batch and 0 <= c < out_channel ' \
'and 0 <= h < out_height and 0 <= w < out_width'
calc_constraints = '0 <= i < in_group_size and 0 <= j < kernel_height and 0 <= k < kernel_width'
domain = '[batch, in_channel, in_height, in_width, out_channel, out_height, out_width, ' \
'kernel_height, kernel_width, in_group_size] -> {' \
f'{init_t}: {output_constraints}; ' \
f'{calc_t}: {output_constraints} and {calc_constraints}' \
'}'
outer_schedule = '[%s]' % ', '.join(map(
lambda x: f'{{{init_t}->[({x})];{calc_t}->[({x})]}}', ('n', 'c', 'h', 'w')))
inner_schedule = '[%s]' % ', '.join(map(
lambda x: f'{{{calc_t}->[({x})]}}', ('i', 'j', 'k')))
tree = ScheduleTree.from_yaml(f'''
domain: "{domain}"
child:
schedule: "{outer_schedule}"
permutable: 1
coincident: [1, 1, 1, 1]
child:
sequence:
- filter: "{{{init_t}}}"
- filter: "{{{calc_t}}}"
child:
schedule: "{inner_schedule}"
permutable: 1
coincident: [1, 1, 1]
''')
tree.apply_params(**kwargs)
return tree
def tensors(batch=1, in_channel=1, in_height=1, in_width=1, out_channel=1,
out_height=1, out_width=1, kernel_height=1, kernel_width=1, in_group_size=1, **_):
table = TensorTable()
table.add_tensor('x', [batch, in_channel, in_height, in_width])
table.add_tensor('weight', [out_channel, in_group_size, kernel_height, kernel_width])
table.add_tensor('out', [batch, out_channel, out_height, out_width])
return table
def statements(stride_height=1, stride_width=1, in_group_size=1, out_group_size=1, **_):
def stmt_init(t, n, c, h, w):
t['out'][n, c, h, w] = 0.0
def stmt_calc(t, n, c, h, w, i, j, k):
in_offset = c // out_group_size * in_group_size
t['out'][n, c, h, w] = t['out'][n, c, h, w] \
+ t['x'][n, i + in_offset, h * stride_height + j, w * stride_width + k] \
* t['weight'][c, i, j, k]
res = {}
for f in [stmt_init, stmt_calc]:
res[f.__name__] = Statement.from_calc(f)
return res
class PlainGroupedConv2d(PlainConv2d):
required_args = PlainConv2d.required_args + ['groups']
calculated_args = {**PlainConv2d.calculated_args, **{
'in_group_size': lambda **a: a['in_channel'] // a['groups'],
'out_group_size': lambda **a: a['out_channel'] // a['groups'],
}}
schedule_factory = schedule
tensors_factory = tensors
statements_factory = statements
topi_cuda_task_name = 'group_conv2d_nchw.cuda'
def topi_cuda_args(self, x=None, weight=None, out=None):
return [x, weight, [self.stride_height, self.stride_width], 0, 1, self.groups, out.dtype]
topi_cuda_calc_func = topi.cuda.group_conv2d_nchw
topi_cuda_schedule_func = topi.cuda.schedule_group_conv2d_nchw
topi_cuda_calc_ret_map = ['out']
class GroupedConv2d(Conv2d):
def __init__(self, groups=1, **kwargs):
super().__init__(**kwargs)
op_idx = self._ops.index(self.conv)
self.conv = PlainGroupedConv2d(name=self.name + '.conv', groups=groups, **self.conv.arguments)
self.weight = self.conv.tensors['weight']
self._ops[op_idx] = self.conv
| python |
# =============================================================================
# SIMULATION-BASED ENGINEERING LAB (SBEL) - http://sbel.wisc.edu
# University of Wisconsin-Madison
#
# Copyright (c) 2020 SBEL
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# at https://opensource.org/licenses/BSD-3-Clause
#
# =============================================================================
# Contributors: Nic Olsen, Milad Rakhsha
# =============================================================================
"""
Writes contact forces to files
"""
import numpy as np
def writeforcefile(c_pos, f_contact, filename, params):
with open(filename, 'w') as file:
file.write('cx,cy,cz,fn,fu,fw\n')
if len(f_contact) != 0:
for i in range(f_contact.shape[0]):
out = [str(c_pos[i*3 + j]) for j in range(3)] + [str(f_contact[i,j]) for j in range(3)]
file.write(','.join(out) + '\n')
else:
out = [str(0.0)]*6
file.write(','.join(out) + '\n')
def writeforcefile_with_pairs(contact_pair, f_contact, phi, frame, params):
file = open(params.prefix + "force" + frame + params.suffix, 'w')
file.write('bi,bj,Fn,Ft,phi\n')
if len(f_contact) != 0:
for i in range(f_contact.shape[0]):
out = [str(contact_pair[i][j]) for j in range(2)] + [str(f_contact[i,0]),str(np.linalg.norm(f_contact[i,1:2],2))] + [str(phi[i])]
file.write(','.join(out) + '\n')
else:
pass
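# Hedged usage sketch (params is assumed to expose 'prefix' and 'suffix' attributes as used above):
# writeforcefile(c_pos, f_contact, params.prefix + 'force_positions_0000' + params.suffix, params)
# writeforcefile_with_pairs(contact_pair, f_contact, phi, '0000', params)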
| python |
"""
Write a function with a list of ints as a parameter.
Return True if any two numbers sum to 0.
>>> add_to_zero([])
False
>>> add_to_zero([1])
False
>>> add_to_zero([1, 2, 3])
False
>>> add_to_zero([1, 2, 3, -2])
True
"""
| python |
# encoding=utf-8
# A collection of regular expressions for parsing Tweet text. The regular expression
# list is frozen at load time to ensure immutability. These regular expressions are
# used throughout the Twitter classes. Special care has been taken to make
# sure these regular expressions work with Tweets in all languages.
import re, string
REGEXEN = {} # :nodoc:
# Space is more than %20, U+3000 for example is the full-width space used with Kanji. Provide a short-hand
# to access both the list of characters and a pattern suitable for use with String#split
# Taken from: ActiveSupport::Multibyte::Handlers::UTF8Handler::UNICODE_WHITESPACE
UNICODE_SPACES = []
for space in [9, 10, 11, 12, 13, 32, 133, 160, 5760, 6158, 8192, 8193, 8194, 8195, 8196, 8197, 8198, 8199, 8200, 8201, 8202, 8232, 8233, 8239, 8287, 12288]:
UNICODE_SPACES.append(unichr(space))  # use the actual character, not its hex() string, so the pattern matches real whitespace
REGEXEN['spaces'] = re.compile(ur'|'.join(UNICODE_SPACES))
REGEXEN['at_signs'] = re.compile(ur'[%s]' % ur'|'.join(list(u'@@')))
REGEXEN['extract_mentions'] = re.compile(ur'(^|[^a-zA-Z0-9_])(%s)([a-zA-Z0-9_]{1,20})(?=(.|$))' % REGEXEN['at_signs'].pattern)
REGEXEN['extract_reply'] = re.compile(ur'^(?:[%s])*%s([a-zA-Z0-9_]{1,20})' % (REGEXEN['spaces'].pattern, REGEXEN['at_signs'].pattern))
REGEXEN['list_name'] = re.compile(ur'^[a-zA-Z\u0080-\u00ff].{0,79}$')
# Latin accented characters (subtracted 0xD7 from the range, it's a confusable multiplication sign. Looks like "x")
LATIN_ACCENTS = []
for accent in [192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 248, 249, 250, 251, 252, 253, 254, 255]:
LATIN_ACCENTS.append(unichr(accent))  # use the actual character, not its hex() string
REGEXEN['latin_accents'] = re.compile(ur''.join(LATIN_ACCENTS))
# Characters considered valid in a hashtag but not at the beginning, where only a-z and 0-9 are valid.
HASHTAG_CHARACTERS = re.compile(ur'[a-z0-9_%s]' % REGEXEN['latin_accents'].pattern, re.IGNORECASE)
REGEXEN['auto_link_hashtags'] = re.compile(ur'(^|[^0-9A-Z&\/]+)(#|#)([0-9A-Z_]*[A-Z_]+%s*)' % HASHTAG_CHARACTERS.pattern, re.IGNORECASE)
REGEXEN['auto_link_usernames_or_lists'] = re.compile(ur'([^a-zA-Z0-9_]|^)([@@]+)([a-zA-Z0-9_]{1,20})(\/[a-zA-Z][a-zA-Z0-9\u0080-\u00ff\-]{0,79})?')
REGEXEN['auto_link_emoticon'] = re.compile(ur'(8\-\#|8\-E|\+\-\(|\`\@|\`O|\<\|:~\(|\}:o\{|:\-\[|\>o\<|X\-\/|\[:-\]\-I\-|\/\/\/\/Ö\\\\\\\\|\(\|:\|\/\)|∑:\*\)|\( \| \))')
# URL related hash regex collection
REGEXEN['valid_preceding_chars'] = re.compile(ur"(?:[^\/\"':!=]|^|\:)")
punct = re.escape(string.punctuation)
REGEXEN['valid_domain'] = re.compile(ur'(?:[^%s\s][\.-](?=[^%s\s])|[^%s\s]){1,}\.[a-z]{2,}(?::[0-9]+)?' % (punct, punct, punct), re.IGNORECASE)
REGEXEN['valid_url_path_chars'] = re.compile(ur'[\.\,]?[a-z0-9!\*\'\(\);:=\+\$\/%#\[\]\-_,~@\.]', re.IGNORECASE)
# Valid end-of-path characters (so /foo. does not gobble the period).
# 1. Allow ) for Wikipedia URLs.
# 2. Allow =&# for empty URL parameters and other URL-join artifacts
REGEXEN['valid_url_path_ending_chars'] = re.compile(ur'[a-z0-9\)=#\/]', re.IGNORECASE)
REGEXEN['valid_url_query_chars'] = re.compile(ur'[a-z0-9!\*\'\(\);:&=\+\$\/%#\[\]\-_\.,~]', re.IGNORECASE)
REGEXEN['valid_url_query_ending_chars'] = re.compile(ur'[a-z0-9_&=#]', re.IGNORECASE)
REGEXEN['valid_url'] = re.compile(u'''
(%s)
(
(https?:\/\/|www\.)
(%s)
(/%s*%s?)?
(\?%s*%s)?
)
''' % (
REGEXEN['valid_preceding_chars'].pattern,
REGEXEN['valid_domain'].pattern,
REGEXEN['valid_url_path_chars'].pattern,
REGEXEN['valid_url_path_ending_chars'].pattern,
REGEXEN['valid_url_query_chars'].pattern,
REGEXEN['valid_url_query_ending_chars'].pattern
),
re.IGNORECASE + re.X)
# groups:
# 1 - Preceding character
# 2 - URL
# 3 - Protocol or www.
# 4 - Domain and optional port number
# 5 - URL path
# 6 - Query string
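# Hedged usage sketch (Python 2 syntax, matching the ur'' literals above):
# match = REGEXEN['valid_url'].search(u'reading http://example.com/wiki_(page) now')
# if match:
#     print match.group(2)  # group 2 is the full URL, per the group listing above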
| python |
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# PROJECT : algorithm_mad
# Time :2020/12/22 11:06
# Warning :The Hard Way Is Easier
import random
"""
堆排序
"""
'''堆化'''
def heapify(array, length, i):
largest = i
left = 2 * i + 1
right = 2 * i + 2
if left < length and array[largest] < array[left]:
largest = left
if right < length and array[largest] < array[right]:
largest = right
if largest != i:
array[i], array[largest] = array[largest], array[i]
heapify(array, length, largest)
'''堆排序'''
def HeapSort(array):
length = len(array)
for i in range(length, -1, -1):
heapify(array, length, i)
for i in range(length - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
return array
if __name__ == '__main__':
array = [random.randint(0, 100) for _ in range(10)]
array_sort = HeapSort(array.copy())
print('INPUT:\n%s' % ','.join([str(i) for i in array]))
print('OUTPUT:\n%s' % ','.join([str(i) for i in array_sort]))
| python |
from concurrent.futures import Future
from typing import Any, Callable, TypeVar
from threading import Lock
from amino import do, Do, IO, Map, Dat
from amino.logging import module_log
from ribosome.rpc.error import RpcReadError
from ribosome.rpc.data.rpc import ActiveRpc
A = TypeVar('A')
log = module_log()
PendingRpc = Map[int, Future]
class Requests(Dat['Requests']):
@staticmethod
def cons(current_id: int=0, to_vim: PendingRpc=Map(), from_vim: PendingRpc=Map()) -> 'Requests':
return Requests(current_id, to_vim, from_vim)
def __init__(self, current_id: int, to_vim: PendingRpc, from_vim: PendingRpc) -> None:
self.current_id = current_id
self.to_vim = to_vim
self.from_vim = from_vim
OnMessage = Callable[[bytes], IO[None]]
OnError = Callable[[RpcReadError], IO[None]]
class RpcConcurrency(Dat['RpcConcurrency']):
@staticmethod
def cons(
requests: Requests=None,
lock: Lock=None,
) -> 'RpcConcurrency':
return RpcConcurrency(
requests or Requests.cons(),
lock or Lock(),
)
def exclusive(self, f: Callable[..., IO[A]], *a: Any, **kw: Any) -> IO[A]:
def wrap() -> IO[A]:
with self.lock:
return IO.from_either(f(*a, **kw).attempt)
return IO.suspend(wrap)
def __init__(self, requests: Requests, lock: Lock) -> None:
self.requests = requests
self.lock = lock
def exclusive_unregister_rpc(rc: RpcConcurrency, requests: PendingRpc, rpc: ActiveRpc) -> IO[Future]:
return IO.delay(requests.pop, rpc.id)
def unregister_rpc(rc: RpcConcurrency, requests: PendingRpc, rpc: ActiveRpc) -> IO[Future]:
log.debug1(f'unregistering {rpc}')
return (
IO.failed(f'invalid request id from vim after execution: {rpc}. active requests: {requests}')
if rpc.id not in requests else
rc.exclusive(exclusive_unregister_rpc, rc, requests, rpc)
)
@do(IO[Future])
def exclusive_register_rpc(rc: RpcConcurrency, requests: PendingRpc, rpc: ActiveRpc) -> Do:
f: Future = Future()
yield IO.delay(requests.update, {rpc.id: f})
return f
@do(IO[Future])
def register_rpc(rc: RpcConcurrency, requests: PendingRpc, rpc: ActiveRpc) -> Do:
log.debug1(f'registering {rpc}')
yield (
IO.failed(f'duplicate id in request from vim: {rpc}')
if rpc.id in requests else
rc.exclusive(exclusive_register_rpc, rc, requests, rpc)
)
__all__ = ('Requests', 'OnMessage', 'OnError', 'RpcConcurrency', 'unregister_rpc', 'register_rpc',)
| python |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.contrib.auth.models import User
from uw_gws.utilities import fdao_gws_override
from uw_pws.util import fdao_pws_override
from uw_uwnetid.util import fdao_uwnetid_override
def get_user(username):
try:
user = User.objects.get(username=username)
return user
except Exception as ex:
user = User.objects.create_user(username, password='pass')
return user
def get_user_pass(username):
return 'pass'
| python |
from Jumpscale import j
import os
# import copy
# import sys
import inspect
import types
class JSBase:
def __init__(self, parent=None, topclass=True, **kwargs):
"""
:param parent: parent is object calling us
:param topclass: if True means no-one inherits from us
"""
self._parent = parent
self._class_init() # is needed to init class properties
if topclass:
self._init2(**kwargs)
self._init()
self._obj_cache_reset()
def _class_init(self, topclass=True):
if not hasattr(self.__class__, "_class_init_done"):
# print("_class init:%s"%self.__class__.__name__)
# only needed to execute once, needs to be done at init time, class inheritance does not exist
self.__class__._dirpath_ = "" # path of the directory hosting this class
self.__class__.__objcat_name = ""
self.__class__._cache_expiration = 3600 # expiration of the cache
self.__class__._test_runs = {}
self.__class__._test_runs_error = {}
if not hasattr(self.__class__, "_name"):
self.__class__._name = j.core.text.strip_to_ascii_dense(str(self.__class__)).split(".")[-1].lower()
# short location name:
if "__jslocation__" in self.__dict__:
self.__class__._location = self.__jslocation__
elif "__jslocation__" in self.__class__.__dict__:
self.__class__._location = self.__class__.__jslocation__
elif "__jscorelocation__" in self.__dict__:
self.__class__._location = self.__jslocation__
else:
self.__class__._location = None
parent = self._parent
while parent is not None:
if hasattr(parent, "__jslocation__"):
self.__class__._location = parent.__jslocation__
break
parent = parent._parent
if self.__class__._location is None:
self.__class__._location = self.__class__._name
# walk to all parents, let them know that there are child classes
self.__class__._class_children = []
parent = self._parent
while parent is not None:
if parent.__class__ not in parent._class_children:
parent._class_children.append(parent.__class__)
parent = parent._parent
self.__class__._methods_ = []
self.__class__._properties_ = []
self.__class__._inspected_ = False
# print("classinit_2:%s"%self.__class__)
# print(self.__class__._properties_)
self.__class__._logger_min_level = 100
self.__class__._class_init_done = True
self._key = "%s:%s" % (self.__class__._location, self.__class__._name)
# lets make sure the initial loglevel gets set
self._logger_set(children=False, parents=False)
def _logging_enable_check(self):
"""
check if logging should be disabled for current js location
according to logger includes and excludes (configured)
includes have a higher priority over excludes
will not take minlevel into consideration, it's only the excludes & includes
:return: True if logging is enabled
:rtype: bool
"""
if j.core.myenv.config.get("DEBUG", False):
return True
self._key = self._key.lower()
def check(checkitems):
for finditem in checkitems:
finditem = finditem.strip().lower()
if finditem == "*":
return True
if finditem == "":
continue
if "*" in finditem:
if finditem[-1] == "*":
# means at end
if self._key.startswith(finditem[:-1]):
return True
elif finditem[0] == "*":
if self._key.endswith(finditem[1:]):
return True
else:
raise RuntimeError("find item can only have * at start or at end")
else:
if self._key == finditem:
return True
return False
if check(j.core.myenv.log_includes) and not check(j.core.myenv.log_excludes):
return True
return False
def _logger_set(self, minlevel=None, children=True, parents=True):
"""
:param min_level if not set then will use the LOGGER_LEVEL from /sandbox/cfg/jumpscale_config.toml
make sure that logging above minlevel will happen, std = 100
if 100 means will not log anything
- CRITICAL 50
- ERROR 40
- WARNING 30
- INFO 20
- STDOUT 15
- DEBUG 10
- NOTSET 0
if parents and children: will be set on all classes of the self.location e.g. j.clients.ssh (children, ...)
if minlevel specified then it will always consider the logging to be enabled
:return:
"""
if minlevel is not None or self._logging_enable_check():
# if minlevel specified we overrule anything
# print ("%s:loginit"%self.__class__._name)
if minlevel is None:
minlevel = int(j.core.myenv.config.get("LOGGER_LEVEL", 15))
if minlevel is not None or not self._logging_enable_check():
self.__class__._logger_min_level = minlevel
if parents:
parent = self._parent
while parent is not None:
parent._logger_minlevel_set(minlevel)
parent = parent._parent
if children:
for kl in self.__class__._class_children:
# print("%s:minlevel:%s"%(kl,minlevel))
kl._logger_min_level = minlevel
def _init(self):
pass
def _init2(self, **kwargs):
"""
meant to be used by developers of the base classes
:return:
"""
self._obj_cache_reset()
self._key = "%s:%s" % (
self.__class__._location,
self.__class__._name,
) # needs to be done 2, first in class init
def _obj_cache_reset(self):
"""
this empties the runtime state of an obj and the logger and the testruns
:return:
"""
self.__class__._test_runs = {}
self._cache_ = None
self._objid_ = None
for key, obj in self.__dict__.items():
del obj
@property
def _dirpath(self):
if self.__class__._dirpath_ == "":
self.__class__._dirpath_ = os.path.dirname(inspect.getfile(self.__class__))
return self.__class__._dirpath_
@property
def _objid(self):
if self._objid_ is None:
id = self.__class__._location
id2 = ""
try:
id2 = self.data.name
except:
pass
if id2 == "":
try:
if self.data.id is not None:
id2 = self.data.id
except:
pass
if id2 == "":
for item in ["instance", "_instance", "_id", "id", "name", "_name"]:
if item in self.__dict__ and self.__dict__[item]:
self._log_debug("found extra for obj_id")
id2 = str(self.__dict__[item])
break
if id2 != "":
self._objid_ = "%s_%s" % (id, id2)
else:
self._objid_ = id
return self._objid_
def _logger_enable(self):
self._logger_set(0)
@property
def _cache(self):
if self._cache_ is None:
self._cache_ = j.core.cache.get(self._objid, expiration=self._cache_expiration)
return self._cache_
def _inspect(self):
if not self.__class__._inspected_:
# print("INSPECT:%s"%self.__class__)
assert self.__class__._methods_ == []
assert self.__class__._properties_ == []
for name, obj in inspect.getmembers(self.__class__):
if inspect.ismethod(obj):
self.__class__._methods_.append(name)
# elif name.startswith("_"):
# continue
elif inspect.ismethoddescriptor(obj):
continue
elif inspect.isfunction(obj):
self.__class__._methods_.append(name)
elif inspect.isclass(obj):
self.__class__._properties_.append(name)
elif inspect.isgetsetdescriptor(obj):
continue
else:
self.__class__._properties_.append(name)
for item in self.__dict__.keys():
if item.startswith("_"):
continue
if item not in self._methods_:
self.__class__._properties_.append(item)
self.__class__._inspected_ = True
# else:
# print("not inspect:%s"%self.__class__)
def _properties(self, prefix=""):
self._inspect()
if prefix == "_":
return [
item
for item in self.__class__._properties_
if (item.startswith("_") and not item.startswith("__") and not item.endswith("_"))
]
if prefix == "":
return [item for item in self.__class__._properties_ if not item.startswith("_")]
else:
return [item for item in self.__class__._properties_ if item.startswith(prefix)]
def _methods(self, prefix=""):
self._inspect()
if prefix == "_":
return [
item
for item in self.__class__._methods_
if (item.startswith("_") and not item.startswith("__") and not item.endswith("_"))
]
if prefix == "":
return [item for item in self.__class__._methods_ if not item.startswith("_")]
else:
return [item for item in self.__class__._methods_ if item.startswith(prefix)]
def _properties_children(self):
return []
def _properties_model(self):
return []
@property
def _ddict(self):
res = {}
for key in self.__dict__.keys():
if not key.startswith("_"):
v = self.__dict__[key]
if not isinstance(v, types.MethodType):
res[key] = v
return res
################
def _print(self, msg, cat=""):
self._log(msg, cat=cat, level=15)
def _log_debug(self, msg, cat="", data=None, context=None, _levelup=1):
self._log(msg, cat=cat, level=10, data=data, context=context, _levelup=_levelup)
def _log_info(self, msg, cat="", data=None, context=None, _levelup=1):
self._log(msg, cat=cat, level=20, data=data, context=context, _levelup=_levelup)
def _log_warning(self, msg, cat="", data=None, context=None, _levelup=1):
self._log(msg, cat=cat, level=30, data=data, context=context, _levelup=_levelup)
def _log_error(self, msg, cat="", data=None, context=None, _levelup=1):
self._log(msg, cat=cat, level=40, data=data, context=context, _levelup=_levelup)
def _log_critical(self, msg, cat="", data=None, context=None, _levelup=1):
self._log(msg, cat=cat, level=50, data=data, context=context, _levelup=_levelup)
def _log(self, msg, cat="", level=10, data=None, context=None, _levelup=1):
"""
:param msg: what you want to log
:param cat: any dot notation category
:param level: level of the log
:return:
can use {RED}, {RESET}, ... see color codes
levels:
- CRITICAL 50
- ERROR 40
- WARNING 30
- INFO 20
- STDOUT 15
- DEBUG 10
"""
if j.application._in_autocomplete == 2:
raise RuntimeError("s")
if j.application._in_autocomplete:
return None
if j.application.debug or self.__class__._logger_min_level - 1 < level:
# now we will log
frame_ = inspect.currentframe().f_back
levelup = 0
while frame_ and levelup < _levelup:
frame_ = frame_.f_back
levelup += 1
fname = frame_.f_code.co_filename.split("/")[-1]
defname = frame_.f_code.co_name
linenr = frame_.f_lineno
# while obj is None and frame_:
# locals_ = frame_.f_locals
#
# if tbc2 in locals_:
# obj = locals_[tbc2]
# else:
# frame_ = frame_.f_back
# if self._location not in [None,""]:
# if not self._location.endswith(self._name):
# context = "%s:%s:%s"%(self._location,self._name,defname)
# else:
# context = "%s:%s"%(self._location,defname)
# if context=="":
# context = defname
logdict = {}
logdict["linenr"] = linenr
logdict["processid"] = j.application.appname
logdict["message"] = msg
logdict["filepath"] = fname
logdict["level"] = level
if context:
logdict["context"] = context
else:
try:
logdict["context"] = self._key
except Exception as e:
from pudb import set_trace
set_trace()
logdict["context"] = ""
pass # TODO:*1 is not good
logdict["cat"] = cat
logdict["data"] = data
if data and isinstance(data, dict):
# shallow copy the data to avoid changing the original data
hidden_data = data.copy()
for sensitive_key in ("password", "secret", "passwd"):
    if sensitive_key in hidden_data:
        hidden_data[sensitive_key] = "***"
logdict["data"] = hidden_data
j.core.tools.log2stdout(logdict)
################
def _done_check(self, name="", reset=False):
if reset:
self._done_reset(name=name)
if name == "":
return j.core.db.hexists("done", self._objid)
else:
return j.core.db.hexists("done", "%s:%s" % (self._objid, name))
def _done_set(self, name="", value="1"):
if name == "":
return j.core.db.hset("done", self._objid, value)
else:
return j.core.db.hset("done", "%s:%s" % (self._objid, name), value)
def _done_get(self, name=""):
if name == "":
return j.core.db.hget("done", self._objid)
else:
return j.core.db.hget("done", "%s:%s" % (self._objid, name))
def _done_reset(self, name=""):
"""
if name =="" then will remove all from this object
:param name:
:return:
"""
if name == "":
for item in j.core.db.hkeys("done"):
item = item.decode()
# print("reset todo:%s" % item)
if item.find(self._objid) != -1:
j.core.db.hdel("done", item)
# print("reset did:%s" % item)
else:
return j.core.db.hdel("done", "%s:%s" % (self._objid, name))
def _test_error(self, name, error):
j.errorhandler.try_except_error_process(error, die=False)
self.__class__._test_runs_error[name] = error
def _test_run(self, name="", obj_key="main", die=True, **kwargs):
"""
:param name: name of file to execute can be e.g. 10_test_my.py or 10_test_my or subtests/test1.py
the tests are found in subdir tests of this file
if empty then will use all files sorted in tests subdir, but will not go in subdirs
:param obj_key: is the name of the function we will look for to execute, cannot have arguments
to pass arguments to the example script, use the templating feature, std = main
:return: result of the tests
"""
res = self.__test_run(name=name, obj_key=obj_key, die=die, **kwargs)
if self.__class__._test_runs_error != {}:
for key, e in self.__class__._test_runs_error.items():
self._log_error("ERROR FOR TEST: %s\n%s" % (key, e))
self._log_error("SOME TESTS DID NOT COMPLETE SUCCESSFULLY")
else:
self._log_info("ALL TESTS OK")
return res
def __test_run(self, name=None, obj_key="main", die=True, **kwargs):
if name == "":
name = None
if name is not None:
self._log_info("##: TEST RUN: %s" % name.upper())
if name is not None:
if name.endswith(".py"):
name = name[:-3]
tpath = "%s/tests/%s" % (self._dirpath, name)
tpath = tpath.replace("//", "/")
if not name.endswith(".py"):
tpath += ".py"
if not j.sal.fs.exists(tpath):
for item in j.sal.fs.listFilesInDir("%s/tests" % self._dirpath, recursive=False, filter="*.py"):
bname = j.sal.fs.getBaseName(item)
if "_" in bname:
bname2 = "_".join(bname.split("_", 1)[1:]) # remove part before first '_'
else:
bname2 = bname
if bname2.endswith(".py"):
bname2 = bname2[:-3]
if bname2.strip().lower() == name:
self.__test_run(name=bname, obj_key=obj_key, **kwargs)
return
return self._test_error(
name, RuntimeError("Could not find, test:%s in %s/tests/" % (name, self._dirpath))
)
self._log_debug("##: path: %s\n\n" % tpath)
else:
items = [
j.sal.fs.getBaseName(item)
for item in j.sal.fs.listFilesInDir("%s/tests" % self._dirpath, recursive=False, filter="*.py")
]
items.sort()
for name in items:
self.__test_run(name=name, obj_key=obj_key, **kwargs)
return
method = j.tools.codeloader.load(obj_key=obj_key, path=tpath)
self._log_debug("##:LOAD: path: %s\n\n" % tpath)
if die or j.application.debug:
res = method(self=self, **kwargs)
else:
try:
res = method(self=self, **kwargs)
except Exception as e:
if j.application.debug:
raise e
else:
j.errorhandler.try_except_error_process(e, die=False)
self.__class__._test_runs_error[name] = e
return e
self.__class__._test_runs[name] = res
return res
def __str__(self):
out = "## {GRAY}%s {RED}%s{BLUE} %s{RESET}\n\n" % (
self.__objcat_name,
self.__class__._location,
self.__class__.__name__,
)
def add(name, color, items, out):
if len(items) > 0:
out += "{%s}### %s:\n" % (color, name)
if len(items) < 20:
for item in items:
out += " - %s\n" % item
else:
out += " - ...\n"
out += "\n"
return out
out = add("children", "GREEN", self._properties_children(), out)
out = add("data", "YELLOW", self._properties_model(), out)
out = add("methods", "BLUE", self._methods(), out)
out = add("properties", "GRAY", self._properties(), out)
out += "{RESET}"
out = j.core.tools.text_replace(out)
print(out)
# TODO: *1 dirty hack, the ansi codes are not printed, need to check why
return ""
__repr__ = __str__
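# Hedged usage sketch (requires a Jumpscale sandbox; the location string is illustrative):
# class MyClient(JSBase):
#     __jslocation__ = "j.clients.myclient"
#     def _init(self):
#         self._log_info("client ready", data={"password": "secret"})  # sensitive fields are masked before logging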
| python |
# -*- coding: utf-8 -*-
"""
Protocol implementation for `Tokyo Tyrant <http://1978th.net/tokyotyrant/>`_.
Let's assume some defaults for our sandbox::
>>> TEST_HOST = '127.0.0.1'
>>> TEST_PORT = 1983 # default port is 1978
"""
import math
import socket
import struct
import exceptions
# Pyrant constants
MAGIC_NUMBER = 0xc8
ENCODING = 'UTF-8'
ENCODING_ERROR_HANDLING = 'strict' # set to 'replace' or 'ignore' if needed
# Table Types
DB_BTREE = 'B+ tree'
DB_TABLE = 'table'
DB_MEMORY = 'on-memory hash'
DB_HASH = 'hash'
TABLE_COLUMN_SEP = '\x00'
def _ulen(expr):
"Returns length of the string in bytes."
return len(expr.encode(ENCODING)) if isinstance(expr, unicode) else len(expr)
def _pack(code, *args):
# Craft string that we'll use to send data based on args type and content
buf = ''
fmt = '>BB'
largs = []
for arg in args:
if isinstance(arg, int):
fmt += 'I'
largs.append(arg)
elif isinstance(arg, str):
buf += arg
elif isinstance(arg, unicode):
buf += arg.encode(ENCODING)
elif isinstance(arg, long):
fmt += 'Q'
largs.append(arg)
elif isinstance(arg, (list, tuple)):
for v in arg:
if isinstance(v, unicode):
v = v.encode(ENCODING)
else:
v = str(v)
buf += "%s%s" % (struct.pack(">I", len(v)), v)
return "%s%s" % (struct.pack(fmt, MAGIC_NUMBER, code, *largs), buf)
class _TyrantSocket(object):
"""
Socket logic. We use this class as a wrapper to raw sockets.
"""
def __init__(self, host, port, timeout=None):
self._sock = socket.socket()
if not timeout is None:
self._sock.settimeout(timeout)
self._sock.connect((host, port))
self._sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
def __del__(self):
self._sock.close()
def send(self, *args, **kwargs):
"""
Packs arguments and sends the buffer to the socket.
"""
sync = kwargs.pop('sync', True)
# Send message to socket, then check for errors as needed.
self._sock.sendall(_pack(*args))
if not sync:
return
fail_code = ord(self.get_byte())
if fail_code:
raise exceptions.get_for_code(fail_code)
def recv(self, bytes):
"""
Retrieves given number of bytes from the socket and returns them as
string.
"""
d = ''
while len(d) < bytes:
c = self._sock.recv(min(8192, bytes - len(d)))
if not c:
raise socket.error('server disconnected unexpectedly') # pragma: nocover
d += c
return d
def get_byte(self):
"""
Retrieves one byte from the socket and returns it.
"""
return self.recv(1)
def get_int(self):
"""
Retrieves an integer (4 bytes) from the socket and returns it.
"""
return struct.unpack('>I', self.recv(4))[0]
def get_long(self):
"""
Retrieves a long integer (8 bytes) from the socket and returns it.
"""
return struct.unpack('>Q', self.recv(8))[0]
def get_str(self):
"""
Retrieves a string (n bytes, which is an integer just before string)
from the socket and returns it.
"""
return self.recv(self.get_int())
def get_unicode(self):
"""
Retrieves a unicode string from the socket and returns it. This method
uses :meth:`get_str`, which in turn makes use of :meth:`get_int`.
"""
return self.get_str().decode(ENCODING, ENCODING_ERROR_HANDLING)
def get_double(self):
"""
Retrieves two long integers (16 bytes) from the socket and returns them.
"""
intpart, fracpart = struct.unpack('>QQ', self.recv(16))
return intpart + (fracpart * 1e-12)
def get_strpair(self):
"""
Retrieves a pair of strings (n bytes, n bytes which are 2 integers just
before the pair) and returns them as a tuple of strings.
"""
klen = self.get_int()
vlen = self.get_int()
return self.recv(klen), self.recv(vlen)
class TyrantProtocol(object):
"""
A straightforward implementation of the Tokyo Tyrant protocol. Provides all
low level constants and operations. Provides a level of abstraction that is
just enough to communicate with server from Python using Tyrant API.
More sophisticated APIs can be built on top of this class. Two of them are
included in pyrant: the dict-like API (:class:`~pyrant.Pyrant`) and the
query API (:class:`~pyrant.query.Query`).
Let's connect to a sandbox Tyrant server::
>>> from pyrant import protocol
>>> p = protocol.TyrantProtocol(host=TEST_HOST, port=TEST_PORT)
# remove anything that could be left from previous time
>>> p.vanish()
# make sure there are zero records in the database
>>> p.rnum()
0
"""
# Protocol commands
PUT = 0x10
PUTKEEP = 0x11
PUTCAT = 0x12
PUTSHL = 0x13
PUTNR = 0x18
OUT = 0x20
GET = 0x30
MGET = 0x31
VSIZ = 0x38
ITERINIT = 0x50
ITERNEXT = 0x51
FWMKEYS = 0x58
ADDINT = 0x60
ADDDOUBLE = 0x61
EXT = 0x68
SYNC = 0x70
VANISH = 0x72
COPY = 0x73
RESTORE = 0x74
SETMST = 0x78
RNUM = 0x80
SIZE = 0x81
STAT = 0x88
MISC = 0x90
# Query conditions
RDBQCSTREQ = 0 # string is equal to
RDBQCSTRINC = 1 # string is included in
RDBQCSTRBW = 2 # string begins with
RDBQCSTREW = 3 # string ends with
RDBQCSTRAND = 4 # string includes all tokens in
RDBQCSTROR = 5 # string includes at least one token in
RDBQCSTROREQ = 6 # string is equal to at least one token in
RDBQCSTRRX = 7 # string matches regular expressions of
RDBQCNUMEQ = 8 # number is equal to
RDBQCNUMGT = 9 # number is greater than
RDBQCNUMGE = 10 # number is greater than or equal to
RDBQCNUMLT = 11 # number is less than
RDBQCNUMLE = 12 # number is less than or equal to
RDBQCNUMBT = 13 # number is between two tokens of
RDBQCNUMOREQ = 14 # number is equal to at least one token in
RDBQCFTSPH = 15 # full-text search with the phrase of
RDBQCFTSAND = 16 # full-text search with all tokens in
RDBQCFTSOR = 17 # full-text search with at least one token in
RDBQCFTSEX = 18 # full-text search with the compound expression of
RDBQCNEGATE = 1 << 24 # negation flag
RDBQCNOIDX = 1 << 25 # no index flag
# Order types
RDBQOSTRASC = 0 # string ascending
RDBQOSTRDESC = 1 # string descending
RDBQONUMASC = 2 # number ascending
RDBQONUMDESC = 3 # number descending
# Operation types
TDBMSUNION = 0 # union
TDBMSISECT = 1 # intersection
TDBMSDIFF = 2 # difference
# Miscellaneous operation options
RDBMONOULOG = 1 # omission of update log
# Scripting extension options
RDBXOLCKREC = 1 # record locking
RDBXOLCKGLB = 2 # global locking
# Index types (for table databases)
TDBITLEXICAL = 0 # lexical string
TDBITDECIMAL = 1 # decimal string
TDBITTOKEN = 2 # token inverted index
TDBITQGRAM = 3 # q-gram inverted index
TDBITOPT = 9998 # optimize index
TDBITVOID = 9999 # remove index
TDBITKEEP = 1 << 24 # keep existing index
def __init__(self, host, port, timeout=None):
# connect to the remote database
self._sock = _TyrantSocket(host, port, timeout)
# expose connection info (not used internally)
self.host = host
self.port = port
def put(self, key, value):
"""
Unconditionally sets key to value::
>>> p.put(u'foo', u'bar\x00baz')
>>> p.rnum()
1
>>> p.put('fox', u'box\x00quux')
>>> p.rnum()
2
"""
self._sock.send(self.PUT, _ulen(key), _ulen(value), key, value)
def putkeep(self, key, value):
"""
Sets key to value if key does not already exist.
"""
self._sock.send(self.PUTKEEP, _ulen(key), _ulen(value), key, value)
def putcat(self, key, value):
"""
Appends value to the existing value for key, or sets key to value if it
does not already exist.
"""
self._sock.send(self.PUTCAT, _ulen(key), _ulen(value), key, value)
def putshl(self, key, value, width):
"""
Equivalent to::
self.putcat(key, value)
self.put(key, self.get(key)[-width:])
"""
self._sock.send(self.PUTSHL, _ulen(key), _ulen(value), width, key,
value)
def putnr(self, key, value):
"""
Sets key to value without waiting for a server response.
"""
self._sock.send(self.PUTNR, _ulen(key), _ulen(value), key, value,
sync=False)
def out(self, key):
"""
Removes key from server.
"""
self._sock.send(self.OUT, _ulen(key), key)
def genuid(self):
"""
Generates and returns a unique primary key. Raises `ValueError` if the
database could not return sensible data.
"""
res = self.misc('genuid', [])
if not len(res) == 1 or not res[0]:
raise ValueError('Could not generate primary key: %s' % repr(res)) # pragma: nocover
return res[0]
def get(self, key, literal=False):
"""
Returns the value of `key` as stored on the server::
>>> p.get(u'foo')
u'bar\x00baz'
>>> p.get(u'fox')
u'box\x00quux'
"""
self._sock.send(self.GET, _ulen(key), key)
return self._sock.get_str() if literal else self._sock.get_unicode()
def getint(self, key):
"""
Returns an integer for given `key`. Value must be set by
:meth:`~pyrant.protocol.TyrantProtocol.addint`.
"""
return self.addint(key)
def getdouble(self, key):
"""
Returns a double for given key. Value must be set by
:meth:`~adddouble`.
"""
return self.adddouble(key)
def mget(self, keys):
"""
Returns key,value pairs from the server for the given list of keys::
>>> p.mget(['foo', 'fox'])
[('foo', 'bar\x00baz'), ('fox', 'box\x00quux')]
"""
self._sock.send(self.MGET, len(keys), keys)
numrecs = self._sock.get_int()
return [self._sock.get_strpair() for i in xrange(numrecs)]
def vsiz(self, key):
"""
Returns the size of a value for given key.
"""
self._sock.send(self.VSIZ, _ulen(key), key)
return self._sock.get_int()
def iterinit(self):
"""
Begins iteration over all keys of the database.
>>> p.iterinit() # now we can call iternext()
"""
self._sock.send(self.ITERINIT)
def iternext(self):
"""
Returns the next key after ``iterinit`` call. Raises an exception which
is a subclass of :class:`~pyrant.protocol.TyrantError` on iteration end::
>>> p.iternext() # assume iterinit() was already called
u'foo'
>>> p.iternext()
u'fox'
>>> p.iternext()
Traceback (most recent call last):
...
InvalidOperation
"""
self._sock.send(self.ITERNEXT)
return self._sock.get_unicode()
def fwmkeys(self, prefix, maxkeys=-1):
"""
Get up to the first maxkeys starting with prefix
"""
self._sock.send(self.FWMKEYS, _ulen(prefix), maxkeys, prefix)
numkeys = self._sock.get_int()
return [self._sock.get_unicode() for i in xrange(numkeys)]
def addint(self, key, num=0):
"""
Adds given integer to existing one. Stores and returns the sum.
"""
self._sock.send(self.ADDINT, _ulen(key), num, key)
return self._sock.get_int()
def adddouble(self, key, num=0.0):
"""
Adds given double to existing one. Stores and returns the sum.
"""
fracpart, intpart = math.modf(num)
fracpart, intpart = int(fracpart * 1e12), int(intpart)
self._sock.send(self.ADDDOUBLE, _ulen(key), long(intpart),
long(fracpart), key)
return self._sock.get_double()
def ext(self, func, opts, key, value):
"""
Calls ``func(key, value)`` with ``opts``.
:param opts: a bitflag that can be `RDBXOLCKREC` for record locking
and/or `RDBXOLCKGLB` for global locking.
"""
self._sock.send(self.EXT, len(func), opts, _ulen(key), _ulen(value),
func, key, value)
return self._sock.get_unicode()
def sync(self): # TODO: better documentation (why would someone need this?)
"""
Synchronizes the updated contents of the remote database object with the
file and the device.
"""
self._sock.send(self.SYNC)
def vanish(self):
"""
Removes all records from the database.
"""
self._sock.send(self.VANISH)
def copy(self, path):
"""
Hot-copies the database to given path.
"""
self._sock.send(self.COPY, _ulen(path), path)
def restore(self, path, msec):
"""
Restores the database from `path` at given timestamp (in `msec`).
"""
self._sock.send(self.RESTORE, _ulen(path), msec, path)
def setmst(self, host, port):
"""
Sets master to `host`:`port`.
"""
self._sock.send(self.SETMST, len(host), port, host)
def rnum(self):
"""
Returns the number of records in the database.
"""
self._sock.send(self.RNUM)
return self._sock.get_long()
def add_index(self, name, kind=None, keep=False):
"""
Sets index on given column. Returns `True` if index was successfully
created.
:param name: column name for which index should be set.
:param kind: index type, one of: `lexical`, `decimal`, `token`,
`q-gram`.
:param keep: if True, index is only created if it did not yet exist.
Default is False, i.e. any existing index is reset.
.. note:: we have chosen not to mimic the original API here because it
is a bit too confusing. Instead of a single cumbersome function
Pyrant provides three: :meth:`~add_index`, :meth:`~optimize_index`
and :meth:`~drop_index`. They all do what their names suggest.
"""
# TODO: replace "kind" with keyword arguments
TYPES = {
'lexical': self.TDBITLEXICAL,
'decimal': self.TDBITDECIMAL,
'token': self.TDBITTOKEN,
'q-gram': self.TDBITQGRAM,
}
kind = 'lexical' if kind is None else kind
assert kind in TYPES, 'unknown index type "%s"' % kind
type_code = TYPES[kind]
if keep:
type_code |= self.TDBITKEEP
try:
self.misc('setindex', [name, type_code])
except exceptions.InvalidOperation:
return False
else:
return True
def optimize_index(self, name):
"""
Optimizes index for given column. Returns `True` if the operation was
successfully performed. In most cases the operation fails when the
index does not exist. You can add index using :meth:`~add_index`.
"""
try:
self.misc('setindex', [name, self.TDBITOPT])
except exceptions.InvalidOperation:
return False
else:
return True
def drop_index(self, name):
"""
Removes index for given column. Returns `True` if the operation was
successfully performed. In most cases the operation fails when the
index doesn't exist. You can add index using :meth:`~add_index`.
"""
try:
self.misc('setindex', [name, self.TDBITVOID])
except exceptions.InvalidOperation:
return False
else:
return True
def size(self):
"""
Returns the size of the database in bytes.
"""
self._sock.send(self.SIZE)
return self._sock.get_long()
def stat(self):
"""
Returns some statistics about the database.
"""
self._sock.send(self.STAT)
return self._sock.get_unicode()
def search(self, conditions, limit=10, offset=0,
order_type=0, order_column=None, opts=0,
ms_conditions=None, ms_type=None, columns=None,
out=False, count=False, hint=False):
"""
Returns list of keys for elements matching given ``conditions``.
:param conditions: a list of tuples in the form ``(column, op, expr)``
where `column` is name of a column and `op` is operation code (one of
TyrantProtocol.RDBQC[...]). The conditions are implicitly combined
with logical AND. See `ms_conditions` and `ms_type` for more complex
operations.
:param limit: integer. Defaults to 10.
:param offset: integer. Defaults to 0.
:param order_column: string; if defined, results are sorted by this
column using default or custom ordering method.
:param order_type: one of TyrantProtocol.RDBQO[...]; if defined along
with `order_column`, results are sorted by the latter using given
method. Default is RDBQOSTRASC.
:param opts: a bitflag (see
:meth:`~pyrant.protocol.TyrantProtocol.misc`
:param ms_conditions: MetaSearch conditions.
:param ms_type: MetaSearch operation type.
:param columns: iterable; if not empty, returns only given columns for
matched records.
:param out: boolean; if True, all items that correspond to the query are
deleted from the database when the query is executed.
:param count: boolean; if True, the return value is the number of items
that correspond to the query.
:param hint: boolean; if True, the hint string is added to the return
value.
"""
# TODO: split this function into separate functions if they return
# different results:
#
# - search = misc('search', []) --> list of keys
# - searchget = misc('search', ['get']) --> list of items
# - searchout = misc('search', ['out']) --> boolean
# - searchcount = misc('search', ['count']) --> integer
#
# Some functions should be of course left as keywords for the
# above-mentioned functions:
#
# - addcond = misc('search', ['addcond...'])
# - setorder = misc('search', ['setorder...'])
# - setlimit = misc('search', ['setlimit...'])
# - hint = misc('search', ['hint'])
# - metasearch stuff, including functions 'mstype', 'addcond' and 'next'.
#
# See http://1978th.net/tokyotyrant/spex.html#tcrdbapi
# sanity check
assert limit is None or 0 <= limit, 'wrong limit value "%s"' % limit
assert offset is None or 0 <= offset, 'wrong offset value "%s"' % offset
if offset and not limit:
# this is required by TDB API. Could be worked around somehow?
raise ValueError('Offset cannot be specified without limit.')
assert ms_type in (None, self.TDBMSUNION, self.TDBMSISECT, self.TDBMSDIFF)
assert order_type in (self.RDBQOSTRASC, self.RDBQOSTRDESC,
self.RDBQONUMASC, self.RDBQONUMDESC)
# conditions
args = ['addcond\x00%s\x00%d\x00%s' % cond for cond in conditions]
# MetaSearch support (multiple additional queries, one Boolean operation)
if ms_type is not None and ms_conditions:
args += ['mstype\x00%s' % ms_type]
for conds in ms_conditions:
args += ['next']
args += ['addcond\x00%s\x00%d\x00%s' % cond for cond in conds]
# return only selected columns
if columns:
args += ['get\x00%s' % '\x00'.join(columns)]
# set order in query
if order_column:
args += ['setorder\x00%s\x00%d' % (order_column, order_type)]
# set limit and offset
if limit: # and 0 <= offset:
# originally this is named setlimit(max,skip).
# it is *not* possible to specify offset without limit.
args += ['setlimit\x00%d\x00%d' % (limit, offset)]
# drop all records yielded by the query
if out:
args += ['out']
if count:
args += ['count']
if hint:
args += ['hint']
return self.misc('search', args, opts)
def misc(self, func, args, opts=0):
"""
Executes custom function.
:param func: the function name (see below)
:param opts: a bitflag (see below)
Functions supported by all databases:
* `putlist` stores records. It receives keys and values one after
the other, and returns an empty list.
* `outlist` removes records. It receives keys, and returns
an empty list.
* `getlist` retrieves records. It receives keys, and returns values.
Functions supported by the table database (in addition to mentioned above):
* `setindex`
* `search`
* `genuid`.
Possible options:
* :const:`TyrantProtocol.RDBMONOULOG` to prevent writing to the update log.
"""
try:
self._sock.send(self.MISC, len(func), opts, len(args), func, args)
finally:
numrecs = self._sock.get_int()
return [self._sock.get_unicode() for i in xrange(numrecs)]
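# Hedged usage sketch (requires a table database; the column name is illustrative):
# keys = p.search([('name', TyrantProtocol.RDBQCSTREQ, u'John')], limit=5)
# rows = p.misc('getlist', keys)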
| python |
import h2o
h2o.init()
weather_hex = h2o.import_file("weather.csv")
# To see a brief summary of the data, run the following command.
weather_hex.describe()
| python |
from Tkinter import Tk, Label, Button
def update_label():
global n
n += 1
l["text"] = "Number of clicks: %d" % n
w = Tk()
n = 0
l = Label(w, text="There have been no clicks yet")
l.pack()
Button(w, text="click me", command=update_label).pack()
w.mainloop()
| python |
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utility methods used for parsing timestamps and datetimes from Discord."""
from __future__ import annotations
__all__: typing.List[str] = [
"DISCORD_EPOCH",
"datetime_to_discord_epoch",
"discord_epoch_to_datetime",
"unix_epoch_to_datetime",
"Intervalish",
"timespan_to_int",
"local_datetime",
"utc_datetime",
"monotonic",
"monotonic_ns",
"uuid",
]
import datetime
import time
import typing
import uuid as uuid_
Intervalish = typing.Union[int, float, datetime.timedelta]
"""Type hint representing a naive time period or time span.
This is a type that is like an interval of some sort.
This is an alias for `typing.Union[int, float, datetime.timedelta]`,
where `builtins.int` and `builtins.float` types are interpreted as a number of seconds.
"""
DISCORD_EPOCH: typing.Final[int] = 1_420_070_400
"""Discord epoch used within snowflake identifiers.
This is defined as the number of seconds between
`1/1/1970 00:00:00 UTC` and `1/1/2015 00:00:00 UTC`.
References
----------
* [Discord API documentation - Snowflakes](https://discord.com/developers/docs/reference#snowflakes)
"""
# Default to the standard lib parser, that isn't really ISO compliant but seems
# to work for what we need.
def slow_iso8601_datetime_string_to_datetime(datetime_str: str) -> datetime.datetime:
"""Parse an ISO-8601-like datestring into a datetime.
Parameters
----------
datetime_str : builtins.str
The date string to parse.
Returns
-------
datetime.datetime
The corresponding date time.
"""
if datetime_str.endswith(("z", "Z")):
# Python's parser cannot handle zulu time, it isn't a proper ISO-8601 compliant parser.
datetime_str = datetime_str[:-1] + "+00:00"
return datetime.datetime.fromisoformat(datetime_str)
fast_iso8601_datetime_string_to_datetime: typing.Optional[typing.Callable[[str], datetime.datetime]]
try:
# CISO8601 is around 600x faster than modules like dateutil, which is
# going to be noticeable on big bots where you are parsing hundreds of
# thousands of "joined_at" fields on users on startup.
import ciso8601
# Discord appears to actually use RFC-3339, which isn't a true ISO-8601 implementation,
# but somewhat of a subset with some edge cases.
# See https://tools.ietf.org/html/rfc3339#section-5.6
fast_iso8601_datetime_string_to_datetime = ciso8601.parse_rfc3339
except ImportError:
fast_iso8601_datetime_string_to_datetime = None
iso8601_datetime_string_to_datetime: typing.Callable[[str], datetime.datetime] = (
fast_iso8601_datetime_string_to_datetime or slow_iso8601_datetime_string_to_datetime
)
def discord_epoch_to_datetime(epoch: int, /) -> datetime.datetime:
"""Parse a Discord epoch into a `datetime.datetime` object.
Parameters
----------
epoch : builtins.int
Number of milliseconds since `1/1/2015 00:00:00 UTC`.
Returns
-------
datetime.datetime
Number of seconds since `1/1/1970 00:00:00 UTC`.
"""
return datetime.datetime.fromtimestamp(epoch / 1_000 + DISCORD_EPOCH, datetime.timezone.utc)
def datetime_to_discord_epoch(timestamp: datetime.datetime) -> int:
"""Parse a `datetime.datetime` object into an `builtins.int` `DISCORD_EPOCH` offset.
Parameters
----------
timestamp : datetime.datetime
Number of seconds since `1/1/1970 00:00:00 UTC`.
Returns
-------
builtins.int
Number of milliseconds since `1/1/2015 00:00:00 UTC`.
"""
return int((timestamp.timestamp() - DISCORD_EPOCH) * 1_000)
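# A small round-trip sketch for the two converters above (values shown are what the
# code computes, given DISCORD_EPOCH as defined in this module):
#
#     ts = discord_epoch_to_datetime(0)  # -> datetime.datetime(2015, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
#     datetime_to_discord_epoch(ts)      # -> 0 (milliseconds since the Discord epoch)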
def unix_epoch_to_datetime(epoch: typing.Union[int, float], /, *, is_millis: bool = True) -> datetime.datetime:
"""Parse a UNIX epoch to a `datetime.datetime` object.
!!! note
        If the epoch is outside the range of what this system can handle,
this will return `datetime.datetime.max` if the timestamp is positive,
or `datetime.datetime.min` otherwise.
Parameters
----------
epoch : typing.Union[builtins.int, builtins.float]
Number of seconds/milliseconds since `1/1/1970 00:00:00 UTC`.
is_millis : builtins.bool
`builtins.True` by default, indicates the input timestamp is measured in
        milliseconds rather than seconds.
Returns
-------
datetime.datetime
Number of seconds since `1/1/1970 00:00:00 UTC`.
"""
# Datetime seems to raise an OSError when you try to convert an out of range timestamp on Windows and a ValueError
# if you try on a UNIX system so we want to catch both.
try:
epoch /= (is_millis * 1_000) or 1
return datetime.datetime.fromtimestamp(epoch, datetime.timezone.utc)
except (OSError, ValueError):
if epoch > 0:
return datetime.datetime.max
else:
return datetime.datetime.min
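# Illustrative calls for unix_epoch_to_datetime (both yield 2015-01-01 00:00:00 UTC):
#
#     unix_epoch_to_datetime(1_420_070_400_000)               # interpreted as milliseconds
#     unix_epoch_to_datetime(1_420_070_400, is_millis=False)  # interpreted as seconds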
def timespan_to_int(value: Intervalish, /) -> int:
"""Cast the given timespan in seconds to an integer value.
Parameters
----------
value : Intervalish
The number of seconds.
Returns
-------
builtins.int
The integer number of seconds. Fractions are discarded. Negative values
are removed.
"""
if isinstance(value, datetime.timedelta):
value = value.total_seconds()
return int(max(0, value))
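# Illustrative conversions for timespan_to_int:
#
#     timespan_to_int(12.9)                           # -> 12 (fraction discarded)
#     timespan_to_int(datetime.timedelta(minutes=2))  # -> 120
#     timespan_to_int(-5)                             # -> 0 (negative values clamped)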
def local_datetime() -> datetime.datetime:
"""Return the current date/time for the system's time zone."""
return utc_datetime().astimezone()
def utc_datetime() -> datetime.datetime:
"""Return the current date/time for UTC (GMT+0)."""
return datetime.datetime.now(tz=datetime.timezone.utc)
# time.monotonic_ns is no slower than time.monotonic, but is more accurate.
# Also, fun fact that monotonic_ns appears to be 1µs faster on average than
# monotonic on ARM64 architectures, but on x86, monotonic is around 1ns faster
# than monotonic_ns. Just thought that was kind of interesting to note down.
# (RPi 3B versus i7 6700)
# time.perf_counter and time.perf_counter_ns don't have proper typehints, causing
# pdoc to not be able to recognise them. This is just a little hack around that.
def monotonic() -> float:
"""Performance counter for benchmarking.""" # noqa: D401 - Imperative mood
return time.perf_counter()
def monotonic_ns() -> int:
"""Performance counter for benchmarking as nanoseconds.""" # noqa: D401 - Imperative mood
return time.perf_counter_ns()
def uuid() -> str:
"""Generate a unique UUID (1ns precision)."""
return uuid_.uuid1(None, monotonic_ns()).hex
| python |
import json
import os
import copy
__author__ = 'nekmo'
class Field(object):
def __call__(self, value):
return self.parse(value)
def parse(self, value):
raise NotImplementedError
class IntegerField(Field):
def parse(self, value):
return int(value)
class BooleanField(Field):
def parse(self, value):
return bool(value)
class BaseParser(object):
    _key = None  # if the parent is a dictionary, the key of this element within it
    _parent = None  # the parent element
parser = None
config = None
def save(self):
self.config.save()
class ListParser(list, BaseParser):
def __init__(self, parser=None, data=None, config=None):
"""
        :param parser: parser used to parse each element
        :param data: data used to populate the elements
        :param config: root Config instance so that the save() method can be used
        :return:
"""
        super().__init__()
        self.parser = parser
        self.config = config
        # TODO: each element of data should be validated
        self.extend(data or [])
class DictParser(dict, BaseParser):
schema = None
default = None
def __init__(self, parser=None, data=None, config=None):
self.config = config
super().__init__()
if data:
self.update(data)
self.default = self.default
def __getattr__(self, item):
if item in self:
return self[item]
elif item in (self.default or {}) and item in self.schema:
return self.parse_schema_element(item, copy.deepcopy(self.default[item]))
return self.__getattribute__(item)
def parse_schema(self, data):
new_data = {}
for key, value in data.items():
new_data[key] = self.parse_schema_element(key, value)
return new_data
def parse_schema_element(self, key, value):
parser = self.parser or self.schema[key]
if isinstance(parser, Field):
return parser(value)
else:
element = parser(data=value, config=self.config)
element._key = key
element._parent = self
return element
def update(self, E=None, **F):
new_data = self.parse_schema(E)
return super(DictParser, self).update(new_data, **F)
class Config(DictParser):
is_loaded = False
default = None
def __init__(self, config_file, default=None):
super().__init__()
self.config_file = config_file
self.default = default or self.default or {}
def __setitem__(self, key, value):
self.load()
return super(Config, self).__setitem__(key, value)
def __getitem__(self, item):
self.load()
return super(Config, self).__getitem__(item)
def __delitem__(self, key):
self.load()
return super(Config, self).__delitem__(key)
def __getattr__(self, item):
if item in ['is_loaded']:
return self.__getattribute__(item)
self.load()
if item in self:
return self[item]
return self.__getattribute__(item)
def load(self):
if self.is_loaded:
return
self.is_loaded = True
self.clear()
if os.path.exists(self.config_file):
self.update(json.load(open(self.config_file, 'r')))
else:
default = copy.deepcopy(self.default)
self.save(default)
self.update(default)
return self
def save(self, data=None):
config_dir = os.path.dirname(self.config_file)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
json.dump(data or self, open(self.config_file, 'w'))
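# A minimal usage sketch (the subclass, file path, schema and defaults below are
# illustrative, not part of this module):
#
#     class AppConfig(Config):
#         schema = {'retries': IntegerField(), 'debug': BooleanField()}
#         default = {'retries': 3, 'debug': False}
#
#     cfg = AppConfig('/tmp/myapp/config.json')
#     cfg['retries']   # first access loads the JSON file, creating it from defaults -> 3
#     cfg.save()       # persists the current values back to config_file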
| python |
import logging.config
import os
class Config(object):
SERVER_NAME = '127.0.0.1:5000'
LOGGING_CONFIG_FILE = 'logging-config.ini'
@classmethod
def init_app(cls, app):
logging_config_path = os.path.normpath(
os.path.join(
os.path.dirname(__file__), cls.LOGGING_CONFIG_FILE))
logging.config.fileConfig(logging_config_path)
class DevelopmentConfig(Config):
DEBUG = True
config_map = {
'development': DevelopmentConfig,
'default': DevelopmentConfig
}
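# A minimal usage sketch with a Flask-style application factory (the factory itself
# is illustrative and not part of this module):
#
#     from flask import Flask
#
#     def create_app(config_name='default'):
#         app = Flask(__name__)
#         app.config.from_object(config_map[config_name])
#         config_map[config_name].init_app(app)  # wires up logging from the .ini file
#         return app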
| python |
"""
InputReader
--------------------------------------------------
Input Reader that loads previous output files
"""
import yaml
import json
def load_previous_outputs_as_inputs(file_paths: list) -> dict:
print("Start loading input files...")
previous_records = {}
for file_path in file_paths:
print("Loading {}...".format(file_path))
# start reading files
data = None
# try yaml and json
input_stream = None
try:
input_stream = open(file_path)
data = yaml.safe_load(input_stream)
print("{} successfully loaded as yaml file.".format(file_path))
input_stream.close()
except yaml.YAMLError:
if input_stream:
input_stream.close()
data = None
if not data:
try:
input_stream = open(file_path)
data = json.load(input_stream)
print("{} successfully loaded as json file.".format(file_path))
input_stream.close()
except json.JSONDecodeError:
if input_stream:
input_stream.close()
data = None
if not data or not isinstance(data, dict):
print("Loading {} failed both in yaml and json. Skipped.".format(file_path))
continue
# read data into dict and merge data if necessary
for user_dict in data["results"]:
if user_dict["owner__username"] in previous_records:
to_merge_user_object = previous_records[user_dict["owner__username"]]
# iterate all repos in data
for repo_object in user_dict["repos"]:
# update to the latest scanned ones
repo_name = repo_object["repo__name"]
if repo_name in to_merge_user_object["repos"]:
if repo_object["date"] > \
to_merge_user_object["repos"][repo_name]["date"]:
to_merge_user_object["repos"][repo_name]["date"] = \
repo_object["date"]
to_merge_user_object["repos"][repo_name]["status"] = repo_object["status"]
# or add the repos if no collision
else:
to_merge_user_object["repos"][repo_name] = {
**repo_object
}
else:
previous_records[user_dict["owner__username"]] = {
**user_dict,
"repos": {
repo_object["repo__name"]: {**repo_object} for repo_object in user_dict["repos"]
}
}
print("Inputs loading finished.")
return previous_records
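# A minimal usage sketch. Each input file is expected to hold a top-level "results"
# list of user dicts with "owner__username" and a "repos" list whose entries carry
# "repo__name", "date" and "status" (file names below are illustrative):
#
#     records = load_previous_outputs_as_inputs(["run_2020.yaml", "run_2021.json"])
#     for username, user in records.items():
#         print("{} has {} scanned repos".format(username, len(user["repos"])))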
| python |
from robo_ai.resources.assistants import AssistantsResource
from robo_ai.resources.client_resource import ClientResource
from robo_ai.resources.oauth import OauthResource
class BaseResource(ClientResource):
def _register_resources(self):
self._add_resource('assistants', AssistantsResource)
self._add_resource('oauth', OauthResource)
@property
def assistants(self) -> AssistantsResource:
return self._get_resource('assistants')
@property
def oauth(self) -> OauthResource:
return self._get_resource('oauth')
| python |
# sdspy
import configparser
import datetime
import json
from performance_counters import PerformanceCounters as PC
from sds_client import SdsClient
from sds_stream import SdsStream
from sds_type import SdsType
from sds_type_code import SdsTypeCode
from sds_type_data import SdsTypeData
from sds_type_property import SdsTypeProperty
import time
import xml.etree.ElementTree
import xml
| python |
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import pickle
import operator
from random import randint
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import data_io.settings as Settings
from data_io.testdata import sliding_window
from . import utils
import cv2 as cv
from math import ceil
import logging
def get_mean_accuracy(accuracies):
# average accuracy is only calculated over the testing results which is index 2
testingAccuracies = [i[2] for i in accuracies]
return float(sum(testingAccuracies)) / float(len(accuracies))
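# Worked example: only index 2 of each accuracy tuple (the testing accuracy) is averaged,
# e.g. get_mean_accuracy([(0.9, 0.85, 0.8), (0.95, 0.9, 0.7)]) returns (0.8 + 0.7) / 2 = 0.75.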
class ModelTester(object):
"""Class to test and evaluate models."""
def __init__(self, classifier, transformation=None, size=(-1,-1), transformationBack=None):
"""
Instantiates model tester.
Keyword arguments:
classifier -- reference to the model.
transformation -- optional method for image transformation before prediction
size -- desired image size. Default: (-1, -1) means do not change size
transformationBack -- optional method for the transformation of the prediction image format back to a displayable format
"""
self.classifier = classifier
self.transformation = transformation # method to transform the data (needed for NNs)
self.transformationBack = transformationBack # since the TestData module applies the transformation we have to reverse the transformation on the images to display them.
self.size = size
def __yield_image_predictions(self, segmentIndex, classes=None, maxNumberOfImages=-1, shuffle=False, slidingWindow=False, slidingWindowSize=(300, 300), slidingWindowStride=64):
"""
Calls the predict method for each image and returns the result of the prediction.
Keyword arguments:
        segmentIndex -- Index of the segment to test.
classes -- List of classes to test. Default: Test all classes
maxNumberOfImages -- number of images to test. Default: use all
shuffle -- reshuffle images
slidingWindow -- test sliding window
slidingWindowSize -- size of the sliding window. Default: (300, 300) Pixels
slidingWindowStride -- stride of the sliding window. Default: 64 Pixels
Returns:
Generator((class_, prediction, img)) := (Class Name, prediction, image that was tested)
"""
if classes is None:
classes = self.classifier.testData.classes
if shuffle:
self.classifier.testData.reshuffle_segment_data(segmentIndex)
prevRandomSamplingStatus = Settings.E_RANDOM_SAMPLE
for class_ in classes:
# load test images for this class and predict
predictions = []
for img, _ in self.classifier.testData.load_data(segmentIndex, classes=[class_], grayscale=self.classifier.grayscale, outputActions=False, maxNumberOfImagesPerClass=maxNumberOfImages, size=self.size, transformation=self.transformation, resolutionSize=self.classifier.imageSize):
# classifier tester expects a list in the form of [(class_, [predictions])]
if slidingWindow:
# prevent random sampling
Settings.E_RANDOM_SAMPLE = False
voteDict = {cls: 0 for cls in classes}
slVis = np.copy(img)
# is slVis grayscale?
if self.classifier.grayscale:
slVis = cv.cvtColor(slVis, cv.COLOR_GRAY2BGR)
for roi, slImg in sliding_window(img, slidingWindowSize, slidingWindowStride):
p = self.classifier.predict([slImg])
if p is None:
continue
# outputs the class with highest confidence
p = p[0][0]
voteDict[p] += 1
# overlay imagePart if correct class
if p == class_:
slVis = roi.overlay_rectangle(slVis)
cv.imwrite(self.classifier.modelSaver.get_save_path_for_visualizations() + "slidingWindow/{0}.jpg".format(class_), slVis)
print "Sliding Window prediction for class {0} Votes:\n{1}\n\n".format(class_, voteDict)
Settings.E_RANDOM_SAMPLE = prevRandomSamplingStatus
prediction = self.classifier.predict([img])
if prediction is None:
continue
yield (class_, prediction, img)
def __yield_class_predictions(self, segmentIndex):
"""
Calls the predict method for each class and yields the result as a tuple with the class and a list of predictions.
Keyword arguments:
segmentIndex -- index of the test data segment
Returns:
Generator((class_, predictions)) := (Class name, List of predictions)
"""
for class_ in self.classifier.testData.classes:
# load test images for this class and predict
predictions = [p for _, p, _ in self.__yield_image_predictions(segmentIndex, [class_])]
yield (class_, predictions)
def test_classifier(self, segments=["test"]):
"""
Completely evaluates a classifier and prints the results to the console window and saves the results to the model directory.
Keyword arguments:
segments -- List of segments to test onto
Returns:
dictionary of results of the segments.
"""
if Settings.G_DETAILED_CONSOLE_OUTPUT:
print "## Testing classifier:\n"
results = {}
for segment in segments:
print "# Testing",segment
# stores classes as key and the prediction results (list) as value.
segmentResults = {}
precisionRecallValues = {}
for class_, predictions in self.__yield_class_predictions(segment):
# number of matches for 1,2,...,numberOfClasses-1 candidates
topNMatches = [0] * (self.classifier.testData.numberOfClasses - 1)
images = 0.0
# load images and predict.
for prediction in predictions:
predictionRank = self.__test_top_n_prediction(class_, prediction)
#prevent problems with less than 6 classes
maxRank = min(self.classifier.testData.numberOfClasses - 1, len(predictionRank)-1)
for i in xrange(maxRank+1):
topNMatches[i] += predictionRank[i]
images += 1.0
# Calculate accuracy for class.
segmentResults[class_] = [matches / images for matches in topNMatches]
# calculate Precision recall
precisionValues = []
recallValues = []
f1Scores = []
for top in xrange(self.classifier.testData.numberOfClasses - 1):
# All correctly classified items
truePositives = float(topNMatches[top])
                    # all predicted images minus the correctly predicted ones. For top-1 the total amount of
                    # predictions is exactly the number of images; for top-2 we have twice as many predictions to consider.
falsePositives = float((len(predictions) * (top+1))-truePositives)
# All items that were not correctly classified.
falseNegatives = float(len(predictions) - truePositives)
precision = truePositives / (truePositives + falsePositives)
recall = truePositives / (truePositives + falseNegatives)
#f1Score = 2.0 * ((precision * recall) / (precision + recall))
precisionValues.append(precision)
recallValues.append(recall)
#f1Scores.append(f1Score)
precisionRecallValues[class_] = (precisionValues, recallValues)
if Settings.G_DETAILED_CONSOLE_OUTPUT:
print "\t- Testing {0} - Accuracy: {1:.2f}% - T5 Precision: {2:.2f} - T5 Recall: {3:.2f}".format(class_, segmentResults[class_][0]*100, precisionValues[4], recallValues[4])
# Calculate overall top 1 accuracy.
segmentAccuracy = sum([a[0] for (_, a) in segmentResults.iteritems()]) / len(segmentResults)
segmentError = 1 - segmentAccuracy
# sort accuracies of classes so that we can get the best and worst classes
segmentResultsList = segmentResults.items()
# segmentResultsList contains the top-n accuracies but we only need the top-1 accuracy
segmentResultsList = [(class_, values[0]) for (class_, values) in segmentResultsList]
segmentResultsList = sorted(segmentResultsList, key=operator.itemgetter(1), reverse=True)
# prevent overflow
bestBound = min(2, len(segmentResultsList))
worstBound = max(2, len(segmentResultsList)-2)
bestClasses = segmentResultsList[0:bestBound]
worstClasses = segmentResultsList[worstBound:]
results[segment] = [segmentAccuracy, segmentError, bestClasses, worstClasses, segmentResults, precisionRecallValues]
# Save the results
self.save_results(results, False)
return results
def plot_random_predictions(self, segmentIndex="test", cols=4):
""" Creates an image with predictions of random images from the segment index and the model confidences."""
# result will have a format like this: [(real class, [(class, prediction for class), (class, prediction for class), ...], image)]
results = []
for class_, prediction, image in self.__yield_image_predictions(segmentIndex, maxNumberOfImages=1, shuffle=True, slidingWindow=True):
# convert image back to cv format if neccessary
if not self.transformationBack is None:
image = self.transformationBack(image)
# take the first 4 predictions and turn them to percent (looks better)
top4 = [(cls, p[0]*100.0) for cls, p in prediction[0:4]]
top4.reverse()
# convert the images from bgr to rgb if color
if len(image.shape) > 2 and image.shape[2] != 1:
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
results.append((class_, top4, image))
# plot results
rows = int((ceil(float(len(results)) / cols)) * 2)
f, axarr = plt.subplots(rows, cols)
f.set_size_inches(int(cols*4),int((rows/2)*5))
f.suptitle(str(self.classifier), fontsize=20)
i = 0
for y in range(0, rows, 2):
for x in range(cols):
if i >= len(results):
# disable axis for empty images
axarr[y, x].axis('off')
axarr[y+1, x].axis('off')
continue
if self.classifier.grayscale:
axarr[y, x].imshow(results[i][2], cmap = cm.Greys_r)
else:
axarr[y, x].imshow(results[i][2])
axarr[y, x].set_title(results[i][0])
axarr[y, x].axis('off')
# add bars
barPlotValues = zip(*results[i][1]) # barPlotValues[0] = labels, barPlotValues[1] = prediction values
positions = np.arange(0, 0.8, 0.2)
bars = axarr[y+1, x].barh(positions, barPlotValues[1], align='center', height=0.2, color=Settings.G_COLOR_PALETTE[0], linewidth=0)
# color bar of correct result differently
if results[i][0] in barPlotValues[0]:
correctBarIndex = barPlotValues[0].index(results[i][0])
bars[correctBarIndex].set_color(Settings.G_COLOR_PALETTE[3])
for class_, yPos in zip(barPlotValues[0], positions):
axarr[y+1, x].text(95, yPos, class_, horizontalalignment='right', verticalalignment='center', fontsize=8)
axarr[y+1, x].axis('off')
axarr[y+1, x].set_xlim([0, 100])
i += 1
name = "RandomResults_" + self.classifier.name + "_" + utils.get_uuid()
utils.save_plt_figure(plt, name, self.classifier.modelSaver.get_save_path_for_visualizations())
raw_input("Random results plotting complete. Press any key to continue")
def __test_top_n_prediction(self, class_, predictions):
""" Computes the top-N predictions."""
topN = []
for n in range(1, len(predictions)):
# take n-size slice out of predictions and create list without the confidence.
# the result should look something like this for the top 3 ["bread", "chocolate", "rice"] if the list looks like this
# ["bread", "chocolate", "rice", "noodles", "ice", ...].
topNPredictions = [c for (c, _) in predictions[:n]]
if class_ in topNPredictions:
topN.append(1)
else:
topN.append(0)
return topN
def yield_top_n_results_as_list(self, results):
""" Returns a generator that yields the top-N results."""
for segment in results:
result = results[segment]
# Iterate through classPredictions and display the top-n categories
for class_ in result[4]:
classPredictions = result[4][class_]
topN = []
for accuracy in classPredictions:
topN.append(accuracy)
yield (segment, class_, topN)
def format_results_string(self, results):
""" Formats the results and creates a string that can be saved or printed to the console."""
output = ""
#overallAccuracy, classPredictions = results
output += "\n\n\nTest report for " + self.classifier.name + "\n"
detailedOutput = "\n\nDetailed report:"
outputRows = []
for segment in results:
result = results[segment]
outputRows.append([segment, result[1], result[0], result[2], result[3]])
#detailed output:
detailedOutput += "\n\n********************************************************\nSegment " + segment + "\n"
detailedOutputRows = []
# Iterate through classPredictions and display the top5 categories
for class_ in result[4]:
classPredictions = result[4][class_]
detailedRow = [class_]
for accuracy in classPredictions:
detailedRow.append(accuracy)
detailedOutputRows.append(detailedRow)
detailedOutputTitle = ["class"]
detailedOutputTitle.extend(self.__get_top_title())
detailedOutput += utils.get_table(detailedOutputTitle, 6, *detailedOutputRows).get_string()
output += utils.get_table(["segment", "segment_loss", "segment_accuracy", "top-2", "flop-2"], 6, *outputRows).get_string()
output += detailedOutput
return output
def __get_top_title(self):
""" Returns the Top-N title used for the csv output."""
return ["Top " + str(n+1) for n in range(self.classifier.testData.numberOfClasses-1)]
def export_results_csv(self, results, confMatrices):
""" Exports the results to a csv file."""
writer = self.classifier.modelSaver.get_csv_exporter()
# export test data stats
writer.export(self.classifier.testData.export_test_data_information(), "testDataStats")
# get mean / std images if pre computed
mean = self.classifier.testData.mean_image
if not mean is None:
# there is propably also a std image
std = self.classifier.testData.std_image
cv.imwrite(self.classifier.modelSaver.get_save_path_for_visualizations() + "testDataMeanImage.jpg", mean)
cv.imwrite(self.classifier.modelSaver.get_save_path_for_visualizations() + "testDataStdImage.jpg", std)
# export conf matrices and results
iterationOutput = []
iterationOutputTitle = ["iteration", "segment", "segment loss", "segment accuracy"]
iterationOutputTitle.extend([class_ + " t1 accuracy" for class_ in self.classifier.testData.classes])
iterationOutput.append(iterationOutputTitle)
for iteration in xrange(len(results)):
if iteration < len(confMatrices):
self.export_confusion_matrix_as_csv(confMatrices[iteration], fileName="ConfusionMatrix_iteration" + str(iteration+1))
try:
iterationResults = results[iteration]
except:
# could not extract iterationResults because in this case results does not contain a list of iterations because it had only one iteration.
# This shouldn't happen -> FIXME
return
for segment in iterationResults:
result = iterationResults[segment]
iterationOutputRow = [iteration+1, segment, result[1], result[0]]
for class_ in self.classifier.testData.classes:
iterationOutputRow.append(result[4][class_][0])
iterationOutput.append(iterationOutputRow)
# export precision recall
precisionRecallValues = result[5] # precisionRecallValues[class_] = (precisionValues, recallValues)
for class_ in precisionRecallValues:
precisionCSV = [["Top-N", "precision", "recall"]]
precisionValues, recallValues = precisionRecallValues[class_]
for i in xrange(len(precisionValues)):
precisionCSV.append([i+1, precisionValues[i], recallValues[i]])
writer.export(precisionCSV, "{0}_PrecisionRecall_{1}".format(segment, class_))
# export top-n results
segmentTopResults = []
segmentTopResultsTitle = ["segment", "class"]
segmentTopResultsTitle.extend(self.__get_top_title())
segmentTopResults.append(segmentTopResultsTitle)
for (sgmt, class_, topN) in self.yield_top_n_results_as_list(iterationResults):
segmentTopResultsRow = [sgmt, class_]
segmentTopResultsRow.extend(topN)
segmentTopResults.append(segmentTopResultsRow)
writer.export(segmentTopResults, name="iteration_" + str(iteration+1) + "_topN")
writer.export(iterationOutput, name="detailedResults")
def save_results(self, results, exportToCSV=True):
""" Exports the result string to a text file and saves the results to csv if exportToCSV is True."""
path = self.classifier.modelSaver.get_save_path()
resultString = self.format_results_string(results)
with open(path + "Results.txt", "w") as f:
f.write(resultString)
if exportToCSV:
self.export_results_csv(results, [])
def plot_confusion_matrix(self, save=True, show=True, confMatrix=None):
"""
Plots a confusion matrix and saves the image.
Keyword arguments:
save -- Save confusion matrix
show -- Show confusion matrix. Only works locally or via vcn.
confMatrix -- precomputed confusion matrix - Default: Compute new.
"""
if confMatrix is None:
confMatrix = self.compute_confusion_matrix()
# normalize matrix
normConfMatrix = []
for i in confMatrix:
a = sum(i, 0)
temp = []
for j in i:
temp.append(float(j)/float(a))
normConfMatrix.append(temp)
# can we plot labels? Only plot labels if we have less than 10 classes
showLables = len(confMatrix[0]) < 10
        # we cannot create the figure on the server since tkinter does not work without a display output.
        # in that case we save the confusion matrix so that it can be loaded on a machine with a display to create the plot there.
try:
# create figure and clear it
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(normConfMatrix), cmap=plt.cm.jet, interpolation='nearest')
if showLables:
w = len(confMatrix)
h = len(confMatrix[0])
for x in xrange(w):
for y in xrange(h):
if normConfMatrix[x][y] > 0:
ax.annotate(str(confMatrix[x][y]), xy=(y, x), horizontalalignment='center', verticalalignment='center')
plt.xticks(range(w), self.classifier.testData.classes)
plt.yticks(range(h), self.classifier.testData.classes)
else:
plt.xticks([]),plt.yticks([])
cb = fig.colorbar(res)
if show:
plt.show()
if save:
utils.save_plt_figure(plt, "conf_matrix_{0}".format(self.classifier.name))
except Exception, e:
path = utils.get_temp_path() + "ConfMatrix.tmp"
logging.exception("Error while saving confusion matrix. Saving results in {0}.".format(path))
self.export_confusion_matrix_as_csv(confMatrix)
def calculate_confusion_score(self, confMatrix=None):
"""
        Calculates the sum of the diagonal of the confusion matrix.
This is the number of correctly classified images.
"""
if confMatrix is None:
confMatrix = self.compute_confusion_matrix()
diagonalSum = np.trace(confMatrix)
return diagonalSum
def export_confusion_matrix_as_csv(self, confMatrix=None, fileName="ConfusionMatrix"):
"""
Exports the confusion matrix to csv.
Keyword arguments:
confMatrix -- precomputed confusion matrix
"""
if confMatrix is None:
confMatrix = self.compute_confusion_matrix()
writer = self.classifier.modelSaver.get_csv_exporter()
writer.export(confMatrix, fileName)
# export keys
convKeys = [range(self.classifier.testData.numberOfClasses)]
convKeys.append(self.classifier.testData.classes)
writer.export(convKeys, fileName + "_Keys")
def compute_confusion_matrix(self, export=True):
""" Computes the confusion matrix for the classifier using the test segmentindex. """
# construct the confusion matrix
confusionMatrix = np.zeros((self.classifier.testData.numberOfClasses, self.classifier.testData.numberOfClasses))
classes = self.classifier.testData.classes
classIndex = 0
for class_, predictions in self.__yield_class_predictions("test"):
for prediction in predictions:
predictedClass, _ = prediction[0]
confusionMatrix[classIndex][classes.index(predictedClass)] += 1
classIndex += 1
if export:
self.export_confusion_matrix_as_csv(confusionMatrix)
return confusionMatrix
def classify_image_folder(self, path):
""" Classifies images from a folder from a given path and prints the top-1 prediction on the console."""
if not path.endswith("/"):
path += "/"
if not utils.check_if_dir_exists(path):
raise Exception("Path '{0}' does not exist.".format(path))
from os import walk
# Load filenames
_, _, filenames = walk(path).next()
# Load images
#Load flag for cv.imread.
loadFlag = cv.IMREAD_GRAYSCALE if self.classifier.grayscale else cv.IMREAD_UNCHANGED
from data_io.testdata import load_image
for imgName in filenames:
imgPath = path + imgName
img = load_image(imgPath, loadFlag, 1)
if self.size != (-1, -1):
img = utils.crop_to_square(img)
desiredArea = self.size[0] * self.size[1]
img = utils.equalize_image_size(img, desiredArea)
if not self.transformation is None:
img = self.transformation(img)
prediction = self.classifier.predict([img])
print "Img {0}: {1}".format(imgName, prediction[0])# only top-1 prediction
def classify_webcam(self):
""" Classifies frames from the webcam."""
cam = cv.VideoCapture(0)
while True:
ret_val, img = cam.read()
cv.imshow('TUM FoodCam', img)
try:
prediction = self.classifier.predict([img])
print "{0}".format(prediction[0])# only top-1 prediction
except:
pass
if cv.waitKey(1) == 27:
break # esc to quit
cv.destroyAllWindows()
| python |
class Empty(Exception):
    """ Raised when an element is requested from an empty queue """
class CircularQueue:
    """ Queue implementation using a circularly linked list for storage """
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
class _Node:
""" LightWwight, non public class for storing a singly linked list node """
__slots__ = '_element', '_next'
def __init__(self, element, next):
self._element = element
self._next = next
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
def __init__(self):
""" Create an empty Queue """
self._tail = None
self._size = 0
#-------------------------------------------------------------------------------------
def __len__(self):
""" Return the number of element inn the queue """
return self._size
#-------------------------------------------------------------------------------------
def is_empty(self):
""" Return Ture if the queue is empty """
return self._size == 0
#-------------------------------------------------------------------------------------
def first(self):
""" Return (but do not remove ) the element at the front og the queue
Raise Empty exception if the queueu is empty
"""
if self.is_empty():
raise Empty('Queue is Empty')
head = self._tail._next
return head._element
#-------------------------------------------------------------------------------------
def dequeue(self):
""" Remove and return first element of the queue
Raise Empty exception if the queue is empty
"""
if self.is_empty():
raise Empty('Queue is empty')
oldhead = self._tail._next
if self._size == 1:
self._tail = None
else:
self._tail._next = oldhead._next
self._size -= 1
return oldhead._element
#-------------------------------------------------------------------------------------
def enqueue(self, e):
""" Add element to the back of queue """
newest = self._Node(e, None)
if self.is_empty():
            newest._next = newest # initially circular
else:
newest._next = self._tail._next
self._tail._next = newest
self._tail = newest
self._size += 1
#-------------------------------------------------------------------------------------
def rotate(self):
""" Rotate front element to the back of the queue """
if self._size > 0:
self._tail = self._tail._next
#-------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':
cq = CircularQueue()
s1 = cq.__len__()
print('Initial Size : ', s1)
cq.enqueue(10)
cq.enqueue(20)
cq.enqueue(30)
cq.enqueue(40)
cq.enqueue(50)
cq.enqueue(60)
s2 = cq.__len__()
print('Size : ', s2)
f1 = cq.first()
print('First element : ', f1)
d1 = cq.dequeue()
print('Dequeued element : ', d1)
s3 = cq.__len__()
print('Size : ', s3)
cq.rotate()
f2 = cq.first()
print('First element : ', f2)
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
"""
OUTPUT :
Initial Size : 0
Size : 6
First element : 10
Dequeued element : 10
Size : 5
First element : 30
"""
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "uh-v-hc=h7=%4(5g&f13217*!ja%osm%l0oyb$^n2kk^ij#&zj"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"django.contrib.staticfiles",
"django_blockstack_auth",
]
SITE_ID = 1
STATIC_URL = '/static/'
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
'APP_DIRS': True
}
]
| python |
import os
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
CLICKHOUSE_DATABASE = 'test'
def initialize_database(nodes, shard):
for node in nodes:
node.query('''
CREATE DATABASE {database};
CREATE TABLE `{database}`.src (p UInt64, d UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}')
ORDER BY d PARTITION BY p
SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
CREATE TABLE `{database}`.dest (p UInt64, d UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}')
ORDER BY d PARTITION BY p
SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
'''.format(shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
initialize_database([node1, node2], 1)
yield cluster
except Exception as ex:
        print(ex)
finally:
cluster.shutdown()
def test_consistent_part_after_move_partition(start_cluster):
# insert into all replicas
for i in range(100):
node1.query('INSERT INTO `{database}`.src VALUES ({value} % 2, {value})'.format(database=CLICKHOUSE_DATABASE,
value=i))
query_source = 'SELECT COUNT(*) FROM `{database}`.src'.format(database=CLICKHOUSE_DATABASE)
query_dest = 'SELECT COUNT(*) FROM `{database}`.dest'.format(database=CLICKHOUSE_DATABASE)
assert_eq_with_retry(node2, query_source, node1.query(query_source))
assert_eq_with_retry(node2, query_dest, node1.query(query_dest))
node1.query('ALTER TABLE `{database}`.src MOVE PARTITION 1 TO TABLE `{database}`.dest'.format(database=CLICKHOUSE_DATABASE))
assert_eq_with_retry(node2, query_source, node1.query(query_source))
assert_eq_with_retry(node2, query_dest, node1.query(query_dest))
| python |